// hexsha: 67c2923790b9e846bc7acb77d6462d4909cdce59
use crate::Error;
use std::str::Chars;
use std::str::FromStr;

#[derive(PartialEq, Debug, Clone)]
pub enum LayoutType {
    LeftRight,
    TopBottom,
    WindowPane,
}

impl Default for LayoutType {
    fn default() -> LayoutType {
        LayoutType::WindowPane
    }
}

impl FromStr for LayoutCell {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut chars = s.chars();
        let state = LayoutFSMState::X;
        let mut layout: LayoutCell = Default::default();
        layout.fsm(&mut chars, state)?;
        Ok(layout)
    }
}

// NOTE: tmux source: layout_custom.c
// XXX: checksum can be improved using hex crate
// XXX: implement trait parse FromStr?
#[derive(Default, PartialEq, Clone, Debug)]
pub struct LayoutCell {
    pub x: usize,
    pub y: usize,
    pub x_off: usize,
    pub y_off: usize,
    pub id: Option<usize>,
    pub style: LayoutType,
    pub cells: Option<Vec<LayoutCell>>,
}

#[derive(PartialEq, Clone, Debug)]
pub enum LayoutFSMState {
    X,
    Y,
    XOff,
    YOff,
    Id,
    LeftRight,
    TopBottom,
    EndNested,
    EOL,
    // Error
}

impl LayoutCell {
    pub fn new(
        x: usize,
        y: usize,
        x_off: usize,
        y_off: usize,
        id: Option<usize>,
        style: LayoutType,
        cells: Option<Vec<LayoutCell>>,
    ) -> Self {
        LayoutCell {
            x,
            y,
            x_off,
            y_off,
            id,
            style,
            cells,
        }
    }

    // TODO: optimization
    pub fn fsm(
        &mut self,
        chars: &mut Chars,
        mut state: LayoutFSMState,
    ) -> Result<LayoutFSMState, Error> {
        let mut child: LayoutCell;
        let mut buff = String::new();
        loop {
            if let Some(chr) = chars.next() {
                match (chr, &state) {
                    // end of x element
                    ('x', LayoutFSMState::X) => {
                        self.x = buff.parse()?;
                        state = LayoutFSMState::Y;
                        buff = String::from("");
                    }
                    // end of y element
                    (',', LayoutFSMState::Y) => {
                        self.y = buff.parse()?;
                        state = LayoutFSMState::XOff;
                        buff = String::from("");
                    }
                    // end of x_off element
                    (',', LayoutFSMState::XOff) => {
                        self.x_off = buff.parse()?;
                        state = LayoutFSMState::YOff;
                        buff = String::from("");
                    }
                    // end of y_off element
                    (',', LayoutFSMState::YOff) => {
                        self.y_off = buff.parse()?;
                        state = LayoutFSMState::Id;
                        buff = String::from("");
                    }
                    // end of id element
                    (',', LayoutFSMState::Id) => {
                        self.id = buff.parse().ok();
                        state = LayoutFSMState::X;
                        break;
                    }
                    // end of {} or [] group
                    (',', LayoutFSMState::EndNested) => {
                        state = LayoutFSMState::X;
                        break;
                    }
                    // end of id element inside [] group
                    (']', LayoutFSMState::Id) => {
                        self.id = buff.parse().ok();
                        state = LayoutFSMState::EndNested;
                        break;
                    }
                    // end of id element inside {} group
                    ('}', LayoutFSMState::Id) => {
                        self.id = buff.parse().ok();
                        state = LayoutFSMState::EndNested;
                        break;
                    }
                    //(' ', s) => { s },
                    // end of y_off element before [] group
                    ('[', LayoutFSMState::YOff) => {
                        self.y_off = buff.parse()?;
                        self.id = None;
                        self.style = LayoutType::TopBottom;
                        self.cells = Some(Vec::new());
                        loop {
                            child = Default::default();
                            // TODO: remove unwrap
                            state = child.fsm(chars, LayoutFSMState::X).unwrap();
                            if let Some(c) = self.cells.as_mut() {
                                c.push(child)
                            }
                            if state == LayoutFSMState::EndNested || state == LayoutFSMState::EOL {
                                break;
                            }
                        }
                    }
                    // end of y_off element before {} group
                    ('{', LayoutFSMState::YOff) => {
                        self.y_off = buff.parse()?;
                        self.id = None;
                        self.style = LayoutType::LeftRight;
                        self.cells = Some(Vec::new());
                        loop {
                            child = Default::default();
                            // TODO: remove unwrap
                            state = child.fsm(chars, LayoutFSMState::X).unwrap();
                            if let Some(c) = self.cells.as_mut() {
                                c.push(child)
                            }
                            if state == LayoutFSMState::EndNested || state == LayoutFSMState::EOL {
                                break;
                            }
                        }
                    }
                    (c, _) => {
                        buff.push(c);
                    }
                }
            } else {
                // end of line and id element
                if state == LayoutFSMState::Id {
                    self.id = buff.parse().ok();
                }
                state = LayoutFSMState::EOL;
                break;
            }
        }
        Ok(state)
    }
}
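// A minimal usage sketch of the FromStr impl above, assuming the tmux checksum
// prefix has already been stripped from the layout string; the layout string
// "177x64,0,0,1" is illustrative, not taken from the original file.
#[cfg(test)]
mod fsm_usage_sketch {
    use super::*;

    #[test]
    fn parses_single_pane() {
        // "177x64,0,0,1" describes one 177x64 pane with id 1 at offset (0, 0).
        let layout: LayoutCell = "177x64,0,0,1".parse().unwrap();
        assert_eq!((layout.x, layout.y), (177, 64));
        assert_eq!((layout.x_off, layout.y_off), (0, 0));
        assert_eq!(layout.id, Some(1));
    }
}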
// hexsha: 1ce07202cec58613d2c6e39d772ce11d1851bd04
use crate::common::{Identifier, ParameterType, Store};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

#[derive(Debug, Deserialize, JsonSchema, Serialize)]
#[serde(rename = "parameter")]
pub struct Parameter {
    /// Identifier of the parameter
    pub id: Identifier,
    /// Type of the data value for this parameter
    #[serde(flatten)]
    pub kind: ParameterType,
    /// The Identifier of the store from which the parameter value can be resolved (as a credential)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub store: Option<Store>,
}
// hexsha: 8f93f3558e831683a26ae2e69731ac033c035bd3
//! The `shred` module defines data structures and methods to pull MTU sized data frames from the network.

use crate::blocktree::BlocktreeError;
use crate::entry::create_ticks;
use crate::entry::Entry;
use crate::erasure::Session;
use crate::result;
use crate::result::Error;
use bincode::serialized_size;
use core::cell::RefCell;
use lazy_static::lazy_static;
use rayon::iter::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator};
use rayon::slice::ParallelSlice;
use rayon::ThreadPool;
use serde::{Deserialize, Serialize};
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::hash::Hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use std::io;
use std::io::{Error as IOError, ErrorKind};
use std::sync::Arc;
use std::time::Instant;

lazy_static! {
    pub static ref SIZE_OF_CODING_SHRED_HEADER: usize =
        { serialized_size(&CodingShredHeader::default()).unwrap() as usize };
    pub static ref SIZE_OF_DATA_SHRED_HEADER: usize =
        { serialized_size(&DataShredHeader::default()).unwrap() as usize };
    pub static ref SIZE_OF_SHRED_HEADER: usize =
        { serialized_size(&ShredHeader::default()).unwrap() as usize };
    static ref SIZE_OF_SIGNATURE: usize =
        { bincode::serialized_size(&Signature::default()).unwrap() as usize };
    pub static ref SIZE_OF_SHRED_TYPE: usize =
        { bincode::serialized_size(&ShredType(DATA_SHRED)).unwrap() as usize };
}

thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
    .num_threads(get_thread_count())
    .build()
    .unwrap()));

/// The constants that define if a shred is data or coding
pub const DATA_SHRED: u8 = 0b1010_0101;
pub const CODING_SHRED: u8 = 0b0101_1010;

/// This limit comes from reed solomon library, but unfortunately they don't have
/// a public constant defined for it.
pub const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 16;

/// Based on rse benchmarks, the optimal erasure config uses 16 data shreds and 4 coding shreds
pub const RECOMMENDED_FEC_RATE: f32 = 0.25;

const LAST_SHRED_IN_SLOT: u8 = 0b0000_0001;
pub const DATA_COMPLETE_SHRED: u8 = 0b0000_0010;

#[derive(Serialize, Clone, Deserialize, PartialEq, Debug)]
pub struct ShredType(pub u8);

/// A common header that is present in data and code shred headers
#[derive(Serialize, Clone, Deserialize, Default, PartialEq, Debug)]
pub struct ShredCommonHeader {
    pub signature: Signature,
    pub slot: u64,
    pub index: u32,
}

/// The data shred header has parent offset and flags
#[derive(Serialize, Clone, Default, Deserialize, PartialEq, Debug)]
pub struct DataShredHeader {
    pub common_header: ShredCommonHeader,
    pub parent_offset: u16,
    pub flags: u8,
}

/// The coding shred header has FEC information
#[derive(Serialize, Clone, Default, Deserialize, PartialEq, Debug)]
pub struct CodingShredHeader {
    pub common_header: ShredCommonHeader,
    pub num_data_shreds: u16,
    pub num_coding_shreds: u16,
    pub position: u16,
}

/// A common header that is present at start of every shred
#[derive(Serialize, Clone, Deserialize, PartialEq, Debug)]
pub struct ShredHeader {
    pub shred_type: ShredType,
    pub coding_header: CodingShredHeader,
    pub data_header: DataShredHeader,
}

impl Default for ShredHeader {
    fn default() -> Self {
        ShredHeader {
            shred_type: ShredType(DATA_SHRED),
            coding_header: CodingShredHeader::default(),
            data_header: DataShredHeader::default(),
        }
    }
}

#[derive(Clone, Debug, PartialEq)]
pub struct Shred {
    pub headers: ShredHeader,
    pub payload: Vec<u8>,
}

impl Shred {
    fn new(header: ShredHeader, shred_buf: Vec<u8>) -> Self {
        Shred {
            headers: header,
            payload: shred_buf,
        }
    }

    pub fn new_from_data(
        slot: u64,
        index: u32,
        parent_offset: u16,
        data: Option<&[u8]>,
        is_last_data: bool,
        is_last_in_slot: bool,
    ) -> Self {
        let mut shred_buf = vec![0; PACKET_DATA_SIZE];
        let mut header = ShredHeader::default();
        header.data_header.common_header.slot = slot;
        header.data_header.common_header.index = index;
        header.data_header.parent_offset = parent_offset;
        header.data_header.flags = 0;

        if is_last_data {
            header.data_header.flags |= DATA_COMPLETE_SHRED
        }

        if is_last_in_slot {
            header.data_header.flags |= LAST_SHRED_IN_SLOT
        }

        if let Some(data) = data {
            bincode::serialize_into(&mut shred_buf[..*SIZE_OF_SHRED_HEADER], &header)
                .expect("Failed to write header into shred buffer");
            shred_buf[*SIZE_OF_SHRED_HEADER..*SIZE_OF_SHRED_HEADER + data.len()]
                .clone_from_slice(data);
        }

        Self::new(header, shred_buf)
    }

    pub fn new_from_serialized_shred(shred_buf: Vec<u8>) -> result::Result<Self> {
        let shred_type: ShredType = bincode::deserialize(&shred_buf[..*SIZE_OF_SHRED_TYPE])?;
        let mut header = if shred_type == ShredType(CODING_SHRED) {
            let start = *SIZE_OF_SHRED_TYPE;
            let end = start + *SIZE_OF_CODING_SHRED_HEADER;
            let mut header = ShredHeader::default();
            header.coding_header = bincode::deserialize(&shred_buf[start..end])?;
            header
        } else if shred_type == ShredType(DATA_SHRED) {
            let start = *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE;
            let end = start + *SIZE_OF_DATA_SHRED_HEADER;
            let mut header = ShredHeader::default();
            header.data_header = bincode::deserialize(&shred_buf[start..end])?;
            header
        } else {
            return Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(
                Box::new(bincode::ErrorKind::Custom("Invalid shred type".to_string())),
            )));
        };
        header.shred_type = shred_type;

        Ok(Self::new(header, shred_buf))
    }

    pub fn new_empty_from_header(headers: ShredHeader) -> Self {
        let mut payload = vec![0; PACKET_DATA_SIZE];
        let mut wr = io::Cursor::new(&mut payload[..*SIZE_OF_SHRED_HEADER]);
        bincode::serialize_into(&mut wr, &headers).expect("Failed to serialize shred");
        Shred { headers, payload }
    }

    pub fn new_empty_data_shred() -> Self {
        let mut payload = vec![0; PACKET_DATA_SIZE];
        payload[0] = DATA_SHRED;
        let headers = ShredHeader::default();
        Shred { headers, payload }
    }

    pub fn header(&self) -> &ShredCommonHeader {
        if self.is_data() {
            &self.headers.data_header.common_header
        } else {
            &self.headers.coding_header.common_header
        }
    }

    pub fn header_mut(&mut self) -> &mut ShredCommonHeader {
        if self.is_data() {
            &mut self.headers.data_header.common_header
        } else {
            &mut self.headers.coding_header.common_header
        }
    }

    pub fn slot(&self) -> u64 {
        self.header().slot
    }

    pub fn parent(&self) -> u64 {
        if self.is_data() {
            self.headers.data_header.common_header.slot
                - u64::from(self.headers.data_header.parent_offset)
        } else {
            std::u64::MAX
        }
    }

    pub fn index(&self) -> u32 {
        self.header().index
    }

    /// This is not a safe function. It only changes the meta information.
    /// Use this only for test code which doesn't care about actual shred
    pub fn set_index(&mut self, index: u32) {
        self.header_mut().index = index
    }

    /// This is not a safe function. It only changes the meta information.
    /// Use this only for test code which doesn't care about actual shred
    pub fn set_slot(&mut self, slot: u64) {
        self.header_mut().slot = slot
    }

    pub fn signature(&self) -> Signature {
        self.header().signature
    }

    pub fn seed(&self) -> [u8; 32] {
        let mut seed = [0; 32];
        let seed_len = seed.len();
        let sig = self.header().signature.as_ref();
        seed[0..seed_len].copy_from_slice(&sig[(sig.len() - seed_len)..]);
        seed
    }

    pub fn is_data(&self) -> bool {
        self.headers.shred_type == ShredType(DATA_SHRED)
    }

    pub fn is_code(&self) -> bool {
        self.headers.shred_type == ShredType(CODING_SHRED)
    }

    pub fn last_in_slot(&self) -> bool {
        if self.is_data() {
            self.headers.data_header.flags & LAST_SHRED_IN_SLOT == LAST_SHRED_IN_SLOT
        } else {
            false
        }
    }

    /// This is not a safe function. It only changes the meta information.
    /// Use this only for test code which doesn't care about actual shred
    pub fn set_last_in_slot(&mut self) {
        if self.is_data() {
            self.headers.data_header.flags |= LAST_SHRED_IN_SLOT
        }
    }

    pub fn data_complete(&self) -> bool {
        if self.is_data() {
            self.headers.data_header.flags & DATA_COMPLETE_SHRED == DATA_COMPLETE_SHRED
        } else {
            false
        }
    }

    pub fn coding_params(&self) -> Option<(u16, u16, u16)> {
        if self.is_code() {
            let header = &self.headers.coding_header;
            Some((
                header.num_data_shreds,
                header.num_coding_shreds,
                header.position,
            ))
        } else {
            None
        }
    }

    pub fn verify(&self, pubkey: &Pubkey) -> bool {
        let signed_payload_offset = if self.is_data() {
            *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE
        } else if self.is_code() {
            *SIZE_OF_SHRED_TYPE
        } else {
            return false;
        } + *SIZE_OF_SIGNATURE;
        self.signature()
            .verify(pubkey.as_ref(), &self.payload[signed_payload_offset..])
    }
}

#[derive(Debug)]
pub struct Shredder {
    slot: u64,
    parent_slot: u64,
    fec_rate: f32,
    keypair: Arc<Keypair>,
    pub signing_coding_time: u128,
}

impl Shredder {
    pub fn new(
        slot: u64,
        parent_slot: u64,
        fec_rate: f32,
        keypair: Arc<Keypair>,
    ) -> result::Result<Self> {
        if fec_rate > 1.0 || fec_rate < 0.0 {
            Err(Error::IO(IOError::new(
                ErrorKind::Other,
                format!(
                    "FEC rate {:?} must be more than 0.0 and less than 1.0",
                    fec_rate
                ),
            )))
        } else if slot < parent_slot || slot - parent_slot > u64::from(std::u16::MAX) {
            Err(Error::IO(IOError::new(
                ErrorKind::Other,
                format!(
                    "Current slot {:?} must be > Parent slot {:?}, but the difference must not be > {:?}",
                    slot, parent_slot, std::u16::MAX
                ),
            )))
        } else {
            Ok(Shredder {
                slot,
                parent_slot,
                fec_rate,
                keypair,
                signing_coding_time: 0,
            })
        }
    }

    pub fn entries_to_shreds(
        &self,
        entries: &[Entry],
        is_last_in_slot: bool,
        next_shred_index: u32,
    ) -> (Vec<Shred>, Vec<Shred>, u32) {
        let now = Instant::now();
        let serialized_shreds =
            bincode::serialize(entries).expect("Expect to serialize all entries");
        let serialize_time = now.elapsed().as_millis();

        let no_header_size = PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER;
        let num_shreds = (serialized_shreds.len() + no_header_size - 1) / no_header_size;
        let last_shred_index = next_shred_index + num_shreds as u32 - 1;

        // 1) Generate data shreds
        let data_shreds: Vec<Shred> = PAR_THREAD_POOL.with(|thread_pool| {
            thread_pool.borrow().install(|| {
                serialized_shreds
                    .par_chunks(no_header_size)
                    .enumerate()
                    .map(|(i, shred_data)| {
                        let shred_index = next_shred_index + i as u32;
                        let (is_last_data, is_last_in_slot) = {
                            if shred_index == last_shred_index {
                                (true, is_last_in_slot)
                            } else {
                                (false, false)
                            }
                        };
                        let mut shred = Shred::new_from_data(
                            self.slot,
                            shred_index,
                            (self.slot - self.parent_slot) as u16,
                            Some(shred_data),
                            is_last_data,
                            is_last_in_slot,
                        );
                        Shredder::sign_shred(
                            &self.keypair,
                            &mut shred,
                            *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE,
                        );
                        shred
                    })
                    .collect()
            })
        });

        // 2) Generate coding shreds
        let mut coding_shreds: Vec<_> = PAR_THREAD_POOL.with(|thread_pool| {
            thread_pool.borrow().install(|| {
                data_shreds
                    .par_chunks(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize)
                    .flat_map(|shred_data_batch| {
                        Shredder::generate_coding_shreds(self.slot, self.fec_rate, shred_data_batch)
                    })
                    .collect()
            })
        });

        // 3) Sign coding shreds
        PAR_THREAD_POOL.with(|thread_pool| {
            thread_pool.borrow().install(|| {
                coding_shreds.par_iter_mut().for_each(|mut coding_shred| {
                    Shredder::sign_shred(&self.keypair, &mut coding_shred, *SIZE_OF_SHRED_TYPE);
                })
            })
        });

        let elapsed = now.elapsed().as_millis();

        datapoint_debug!(
            "shredding-stats",
            ("slot", self.slot as i64, i64),
            ("num_data_shreds", data_shreds.len() as i64, i64),
            ("num_coding_shreds", coding_shreds.len() as i64, i64),
            ("signing_coding", (elapsed - serialize_time) as i64, i64),
            ("serialzing", serialize_time as i64, i64),
        );

        (data_shreds, coding_shreds, last_shred_index + 1)
    }

    pub fn sign_shred(signer: &Arc<Keypair>, shred_info: &mut Shred, signature_offset: usize) {
        let data_offset = signature_offset + *SIZE_OF_SIGNATURE;
        let signature = signer.sign_message(&shred_info.payload[data_offset..]);
        let serialized_signature =
            bincode::serialize(&signature).expect("Failed to generate serialized signature");
        shred_info.payload[signature_offset..signature_offset + serialized_signature.len()]
            .copy_from_slice(&serialized_signature);
        shred_info.header_mut().signature = signature;
    }

    pub fn new_coding_shred_header(
        slot: u64,
        index: u32,
        num_data: usize,
        num_code: usize,
        position: usize,
    ) -> ShredHeader {
        let mut header = ShredHeader::default();
        header.shred_type = ShredType(CODING_SHRED);
        header.coding_header.common_header.index = index;
        header.coding_header.common_header.slot = slot;
        header.coding_header.num_coding_shreds = num_code as u16;
        header.coding_header.num_data_shreds = num_data as u16;
        header.coding_header.position = position as u16;
        header
    }

    /// Generates coding shreds for the data shreds in the current FEC set
    pub fn generate_coding_shreds(
        slot: u64,
        fec_rate: f32,
        data_shred_batch: &[Shred],
    ) -> Vec<Shred> {
        assert!(!data_shred_batch.is_empty());
        if fec_rate != 0.0 {
            let num_data = data_shred_batch.len();
            // always generate at least 1 coding shred even if the fec_rate doesn't allow it
            let num_coding = Self::calculate_num_coding_shreds(num_data as f32, fec_rate);
            let session =
                Session::new(num_data, num_coding).expect("Failed to create erasure session");
            let start_index = data_shred_batch[0].header().index;

            // All information after coding shred field in a data shred is encoded
            let coding_block_offset = *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE;
            let data_ptrs: Vec<_> = data_shred_batch
                .iter()
                .map(|data| &data.payload[coding_block_offset..])
                .collect();

            // Create empty coding shreds, with correctly populated headers
            let mut coding_shreds = Vec::with_capacity(num_coding);
            (0..num_coding).for_each(|i| {
                let header = Self::new_coding_shred_header(
                    slot,
                    start_index + i as u32,
                    num_data,
                    num_coding,
                    i,
                );
                let shred = Shred::new_empty_from_header(header);
                coding_shreds.push(shred.payload);
            });

            // Grab pointers for the coding blocks
            let mut coding_ptrs: Vec<_> = coding_shreds
                .iter_mut()
                .map(|buffer| &mut buffer[coding_block_offset..])
                .collect();

            // Create coding blocks
            session
                .encode(&data_ptrs, coding_ptrs.as_mut_slice())
                .expect("Failed in erasure encode");

            // append to the shred list
            coding_shreds
                .into_iter()
                .enumerate()
                .map(|(i, code)| {
                    let header = Self::new_coding_shred_header(
                        slot,
                        start_index + i as u32,
                        num_data,
                        num_coding,
                        i,
                    );
                    Shred::new(header, code)
                })
                .collect()
        } else {
            vec![]
        }
    }

    fn calculate_num_coding_shreds(num_data_shreds: f32, fec_rate: f32) -> usize {
        1.max((fec_rate * num_data_shreds) as usize)
    }

    fn fill_in_missing_shreds(
        num_data: usize,
        num_coding: usize,
        first_index_in_fec_set: usize,
        expected_index: usize,
        index_found: usize,
        present: &mut [bool],
    ) -> Vec<Vec<u8>> {
        let end_index = index_found.saturating_sub(1);
        // The index of current shred must be within the range of shreds that are being
        // recovered
        if !(first_index_in_fec_set..first_index_in_fec_set + num_data + num_coding)
            .contains(&end_index)
        {
            return vec![];
        }

        let missing_blocks: Vec<Vec<u8>> = (expected_index..index_found)
            .map(|missing| {
                present[missing.saturating_sub(first_index_in_fec_set)] = false;
                if missing < first_index_in_fec_set + num_data {
                    Shred::new_empty_data_shred().payload
                } else {
                    vec![0; PACKET_DATA_SIZE]
                }
            })
            .collect();
        missing_blocks
    }

    pub fn try_recovery(
        shreds: Vec<Shred>,
        num_data: usize,
        num_coding: usize,
        first_index: usize,
        slot: u64,
    ) -> Result<Vec<Shred>, reed_solomon_erasure::Error> {
        let mut recovered_data = vec![];
        let fec_set_size = num_data + num_coding;
        if num_coding > 0 && shreds.len() < fec_set_size {
            let coding_block_offset = *SIZE_OF_CODING_SHRED_HEADER + *SIZE_OF_SHRED_TYPE;

            // Let's try recovering missing shreds using erasure
            let mut present = &mut vec![true; fec_set_size];
            let mut next_expected_index = first_index;
            let mut shred_bufs: Vec<Vec<u8>> = shreds
                .into_iter()
                .flat_map(|shred| {
                    let index = Self::get_shred_index(&shred, num_data);
                    let mut blocks = Self::fill_in_missing_shreds(
                        num_data,
                        num_coding,
                        first_index,
                        next_expected_index,
                        index,
                        &mut present,
                    );
                    blocks.push(shred.payload);
                    next_expected_index = index + 1;
                    blocks
                })
                .collect();

            // Insert any other missing shreds after the last shred we have received in the
            // current FEC block
            let mut pending_shreds = Self::fill_in_missing_shreds(
                num_data,
                num_coding,
                first_index,
                next_expected_index,
                first_index + fec_set_size,
                &mut present,
            );
            shred_bufs.append(&mut pending_shreds);

            if shred_bufs.len() != fec_set_size {
                return Err(reed_solomon_erasure::Error::TooFewShardsPresent);
            }

            let session = Session::new(num_data, num_coding).unwrap();

            let mut blocks: Vec<(&mut [u8], bool)> = shred_bufs
                .iter_mut()
                .map(|x| x[coding_block_offset..].as_mut())
                .zip(present.clone())
                .collect();
            session.decode_blocks(&mut blocks)?;

            let mut num_drained = 0;
            present
                .iter()
                .enumerate()
                .for_each(|(position, was_present)| {
                    if !*was_present && position < num_data {
                        let drain_this = position - num_drained;
                        let shred_buf = shred_bufs.remove(drain_this);
                        num_drained += 1;
                        if let Ok(shred) = Shred::new_from_serialized_shred(shred_buf) {
                            let shred_index = shred.index() as usize;
                            // Valid shred must be in the same slot as the original shreds
                            if shred.slot() == slot {
                                // A valid data shred must be indexed between first_index and first+num_data index
                                if (first_index..first_index + num_data).contains(&shred_index) {
                                    recovered_data.push(shred)
                                }
                            }
                        }
                    }
                });
        }

        Ok(recovered_data)
    }

    /// Combines all shreds to recreate the original buffer
    pub fn deshred(shreds: &[Shred]) -> Result<Vec<u8>, reed_solomon_erasure::Error> {
        let num_data = shreds.len();
        let data_shred_bufs = {
            let first_index = shreds.first().unwrap().index() as usize;
            let last_shred = shreds.last().unwrap();
            let last_index = if last_shred.data_complete() || last_shred.last_in_slot() {
                last_shred.index() as usize
            } else {
                0
            };

            if num_data.saturating_add(first_index) != last_index.saturating_add(1) {
                return Err(reed_solomon_erasure::Error::TooFewDataShards);
            }

            shreds.iter().map(|shred| &shred.payload).collect()
        };

        Ok(Self::reassemble_payload(num_data, data_shred_bufs))
    }

    fn get_shred_index(shred: &Shred, num_data: usize) -> usize {
        if shred.is_data() {
            shred.index() as usize
        } else {
            shred.index() as usize + num_data
        }
    }

    fn reassemble_payload(num_data: usize, data_shred_bufs: Vec<&Vec<u8>>) -> Vec<u8> {
        data_shred_bufs[..num_data]
            .iter()
            .flat_map(|data| {
                let offset = *SIZE_OF_SHRED_HEADER;
                data[offset as usize..].iter()
            })
            .cloned()
            .collect()
    }
}

pub fn max_ticks_per_n_shreds(num_shreds: u64) -> u64 {
    let ticks = create_ticks(1, Hash::default());
    max_entries_per_n_shred(&ticks[0], num_shreds)
}

pub fn max_entries_per_n_shred(entry: &Entry, num_shreds: u64) -> u64 {
    let shred_data_size = (PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER) as u64;
    let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
    let entry_size = bincode::serialized_size(entry).unwrap();
    let count_size = vec_size - entry_size;

    (shred_data_size * num_shreds - count_size) / entry_size
}

#[cfg(test)]
pub mod tests {
    use super::*;
    use solana_sdk::system_transaction;
    use std::collections::HashSet;
    use std::convert::TryInto;

    fn verify_test_data_shred(
        shred: &Shred,
        index: u32,
        slot: u64,
        parent: u64,
        pk: &Pubkey,
        verify: bool,
        is_last_in_slot: bool,
        is_last_in_fec_set: bool,
    ) {
        assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
        assert!(shred.is_data());
        assert_eq!(shred.index(), index);
        assert_eq!(shred.slot(), slot);
        assert_eq!(shred.parent(), parent);
        assert_eq!(verify, shred.verify(pk));
        if is_last_in_slot {
            assert!(shred.last_in_slot());
        } else {
            assert!(!shred.last_in_slot());
        }
        if is_last_in_fec_set {
            assert!(shred.data_complete());
        } else {
            assert!(!shred.data_complete());
        }
    }

    fn verify_test_code_shred(shred: &Shred, index: u32, slot: u64, pk: &Pubkey, verify: bool) {
        assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
        assert!(!shred.is_data());
        assert_eq!(shred.index(), index);
        assert_eq!(shred.slot(), slot);
        assert_eq!(verify, shred.verify(pk));
    }

    #[test]
    fn test_data_shredder() {
        let keypair = Arc::new(Keypair::new());
        let slot = 0x123456789abcdef0;

        // Test that parent cannot be > current slot
        assert_matches!(
            Shredder::new(slot, slot + 1, 1.001, keypair.clone()),
            Err(_)
        );

        // Test that slot - parent cannot be > u16 MAX
        assert_matches!(
            Shredder::new(slot, slot - 1 - 0xffff, 1.001, keypair.clone()),
            Err(_)
        );

        let fec_rate = 0.25;
        let parent_slot = slot - 5;
        let shredder = Shredder::new(slot, parent_slot, fec_rate, keypair.clone())
            .expect("Failed in creating shredder");

        let entries: Vec<_> = (0..5)
            .map(|_| {
                let keypair0 = Keypair::new();
                let keypair1 = Keypair::new();
                let tx0 =
                    system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
                Entry::new(&Hash::default(), 1, vec![tx0])
            })
            .collect();

        let size = serialized_size(&entries).unwrap();
        let no_header_size = (PACKET_DATA_SIZE - *SIZE_OF_SHRED_HEADER) as u64;
        let num_expected_data_shreds = (size + no_header_size - 1) / no_header_size;
        let num_expected_coding_shreds =
            Shredder::calculate_num_coding_shreds(num_expected_data_shreds as f32, fec_rate);

        let start_index = 0;
        let (data_shreds, coding_shreds, next_index) =
            shredder.entries_to_shreds(&entries, true, start_index);
        assert_eq!(next_index as u64, num_expected_data_shreds);

        let mut data_shred_indexes = HashSet::new();
        let mut coding_shred_indexes = HashSet::new();
        for shred in data_shreds.iter() {
            assert_eq!(shred.headers.shred_type, ShredType(DATA_SHRED));
            let index = shred.headers.data_header.common_header.index;
            let is_last = index as u64 == num_expected_data_shreds - 1;
            verify_test_data_shred(
                shred,
                index,
                slot,
                parent_slot,
                &keypair.pubkey(),
                true,
                is_last,
                is_last,
            );
            assert!(!data_shred_indexes.contains(&index));
            data_shred_indexes.insert(index);
        }

        for shred in coding_shreds.iter() {
            let index = shred.headers.data_header.common_header.index;
            assert_eq!(shred.headers.shred_type, ShredType(CODING_SHRED));
            verify_test_code_shred(shred, index, slot, &keypair.pubkey(), true);
            assert!(!coding_shred_indexes.contains(&index));
            coding_shred_indexes.insert(index);
        }

        for i in start_index..start_index + num_expected_data_shreds as u32 {
            assert!(data_shred_indexes.contains(&i));
        }

        for i in start_index..start_index + num_expected_coding_shreds as u32 {
            assert!(coding_shred_indexes.contains(&i));
        }

        assert_eq!(data_shred_indexes.len() as u64, num_expected_data_shreds);
        assert_eq!(coding_shred_indexes.len(), num_expected_coding_shreds);

        // Test reassembly
        let deshred_payload = Shredder::deshred(&data_shreds).unwrap();
        let deshred_entries: Vec<Entry> = bincode::deserialize(&deshred_payload).unwrap();
        assert_eq!(entries, deshred_entries);
    }

    #[test]
    fn test_deserialize_shred_payload() {
        let keypair = Arc::new(Keypair::new());
        let slot = 1;
        let parent_slot = 0;
        let shredder = Shredder::new(slot, parent_slot, 0.0, keypair.clone())
            .expect("Failed in creating shredder");

        let entries: Vec<_> = (0..5)
            .map(|_| {
                let keypair0 = Keypair::new();
                let keypair1 = Keypair::new();
                let tx0 =
                    system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
                Entry::new(&Hash::default(), 1, vec![tx0])
            })
            .collect();

        let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
        let deserialized_shred =
            Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap();
        assert_eq!(deserialized_shred, *data_shreds.last().unwrap());
    }

    #[test]
    fn test_data_and_code_shredder() {
        let keypair = Arc::new(Keypair::new());
        let slot = 0x123456789abcdef0;

        // Test that FEC rate cannot be > 1.0
        assert_matches!(
            Shredder::new(slot, slot - 5, 1.001, keypair.clone()),
            Err(_)
        );

        let shredder = Shredder::new(0x123456789abcdef0, slot - 5, 1.0, keypair.clone())
            .expect("Failed in creating shredder");

        // Create enough entries to make > 1 shred
        let num_entries = max_ticks_per_n_shreds(1) + 1;
        let entries: Vec<_> = (0..num_entries)
            .map(|_| {
                let keypair0 = Keypair::new();
                let keypair1 = Keypair::new();
                let tx0 =
                    system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
                Entry::new(&Hash::default(), 1, vec![tx0])
            })
            .collect();

        let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0);

        // Must have created an equal number of coding and data shreds
        assert_eq!(data_shreds.len(), coding_shreds.len());

        for (i, s) in data_shreds.iter().enumerate() {
            verify_test_data_shred(
                s,
                s.index(),
                slot,
                slot - 5,
                &keypair.pubkey(),
                true,
                i == data_shreds.len() - 1,
                i == data_shreds.len() - 1,
            );
        }

        for s in coding_shreds {
            verify_test_code_shred(&s, s.index(), slot, &keypair.pubkey(), true);
        }
    }

    #[test]
    fn test_recovery_and_reassembly() {
        let keypair = Arc::new(Keypair::new());
        let slot = 0x123456789abcdef0;
        let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone())
            .expect("Failed in creating shredder");

        let keypair0 = Keypair::new();
        let keypair1 = Keypair::new();
        let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
        let entry = Entry::new(&Hash::default(), 1, vec![tx0]);

        let num_data_shreds: usize = 5;
        let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
        let entries: Vec<_> = (0..num_entries)
            .map(|_| {
                let keypair0 = Keypair::new();
                let keypair1 = Keypair::new();
                let tx0 =
                    system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
                Entry::new(&Hash::default(), 1, vec![tx0])
            })
            .collect();

        let serialized_entries = bincode::serialize(&entries).unwrap();
        let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0);

        // We should have 10 shreds now, an equal number of coding shreds
        assert_eq!(data_shreds.len(), num_data_shreds);
        assert_eq!(coding_shreds.len(), num_data_shreds);

        let all_shreds = data_shreds
            .iter()
            .cloned()
            .chain(coding_shreds.iter().cloned())
            .collect::<Vec<_>>();

        // Test0: Try recovery/reassembly with only data shreds, but not all data shreds. Hint: should fail
        assert_matches!(
            Shredder::try_recovery(
                data_shreds[..data_shreds.len() - 1].to_vec(),
                num_data_shreds,
                num_data_shreds,
                0,
                slot
            ),
            Err(reed_solomon_erasure::Error::TooFewShardsPresent)
        );

        // Test1: Try recovery/reassembly with only data shreds. Hint: should work
        let recovered_data = Shredder::try_recovery(
            data_shreds[..].to_vec(),
            num_data_shreds,
            num_data_shreds,
            0,
            slot,
        )
        .unwrap();
        assert!(recovered_data.is_empty());

        // Test2: Try recovery/reassembly with missing data shreds + coding shreds. Hint: should work
        let mut shred_info: Vec<Shred> = all_shreds
            .iter()
            .enumerate()
            .filter_map(|(i, b)| if i % 2 == 0 { Some(b.clone()) } else { None })
            .collect();

        let mut recovered_data = Shredder::try_recovery(
            shred_info.clone(),
            num_data_shreds,
            num_data_shreds,
            0,
            slot,
        )
        .unwrap();

        assert_eq!(recovered_data.len(), 2); // Data shreds 1 and 3 were missing
        let recovered_shred = recovered_data.remove(0);
        verify_test_data_shred(
            &recovered_shred,
            1,
            slot,
            slot - 5,
            &keypair.pubkey(),
            true,
            false,
            false,
        );
        shred_info.insert(1, recovered_shred);

        let recovered_shred = recovered_data.remove(0);
        verify_test_data_shred(
            &recovered_shred,
            3,
            slot,
            slot - 5,
            &keypair.pubkey(),
            true,
            false,
            false,
        );
        shred_info.insert(3, recovered_shred);

        let result = Shredder::deshred(&shred_info[..num_data_shreds]).unwrap();
        assert!(result.len() >= serialized_entries.len());
        assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);

        // Test3: Try recovery/reassembly with 3 missing data shreds + 2 coding shreds. Hint: should work
        let mut shred_info: Vec<Shred> = all_shreds
            .iter()
            .enumerate()
            .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None })
            .collect();

        let recovered_data = Shredder::try_recovery(
            shred_info.clone(),
            num_data_shreds,
            num_data_shreds,
            0,
            slot,
        )
        .unwrap();

        assert_eq!(recovered_data.len(), 3); // Data shreds 0, 2, 4 were missing
        for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
            let index = i * 2;
            verify_test_data_shred(
                &recovered_shred,
                index.try_into().unwrap(),
                slot,
                slot - 5,
                &keypair.pubkey(),
                true,
                recovered_shred.index() as usize == num_data_shreds - 1,
                recovered_shred.index() as usize == num_data_shreds - 1,
            );
            shred_info.insert(i * 2, recovered_shred);
        }

        let result = Shredder::deshred(&shred_info[..num_data_shreds]).unwrap();
        assert!(result.len() >= serialized_entries.len());
        assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);

        // Test4: Try reassembly with 2 missing data shreds, but keeping the last
        // data shred. Hint: should fail
        let shreds: Vec<Shred> = all_shreds[..num_data_shreds]
            .iter()
            .enumerate()
            .filter_map(|(i, s)| {
                if (i < 4 && i % 2 != 0) || i == num_data_shreds - 1 {
                    // Keep 1, 3, 4
                    Some(s.clone())
                } else {
                    None
                }
            })
            .collect();
        assert_eq!(shreds.len(), 3);
        assert_matches!(
            Shredder::deshred(&shreds),
            Err(reed_solomon_erasure::Error::TooFewDataShards)
        );

        // Test5: Try recovery/reassembly with non zero index full slot with 3 missing data shreds
        // and 2 missing coding shreds. Hint: should work
        let serialized_entries = bincode::serialize(&entries).unwrap();
        let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 25);

        // We should have 10 shreds now, an equal number of coding shreds
        assert_eq!(data_shreds.len(), num_data_shreds);
        assert_eq!(coding_shreds.len(), num_data_shreds);

        let all_shreds = data_shreds
            .iter()
            .cloned()
            .chain(coding_shreds.iter().cloned())
            .collect::<Vec<_>>();

        let mut shred_info: Vec<Shred> = all_shreds
            .iter()
            .enumerate()
            .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None })
            .collect();

        let recovered_data = Shredder::try_recovery(
            shred_info.clone(),
            num_data_shreds,
            num_data_shreds,
            25,
            slot,
        )
        .unwrap();

        assert_eq!(recovered_data.len(), 3); // Data shreds 25, 27, 29 were missing
        for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
            let index = 25 + (i * 2);
            verify_test_data_shred(
                &recovered_shred,
                index.try_into().unwrap(),
                slot,
                slot - 5,
                &keypair.pubkey(),
                true,
                index == 25 + num_data_shreds - 1,
                index == 25 + num_data_shreds - 1,
            );
            shred_info.insert(i * 2, recovered_shred);
        }

        let result = Shredder::deshred(&shred_info[..num_data_shreds]).unwrap();
        assert!(result.len() >= serialized_entries.len());
        assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);

        // Test6: Try recovery/reassembly with incorrect slot. Hint: does not recover any shreds
        let recovered_data = Shredder::try_recovery(
            shred_info.clone(),
            num_data_shreds,
            num_data_shreds,
            25,
            slot + 1,
        )
        .unwrap();
        assert!(recovered_data.is_empty());

        // Test7: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds
        assert_matches!(
            Shredder::try_recovery(
                shred_info.clone(),
                num_data_shreds,
                num_data_shreds,
                15,
                slot,
            ),
            Err(reed_solomon_erasure::Error::TooFewShardsPresent)
        );

        // Test8: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds
        assert_matches!(
            Shredder::try_recovery(shred_info, num_data_shreds, num_data_shreds, 35, slot,),
            Err(reed_solomon_erasure::Error::TooFewShardsPresent)
        );
    }

    #[test]
    fn test_multi_fec_block_coding() {
        let keypair = Arc::new(Keypair::new());
        let slot = 0x123456789abcdef0;
        let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone())
            .expect("Failed in creating shredder");

        let num_fec_sets = 100;
        let num_data_shreds = (MAX_DATA_SHREDS_PER_FEC_BLOCK * num_fec_sets) as usize;
        let keypair0 = Keypair::new();
        let keypair1 = Keypair::new();
        let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
        let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
        let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);

        let entries: Vec<_> = (0..num_entries)
            .map(|_| {
                let keypair0 = Keypair::new();
                let keypair1 = Keypair::new();
                let tx0 =
                    system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
                Entry::new(&Hash::default(), 1, vec![tx0])
            })
            .collect();

        let serialized_entries = bincode::serialize(&entries).unwrap();
        let (data_shreds, coding_shreds, next_index) =
            shredder.entries_to_shreds(&entries, true, 0);
        assert_eq!(next_index as usize, num_data_shreds);
        assert_eq!(data_shreds.len(), num_data_shreds);
        assert_eq!(coding_shreds.len(), num_data_shreds);

        for c in &coding_shreds {
            assert!(!c.is_data());
        }

        let mut all_shreds = vec![];
        for i in 0..num_fec_sets {
            let shred_start_index = (MAX_DATA_SHREDS_PER_FEC_BLOCK * i) as usize;
            let end_index = shred_start_index + MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1;
            let fec_set_shreds = data_shreds[shred_start_index..=end_index]
                .iter()
                .cloned()
                .chain(coding_shreds[shred_start_index..=end_index].iter().cloned())
                .collect::<Vec<_>>();

            let mut shred_info: Vec<Shred> = fec_set_shreds
                .iter()
                .enumerate()
                .filter_map(|(i, b)| if i % 2 != 0 { Some(b.clone()) } else { None })
                .collect();

            let recovered_data = Shredder::try_recovery(
                shred_info.clone(),
                MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
                MAX_DATA_SHREDS_PER_FEC_BLOCK as usize,
                shred_start_index,
                slot,
            )
            .unwrap();

            for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
                let index = shred_start_index + (i * 2);
                verify_test_data_shred(
                    &recovered_shred,
                    index.try_into().unwrap(),
                    slot,
                    slot - 5,
                    &keypair.pubkey(),
                    true,
                    index == end_index,
                    index == end_index,
                );
                shred_info.insert(i * 2, recovered_shred);
            }

            all_shreds.extend(
                shred_info
                    .into_iter()
                    .take(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize),
            );
        }

        let result = Shredder::deshred(&all_shreds[..]).unwrap();
        assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
    }
}
// hexsha: 16f99f1fb0260655d8bf7f20f4115580928e0f48
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0

use super::cfg::BlockCFG;
use crate::parser::ast::Var;
use std::collections::BTreeSet;

pub fn optimize(cfg: &mut BlockCFG) {
    super::remove_no_ops::optimize(cfg);
    loop {
        let ssa_temps = {
            let s = count(cfg);
            if s.is_empty() {
                return;
            }
            s
        };
        eliminate(cfg, ssa_temps);
        super::remove_no_ops::optimize(cfg);
    }
}

//**************************************************************************************************
// Count assignment and usage
//**************************************************************************************************

fn count(cfg: &BlockCFG) -> BTreeSet<Var> {
    let mut context = count::Context::new();
    for block in cfg.blocks().values() {
        for cmd in block {
            count::command(&mut context, cmd)
        }
    }
    context.finish()
}

mod count {
    use crate::{
        hlir::ast::*,
        parser::ast::{BinOp, UnaryOp, Var},
        shared::*,
    };
    use std::collections::{BTreeMap, BTreeSet};

    pub struct Context {
        assigned: BTreeMap<Var, Option<usize>>,
        used: BTreeMap<Var, Option<usize>>,
    }

    impl Context {
        pub fn new() -> Self {
            Context {
                assigned: BTreeMap::new(),
                used: BTreeMap::new(),
            }
        }

        fn assign(&mut self, var: &Var, substitutable: bool) {
            if !substitutable {
                self.assigned.insert(var.clone(), None);
                return;
            }

            if let Some(count) = self.assigned.entry(var.clone()).or_insert_with(|| Some(0)) {
                *count += 1
            }
        }

        fn used(&mut self, var: &Var, substitutable: bool) {
            if !substitutable {
                self.used.insert(var.clone(), None);
                return;
            }

            if let Some(count) = self.used.entry(var.clone()).or_insert_with(|| Some(0)) {
                *count += 1
            }
        }

        pub fn finish(self) -> BTreeSet<Var> {
            let Context { assigned, used } = self;
            assigned
                .into_iter()
                .filter(|(_v, count)| count.map(|c| c == 1).unwrap_or(false))
                .map(|(v, _count)| v)
                .filter(|v| {
                    used.get(v)
                        .unwrap_or(&None)
                        .map(|c| c == 1)
                        .unwrap_or(false)
                })
                .collect()
        }
    }

    pub fn command(context: &mut Context, sp!(_, cmd_): &Command) {
        use Command_ as C;
        match cmd_ {
            C::Assign(ls, e) => {
                exp(context, e);
                let substitutable_rvalues = can_subst_exp(ls.len(), e);
                lvalues(context, ls, substitutable_rvalues);
            }
            C::Mutate(el, er) => {
                exp(context, er);
                exp(context, el)
            }
            C::Return(e)
            | C::Abort(e)
            | C::IgnoreAndPop { exp: e, .. }
            | C::JumpIf { cond: e, .. } => exp(context, e),
            C::Jump(_) => (),
            C::Break | C::Continue => panic!("ICE break/continue not translated to jumps"),
        }
    }

    fn lvalues(context: &mut Context, ls: &[LValue], substitutable_rvalues: Vec<bool>) {
        assert!(ls.len() == substitutable_rvalues.len());
        ls.iter()
            .zip(substitutable_rvalues)
            .for_each(|(l, substitutable)| lvalue(context, l, substitutable))
    }

    fn lvalue(context: &mut Context, sp!(_, l_): &LValue, substitutable: bool) {
        use LValue_ as L;
        match l_ {
            L::Ignore | L::Unpack(_, _, _) => (),
            L::Var(v, _) => context.assign(v, substitutable),
        }
    }

    fn exp(context: &mut Context, parent_e: &Exp) {
        use UnannotatedExp_ as E;
        match &parent_e.exp.value {
            E::Unit | E::Value(_) | E::UnresolvedError => (),
            E::Spec(_, used_locals) => {
                used_locals.keys().for_each(|var| context.used(var, false));
            }
            E::BorrowLocal(_, var) => context.used(var, false),
            E::Copy { var, .. } | E::Move { var, .. } => context.used(var, true),
            E::ModuleCall(mcall) => exp(context, &mcall.arguments),
            E::Builtin(_, e)
            | E::Freeze(e)
            | E::Dereference(e)
            | E::UnaryExp(_, e)
            | E::Borrow(_, e, _)
            | E::Cast(e, _) => exp(context, e),
            E::BinopExp(e1, _, e2) => {
                exp(context, e1);
                exp(context, e2)
            }
            E::Pack(_, _, fields) => fields.iter().for_each(|(_, _, e)| exp(context, e)),
            E::ExpList(es) => es.iter().for_each(|item| exp_list_item(context, item)),
            E::Unreachable => panic!("ICE should not analyze dead code"),
        }
    }

    fn exp_list_item(context: &mut Context, item: &ExpListItem) {
        match item {
            ExpListItem::Single(e, _) | ExpListItem::Splat(_, e, _) => exp(context, e),
        }
    }

    fn can_subst_exp(lvalue_len: usize, exp: &Exp) -> Vec<bool> {
        use ExpListItem as I;
        use UnannotatedExp_ as E;
        match (lvalue_len, &exp.exp.value) {
            (0, _) => vec![],
            (1, _) => vec![can_subst_exp_single(exp)],
            (_, E::ExpList(es))
                if es.iter().all(|item| match item {
                    I::Splat(_, _, _) => false,
                    I::Single(_, _) => true,
                }) =>
            {
                es.iter()
                    .map(|item| match item {
                        I::Single(e, _) => can_subst_exp_single(e),
                        I::Splat(_, _, _) => unreachable!(),
                    })
                    .collect()
            }
            (_, _) => (0..lvalue_len).map(|_| false).collect(),
        }
    }

    fn can_subst_exp_single(parent_e: &Exp) -> bool {
        use UnannotatedExp_ as E;
        match &parent_e.exp.value {
            E::UnresolvedError
            | E::Spec(_, _)
            | E::BorrowLocal(_, _)
            | E::Copy { .. }
            | E::Builtin(_, _)
            | E::Freeze(_)
            | E::Dereference(_)
            | E::Move { .. }
            | E::Borrow(_, _, _) => false,

            E::Unit | E::Value(_) => true,

            E::Cast(e, _) => can_subst_exp_single(e),
            E::UnaryExp(op, e) => can_subst_exp_unary(op) && can_subst_exp_single(e),
            E::BinopExp(e1, op, e2) => {
                can_subst_exp_binary(op) && can_subst_exp_single(e1) && can_subst_exp_single(e2)
            }
            E::ModuleCall(mcall) => can_subst_exp_module_call(mcall),
            E::ExpList(es) => es.iter().all(|i| can_subst_exp_item(i)),
            E::Pack(_, _, fields) => fields.iter().all(|(_, _, e)| can_subst_exp_single(e)),
            E::Unreachable => panic!("ICE should not analyze dead code"),
        }
    }

    fn can_subst_exp_unary(sp!(_, op_): &UnaryOp) -> bool {
        op_.is_pure()
    }

    fn can_subst_exp_binary(sp!(_, op_): &BinOp) -> bool {
        op_.is_pure()
    }

    fn can_subst_exp_module_call(mcall: &ModuleCall) -> bool {
        use crate::shared::fake_natives::transaction as TXN;
        let ModuleCall {
            module,
            name,
            arguments,
            ..
        } = mcall;
        let a_m_f = (
            &module.0.value.address,
            module.0.value.name.value(),
            name.value(),
        );
        let call_is_pure = match a_m_f {
            (&Address::LIBRA_CORE, TXN::MOD, TXN::ASSERT) => {
                panic!("ICE should have been inlined")
            }
            (&Address::LIBRA_CORE, TXN::MOD, TXN::MAX_GAS)
            | (&Address::LIBRA_CORE, TXN::MOD, TXN::SENDER)
            | (&Address::LIBRA_CORE, TXN::MOD, TXN::SEQUENCE_NUM)
            | (&Address::LIBRA_CORE, TXN::MOD, TXN::PUBLIC_KEY)
            | (&Address::LIBRA_CORE, TXN::MOD, TXN::GAS_PRICE) => true,
            _ => false,
        };
        call_is_pure && can_subst_exp_single(arguments)
    }

    fn can_subst_exp_item(item: &ExpListItem) -> bool {
        use ExpListItem as I;
        match item {
            I::Single(e, _) => can_subst_exp_single(e),
            I::Splat(_, es, _) => can_subst_exp_single(es),
        }
    }
}

//**************************************************************************************************
// Eliminate
//**************************************************************************************************

fn eliminate(cfg: &mut BlockCFG, ssa_temps: BTreeSet<Var>) {
    let context = &mut eliminate::Context::new(ssa_temps);
    loop {
        for block in cfg.blocks_mut().values_mut() {
            for cmd in block {
                eliminate::command(context, cmd)
            }
        }
        if context.finished() {
            return;
        }
    }
}

mod eliminate {
    use crate::{
        hlir::ast::{self as H, *},
        parser::ast::Var,
    };
    use move_ir_types::location::*;
    use std::collections::{BTreeMap, BTreeSet};

    pub struct Context {
        eliminated: BTreeMap<Var, Exp>,
        ssa_temps: BTreeSet<Var>,
    }

    impl Context {
        pub fn new(ssa_temps: BTreeSet<Var>) -> Self {
            Context {
                ssa_temps,
                eliminated: BTreeMap::new(),
            }
        }

        pub fn finished(&self) -> bool {
            self.eliminated.is_empty() && self.ssa_temps.is_empty()
        }
    }

    pub fn command(context: &mut Context, sp!(_, cmd_): &mut Command) {
        use Command_ as C;
        match cmd_ {
            C::Assign(ls, e) => {
                exp(context, e);
                let eliminated = lvalues(context, ls);
                remove_eliminated(context, eliminated, e)
            }
            C::Mutate(el, er) => {
                exp(context, er);
                exp(context, el)
            }
            C::Return(e)
            | C::Abort(e)
            | C::IgnoreAndPop { exp: e, .. }
            | C::JumpIf { cond: e, .. } => exp(context, e),
            C::Jump(_) => (),
            C::Break | C::Continue => panic!("ICE break/continue not translated to jumps"),
        }
    }

    enum LRes {
        Same(LValue),
        Elim(Var),
    }

    fn lvalues(context: &mut Context, ls: &mut Vec<LValue>) -> Vec<Option<Var>> {
        let old = std::mem::replace(ls, vec![]);
        old.into_iter()
            .map(|l| match lvalue(context, l) {
                LRes::Same(lvalue) => {
                    ls.push(lvalue);
                    None
                }
                LRes::Elim(v) => Some(v),
            })
            .collect()
    }

    fn lvalue(context: &mut Context, sp!(loc, l_): LValue) -> LRes {
        use LValue_ as L;
        match l_ {
            l_ @ L::Ignore | l_ @ L::Unpack(_, _, _) => LRes::Same(sp(loc, l_)),
            L::Var(v, t) => {
                let contained = context.ssa_temps.remove(&v);
                if contained {
                    LRes::Elim(v)
                } else {
                    LRes::Same(sp(loc, L::Var(v, t)))
                }
            }
        }
    }

    fn exp(context: &mut Context, parent_e: &mut Exp) {
        use UnannotatedExp_ as E;
        match &mut parent_e.exp.value {
            E::Copy { var, .. } | E::Move { var, .. } => {
                if let Some(replacement) = context.eliminated.remove(var) {
                    *parent_e = replacement
                }
            }
            E::Unit | E::Value(_) | E::Spec(_, _) | E::UnresolvedError | E::BorrowLocal(_, _) => (),
            E::ModuleCall(mcall) => exp(context, &mut mcall.arguments),
            E::Builtin(_, e)
            | E::Freeze(e)
            | E::Dereference(e)
            | E::UnaryExp(_, e)
            | E::Borrow(_, e, _)
            | E::Cast(e, _) => exp(context, e),
            E::BinopExp(e1, _, e2) => {
                exp(context, e1);
                exp(context, e2)
            }
            E::Pack(_, _, fields) => fields.iter_mut().for_each(|(_, _, e)| exp(context, e)),
            E::ExpList(es) => es.iter_mut().for_each(|item| exp_list_item(context, item)),
            E::Unreachable => panic!("ICE should not analyze dead code"),
        }
    }

    fn exp_list_item(context: &mut Context, item: &mut ExpListItem) {
        match item {
            ExpListItem::Single(e, _) | ExpListItem::Splat(_, e, _) => exp(context, e),
        }
    }

    fn remove_eliminated(context: &mut Context, mut eliminated: Vec<Option<Var>>, e: &mut Exp) {
        if eliminated.iter().all(|opt| opt.is_none()) {
            return;
        }

        match eliminated.len() {
            0 => (),
            1 => remove_eliminated_single(context, eliminated.pop().unwrap().unwrap(), e),
            _ => {
                let tys = match &mut e.ty.value {
                    Type_::Multiple(tys) => tys,
                    _ => panic!("ICE local elimination type mismatch"),
                };
                let es = match &mut e.exp.value {
                    UnannotatedExp_::ExpList(es) => es,
                    _ => panic!("ICE local elimination type mismatch"),
                };
                let old_tys = std::mem::replace(tys, vec![]);
                let old_es = std::mem::replace(es, vec![]);
                for ((mut item, ty), elim_opt) in old_es.into_iter().zip(old_tys).zip(eliminated) {
                    let e = match &mut item {
                        ExpListItem::Single(e, _) => e,
                        ExpListItem::Splat(_, _, _) => {
                            panic!("ICE local elimination filtering failed")
                        }
                    };
                    match elim_opt {
                        None => {
                            tys.push(ty);
                            es.push(item)
                        }
                        Some(v) => {
                            remove_eliminated_single(context, v, e);
                            match &e.ty.value {
                                Type_::Unit => (),
                                Type_::Single(_) => {
                                    tys.push(ty);
                                    es.push(item)
                                }
                                Type_::Multiple(_) => {
                                    panic!("ICE local elimination replacement type mismatch")
                                }
                            }
                        }
                    }
                }
                if es.is_empty() {
                    *e = unit(e.exp.loc)
                }
            }
        }
    }

    fn remove_eliminated_single(context: &mut Context, v: Var, e: &mut Exp) {
        let old = std::mem::replace(e, unit(e.exp.loc));
        context.eliminated.insert(v, old);
    }

    fn unit(loc: Loc) -> Exp {
        H::exp(sp(loc, Type_::Unit), sp(loc, UnannotatedExp_::Unit))
    }
}
// hexsha: 8903da50ff67d72942afadf576a24da9256e09b7
#[doc = "Reader of register ACC29_14"] pub type R = crate::R<u32, super::ACC29_14>; #[doc = "Writer for register ACC29_14"] pub type W = crate::W<u32, super::ACC29_14>; #[doc = "Register ACC29_14 `reset()`'s with value 0"] impl crate::ResetValue for super::ACC29_14 { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `RESERVED16`"] pub type RESERVED16_R = crate::R<u16, u16>; #[doc = "Write proxy for field `RESERVED16`"] pub struct RESERVED16_W<'a> { w: &'a mut W, } impl<'a> RESERVED16_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !(0xffff << 16)) | (((value as u32) & 0xffff) << 16); self.w } } #[doc = "Reader of field `VALUE`"] pub type VALUE_R = crate::R<u16, u16>; #[doc = "Write proxy for field `VALUE`"] pub struct VALUE_W<'a> { w: &'a mut W, } impl<'a> VALUE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff); self.w } } impl R { #[doc = "Bits 16:31 - 31:16\\] Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."] #[inline(always)] pub fn reserved16(&self) -> RESERVED16_R { RESERVED16_R::new(((self.bits >> 16) & 0xffff) as u16) } #[doc = "Bits 0:15 - 15:0\\] Value of the accumulator, bits 29:14."] #[inline(always)] pub fn value(&self) -> VALUE_R { VALUE_R::new((self.bits & 0xffff) as u16) } } impl W { #[doc = "Bits 16:31 - 31:16\\] Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."] #[inline(always)] pub fn reserved16(&mut self) -> RESERVED16_W { RESERVED16_W { w: self } } #[doc = "Bits 0:15 - 15:0\\] Value of the accumulator, bits 29:14."] #[inline(always)] pub fn value(&mut self) -> VALUE_W { VALUE_W { w: self } } }
// hexsha: 62d80aa148b16ba0c01ffc61abdc637c48fe2ab6
#![allow(non_camel_case_types, non_snake_case)]

extern crate xcb;
extern crate libc;

pub mod ffi;

#[macro_use]
mod util;

#[cfg(feature = "icccm")]
pub mod icccm;
#[cfg(feature = "ewmh")]
pub mod ewmh;
#[cfg(feature = "image")]
pub mod image;
#[cfg(feature = "cursor")]
pub mod cursor;
#[cfg(feature = "keysyms")]
pub mod keysyms;
#[cfg(feature = "misc")]
pub mod misc;
#[cfg(feature = "render")]
pub mod render;
// hexsha: 0eb732219cf679df496ce9d7eadc6fa8b7bb67cc
use crate::kernel::BrokerEvent;

/// Wrapper for a platform-specific channel sender.
pub trait Sender: Send {
    /// Sends a BrokerEvent to the channel receiver.
    fn send(&self, event: BrokerEvent);
    /// Clones a sender and returns a new boxed instance.
    ///
    /// # open issues
    ///
    /// * https://github.com/ce-rust/cerk/issues/21
    fn clone_boxed(&self) -> Box<dyn Sender + Send>;
}

/// Boxed wrapper for a platform-specific channel sender.
pub type BoxedSender = Box<dyn Sender + Send>;
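// A minimal sketch of one possible implementor, assuming BrokerEvent: Send;
// this wrapper type is illustrative and not part of the crate. clone_boxed
// exists because Clone is not object-safe, so a Box<dyn Sender> cannot be
// cloned directly.
struct MpscSender(std::sync::mpsc::Sender<BrokerEvent>);

impl Sender for MpscSender {
    fn send(&self, event: BrokerEvent) {
        // in this sketch, a closed channel is silently ignored
        let _ = self.0.send(event);
    }

    fn clone_boxed(&self) -> Box<dyn Sender + Send> {
        // mpsc::Sender is Clone, so cloning the inner sender is enough
        Box::new(MpscSender(self.0.clone()))
    }
}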
// hexsha: 22b6a684a951e3d76fc4aedcd0ec7778b7c2b704
#[doc = r"Register block"] #[repr(C)] pub struct RegisterBlock { #[doc = "0x00 - Control Register"] pub cr: crate::Reg<cr::CR_SPEC>, #[doc = "0x04 - Configuration Register"] pub cfg: crate::Reg<cfg::CFG_SPEC>, #[doc = "0x08 - Status Register"] pub sr: crate::Reg<sr::SR_SPEC>, #[doc = "0x0c - Status Clear Register"] pub scr: crate::Reg<scr::SCR_SPEC>, #[doc = "0x10 - Resistive Touch Screen Register"] pub rts: crate::Reg<rts::RTS_SPEC>, #[doc = "0x14 - Sequencer Configuration Register"] pub seqcfg: crate::Reg<seqcfg::SEQCFG_SPEC>, _reserved_6_first_dma_word_cdma: [u8; 0x04], #[doc = "0x1c - Timing Configuration Register"] pub tim: crate::Reg<tim::TIM_SPEC>, #[doc = "0x20 - Internal Timer Register"] pub itimer: crate::Reg<itimer::ITIMER_SPEC>, #[doc = "0x24 - Window Monitor Configuration Register"] pub wcfg: crate::Reg<wcfg::WCFG_SPEC>, #[doc = "0x28 - Window Monitor Threshold Configuration Register"] pub wth: crate::Reg<wth::WTH_SPEC>, #[doc = "0x2c - Sequencer Last Converted Value Register"] pub lcv: crate::Reg<lcv::LCV_SPEC>, #[doc = "0x30 - Interrupt Enable Register"] pub ier: crate::Reg<ier::IER_SPEC>, #[doc = "0x34 - Interrupt Disable Register"] pub idr: crate::Reg<idr::IDR_SPEC>, #[doc = "0x38 - Interrupt Mask Register"] pub imr: crate::Reg<imr::IMR_SPEC>, #[doc = "0x3c - Calibration Register"] pub calib: crate::Reg<calib::CALIB_SPEC>, #[doc = "0x40 - Version Register"] pub version: crate::Reg<version::VERSION_SPEC>, #[doc = "0x44 - Parameter Register"] pub parameter: crate::Reg<parameter::PARAMETER_SPEC>, } impl RegisterBlock { #[doc = "0x18 - Configuration Direct Memory Access Register"] #[inline(always)] pub fn second_dma_word_cdma_alt( &self, ) -> &crate::Reg<second_dma_word_cdma_alt::SECOND_DMA_WORD_CDMA_ALT_SPEC> { unsafe { &*(((self as *const Self) as *const u8).add(24usize) as *const crate::Reg<second_dma_word_cdma_alt::SECOND_DMA_WORD_CDMA_ALT_SPEC>) } } #[doc = "0x18 - Configuration Direct Memory Access Register"] #[inline(always)] pub fn first_dma_word_cdma( &self, ) -> &crate::Reg<first_dma_word_cdma::FIRST_DMA_WORD_CDMA_SPEC> { unsafe { &*(((self as *const Self) as *const u8).add(24usize) as *const crate::Reg<first_dma_word_cdma::FIRST_DMA_WORD_CDMA_SPEC>) } } } #[doc = "CALIB register accessor: an alias for `Reg<CALIB_SPEC>`"] pub type CALIB = crate::Reg<calib::CALIB_SPEC>; #[doc = "Calibration Register"] pub mod calib; #[doc = "FIRST_DMA_WORD_CDMA register accessor: an alias for `Reg<FIRST_DMA_WORD_CDMA_SPEC>`"] pub type FIRST_DMA_WORD_CDMA = crate::Reg<first_dma_word_cdma::FIRST_DMA_WORD_CDMA_SPEC>; #[doc = "Configuration Direct Memory Access Register"] pub mod first_dma_word_cdma; #[doc = "SECOND_DMA_WORD_CDMA_ALT register accessor: an alias for `Reg<SECOND_DMA_WORD_CDMA_ALT_SPEC>`"] pub type SECOND_DMA_WORD_CDMA_ALT = crate::Reg<second_dma_word_cdma_alt::SECOND_DMA_WORD_CDMA_ALT_SPEC>; #[doc = "Configuration Direct Memory Access Register"] pub mod second_dma_word_cdma_alt; #[doc = "CFG register accessor: an alias for `Reg<CFG_SPEC>`"] pub type CFG = crate::Reg<cfg::CFG_SPEC>; #[doc = "Configuration Register"] pub mod cfg; #[doc = "CR register accessor: an alias for `Reg<CR_SPEC>`"] pub type CR = crate::Reg<cr::CR_SPEC>; #[doc = "Control Register"] pub mod cr; #[doc = "IDR register accessor: an alias for `Reg<IDR_SPEC>`"] pub type IDR = crate::Reg<idr::IDR_SPEC>; #[doc = "Interrupt Disable Register"] pub mod idr; #[doc = "IER register accessor: an alias for `Reg<IER_SPEC>`"] pub type IER = crate::Reg<ier::IER_SPEC>; #[doc = "Interrupt Enable Register"] pub mod ier; 
#[doc = "IMR register accessor: an alias for `Reg<IMR_SPEC>`"] pub type IMR = crate::Reg<imr::IMR_SPEC>; #[doc = "Interrupt Mask Register"] pub mod imr; #[doc = "ITIMER register accessor: an alias for `Reg<ITIMER_SPEC>`"] pub type ITIMER = crate::Reg<itimer::ITIMER_SPEC>; #[doc = "Internal Timer Register"] pub mod itimer; #[doc = "LCV register accessor: an alias for `Reg<LCV_SPEC>`"] pub type LCV = crate::Reg<lcv::LCV_SPEC>; #[doc = "Sequencer Last Converted Value Register"] pub mod lcv; #[doc = "PARAMETER register accessor: an alias for `Reg<PARAMETER_SPEC>`"] pub type PARAMETER = crate::Reg<parameter::PARAMETER_SPEC>; #[doc = "Parameter Register"] pub mod parameter; #[doc = "RTS register accessor: an alias for `Reg<RTS_SPEC>`"] pub type RTS = crate::Reg<rts::RTS_SPEC>; #[doc = "Resistive Touch Screen Register"] pub mod rts; #[doc = "SCR register accessor: an alias for `Reg<SCR_SPEC>`"] pub type SCR = crate::Reg<scr::SCR_SPEC>; #[doc = "Status Clear Register"] pub mod scr; #[doc = "SEQCFG register accessor: an alias for `Reg<SEQCFG_SPEC>`"] pub type SEQCFG = crate::Reg<seqcfg::SEQCFG_SPEC>; #[doc = "Sequencer Configuration Register"] pub mod seqcfg; #[doc = "SR register accessor: an alias for `Reg<SR_SPEC>`"] pub type SR = crate::Reg<sr::SR_SPEC>; #[doc = "Status Register"] pub mod sr; #[doc = "TIM register accessor: an alias for `Reg<TIM_SPEC>`"] pub type TIM = crate::Reg<tim::TIM_SPEC>; #[doc = "Timing Configuration Register"] pub mod tim; #[doc = "VERSION register accessor: an alias for `Reg<VERSION_SPEC>`"] pub type VERSION = crate::Reg<version::VERSION_SPEC>; #[doc = "Version Register"] pub mod version; #[doc = "WCFG register accessor: an alias for `Reg<WCFG_SPEC>`"] pub type WCFG = crate::Reg<wcfg::WCFG_SPEC>; #[doc = "Window Monitor Configuration Register"] pub mod wcfg; #[doc = "WTH register accessor: an alias for `Reg<WTH_SPEC>`"] pub type WTH = crate::Reg<wth::WTH_SPEC>; #[doc = "Window Monitor Threshold Configuration Register"] pub mod wth;
// hexsha: 72808c64fb31b427ee7050a6a55f4d641eaaf696
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#[cfg(feature = "bench")]
use std::iter;
#[cfg(feature = "bench")]
use test::{self, Bencher};

#[cfg(feature = "bench")]
use super::UnicodeWidthChar;

use std::prelude::v1::*;

#[cfg(feature = "bench")]
#[bench]
fn cargo(b: &mut Bencher) {
    let string = iter::repeat('a').take(4096).collect::<String>();

    b.iter(|| {
        for c in string.chars() {
            test::black_box(UnicodeWidthChar::width(c));
        }
    });
}

#[cfg(feature = "bench")]
#[bench]
#[allow(deprecated)]
fn stdlib(b: &mut Bencher) {
    let string = iter::repeat('a').take(4096).collect::<String>();

    b.iter(|| {
        for c in string.chars() {
            test::black_box(c.width());
        }
    });
}

#[cfg(feature = "bench")]
#[bench]
fn simple_if(b: &mut Bencher) {
    let string = iter::repeat('a').take(4096).collect::<String>();

    b.iter(|| {
        for c in string.chars() {
            test::black_box(simple_width_if(c));
        }
    });
}

#[cfg(feature = "bench")]
#[bench]
fn simple_match(b: &mut Bencher) {
    let string = iter::repeat('a').take(4096).collect::<String>();

    b.iter(|| {
        for c in string.chars() {
            test::black_box(simple_width_match(c));
        }
    });
}

#[cfg(feature = "bench")]
#[inline]
fn simple_width_if(c: char) -> Option<usize> {
    let cu = c as u32;
    if cu < 127 {
        if cu > 31 {
            Some(1)
        } else if cu == 0 {
            Some(0)
        } else {
            None
        }
    } else {
        UnicodeWidthChar::width(c)
    }
}

#[cfg(feature = "bench")]
#[inline]
fn simple_width_match(c: char) -> Option<usize> {
    match c as u32 {
        cu if cu == 0 => Some(0),
        cu if cu < 0x20 => None,
        cu if cu < 0x7f => Some(1),
        _ => UnicodeWidthChar::width(c)
    }
}

#[test]
fn test_str() {
    use super::UnicodeWidthStr;

    // Fullwidth letters (U+FF48 etc.), two columns each; the ASCII-folded
    // "hello" that crept into this copy would have width 5, not 10.
    assert_eq!(UnicodeWidthStr::width("ｈｅｌｌｏ"), 10);
    assert_eq!("ｈｅｌｌｏ".width_cjk(), 10);
    assert_eq!(UnicodeWidthStr::width("\0\0\0\x01\x01"), 0);
    assert_eq!("\0\0\0\x01\x01".width_cjk(), 0);
    assert_eq!(UnicodeWidthStr::width(""), 0);
    assert_eq!("".width_cjk(), 0);
    assert_eq!(UnicodeWidthStr::width("\u{2081}\u{2082}\u{2083}\u{2084}"), 4);
    assert_eq!("\u{2081}\u{2082}\u{2083}\u{2084}".width_cjk(), 8);
}

#[test]
fn test_emoji() {
    // Example from the README.
    use super::UnicodeWidthStr;

    assert_eq!(UnicodeWidthStr::width("👩"), 2); // Woman
    assert_eq!(UnicodeWidthStr::width("🔬"), 2); // Microscope
    assert_eq!(UnicodeWidthStr::width("👩‍🔬"), 4); // Woman scientist
}

#[test]
fn test_char() {
    use super::UnicodeWidthChar;
    #[cfg(feature = "no_std")]
    use core::option::Option::{Some, None};

    // Fullwidth 'ｈ' (U+FF48); plain ASCII 'h' has width 1.
    assert_eq!(UnicodeWidthChar::width('ｈ'), Some(2));
    assert_eq!('ｈ'.width_cjk(), Some(2));
    assert_eq!(UnicodeWidthChar::width('\x00'), Some(0));
    assert_eq!('\x00'.width_cjk(), Some(0));
    assert_eq!(UnicodeWidthChar::width('\x01'), None);
    assert_eq!('\x01'.width_cjk(), None);
    assert_eq!(UnicodeWidthChar::width('\u{2081}'), Some(1));
    assert_eq!('\u{2081}'.width_cjk(), Some(2));
}

#[test]
fn test_char2() {
    use super::UnicodeWidthChar;
    #[cfg(feature = "no_std")]
    use core::option::Option::{Some, None};

    assert_eq!(UnicodeWidthChar::width('\x00'), Some(0));
    assert_eq!('\x00'.width_cjk(), Some(0));

    assert_eq!(UnicodeWidthChar::width('\x0A'), None);
    assert_eq!('\x0A'.width_cjk(), None);

    assert_eq!(UnicodeWidthChar::width('w'), Some(1));
    assert_eq!('w'.width_cjk(), Some(1));

    assert_eq!(UnicodeWidthChar::width('ｈ'), Some(2));
    assert_eq!('ｈ'.width_cjk(), Some(2));

    assert_eq!(UnicodeWidthChar::width('\u{AD}'), Some(1));
    assert_eq!('\u{AD}'.width_cjk(), Some(1));

    assert_eq!(UnicodeWidthChar::width('\u{1160}'), Some(0));
    assert_eq!('\u{1160}'.width_cjk(), Some(0));

    assert_eq!(UnicodeWidthChar::width('\u{a1}'), Some(1));
    assert_eq!('\u{a1}'.width_cjk(), Some(2));

    assert_eq!(UnicodeWidthChar::width('\u{300}'), Some(0));
    assert_eq!('\u{300}'.width_cjk(), Some(0));
}

#[test]
fn unicode_12() {
    use super::UnicodeWidthChar;
    #[cfg(feature = "no_std")]
    use core::option::Option::{Some, None};

    assert_eq!(UnicodeWidthChar::width('\u{1F971}'), Some(2));
}
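The assertions above hinge on East Asian width classes: fullwidth forms are always two columns, and "ambiguous" characters such as U+00A1 widen only in CJK contexts. A self-contained sketch of the same behaviour, assuming the published `unicode-width` crate rather than the in-tree `super::` paths:

use unicode_width::{UnicodeWidthChar, UnicodeWidthStr};

fn main() {
    // Fullwidth forms occupy two columns regardless of context.
    assert_eq!("ｈｅｌｌｏ".width(), 10);
    // U+00A1 is "ambiguous": one column normally, two in CJK contexts.
    assert_eq!('¡'.width(), Some(1));
    assert_eq!('¡'.width_cjk(), Some(2));
}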
26.909091
78
0.592272
2912f2c02bfdc34d2506bd0bdc42ead6c8d2575c
3,417
use super::{Context, Module};
use crate::utils;

use ansi_term::Color;
use serde_json as json;
use toml;

/// Creates a module with the current package version
///
/// Will display if a version is defined for your Node.js or Rust project (if one exists)
pub fn module<'a>(context: &'a Context) -> Option<Module<'a>> {
    match get_package_version() {
        Some(package_version) => {
            const PACKAGE_CHAR: &str = "📦 ";
            let module_color = Color::Red.bold();

            let mut module = context.new_module("package")?;
            module.set_style(module_color);
            module.get_prefix().set_value("is ");

            module.new_segment("symbol", PACKAGE_CHAR);
            module.new_segment("version", &package_version);

            Some(module)
        }
        None => None,
    }
}

fn extract_cargo_version(file_contents: &str) -> Option<String> {
    let cargo_toml: toml::Value = toml::from_str(&file_contents).ok()?;
    let raw_version = cargo_toml.get("package")?.get("version")?.as_str()?;

    let formatted_version = format_version(raw_version);
    Some(formatted_version)
}

fn extract_package_version(file_contents: &str) -> Option<String> {
    let package_json: json::Value = json::from_str(&file_contents).ok()?;
    let raw_version = package_json.get("version")?.as_str()?;
    if raw_version == "null" {
        return None;
    };

    let formatted_version = format_version(raw_version);
    Some(formatted_version)
}

fn get_package_version() -> Option<String> {
    let cargo_toml = utils::read_file("Cargo.toml");
    if let Ok(cargo_toml) = cargo_toml {
        return extract_cargo_version(&cargo_toml);
    }

    let package_json = utils::read_file("package.json");
    if let Ok(package_json) = package_json {
        return extract_package_version(&package_json);
    }

    None
}

fn format_version(version: &str) -> String {
    format!("v{}", version.replace('"', "").trim())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_format_version() {
        assert_eq!(format_version("0.1.0"), "v0.1.0");
    }

    #[test]
    fn test_extract_cargo_version() {
        let cargo_with_version = toml::toml! {
            [package]
            name = "starship"
            version = "0.1.0"
        }
        .to_string();

        let expected_version = Some("v0.1.0".to_string());
        assert_eq!(extract_cargo_version(&cargo_with_version), expected_version);

        let cargo_without_version = toml::toml! {
            [package]
            name = "starship"
        }
        .to_string();

        let expected_version = None;
        assert_eq!(
            extract_cargo_version(&cargo_without_version),
            expected_version
        );
    }

    #[test]
    fn test_extract_package_version() {
        let package_with_version = json::json!({
            "name": "spacefish",
            "version": "0.1.0"
        })
        .to_string();

        let expected_version = Some("v0.1.0".to_string());
        assert_eq!(
            extract_package_version(&package_with_version),
            expected_version
        );

        let package_without_version = json::json!({
            "name": "spacefish"
        })
        .to_string();

        let expected_version = None;
        assert_eq!(
            extract_package_version(&package_without_version),
            expected_version
        );
    }
}
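A standalone sketch of the `Cargo.toml` path above, under the same `toml` crate assumption; the function name and manifest literal are illustrative only, mirroring `extract_cargo_version` without the starship context:

// Sketch: chain of optional lookups, then the "v"-prefix formatting.
fn version_from_manifest(manifest: &str) -> Option<String> {
    let value: toml::Value = toml::from_str(manifest).ok()?;
    let raw = value.get("package")?.get("version")?.as_str()?;
    Some(format!("v{}", raw.trim()))
}

// version_from_manifest("[package]\nname = \"demo\"\nversion = \"0.1.0\"")
//     == Some("v0.1.0".to_string())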
26.905512
89
0.596137
2159ca8b9c6e1ae4de393e12642e1c46c53537be
3,412
use std::sync::Arc;

use lru::LruCache;
use skia_safe::{font::Edging, Data, Font, FontHinting, FontMgr, FontStyle, Typeface};

use super::swash_font::SwashFont;

#[derive(RustEmbed)]
#[folder = "assets/fonts/"]
pub struct Asset;

const DEFAULT_FONT: &str = "FiraCode-Regular.ttf";

pub struct FontPair {
    pub skia_font: Font,
    pub swash_font: SwashFont,
}

impl FontPair {
    fn new(mut skia_font: Font) -> Option<FontPair> {
        skia_font.set_subpixel(true);
        skia_font.set_hinting(FontHinting::Full);
        skia_font.set_edging(Edging::SubpixelAntiAlias);

        let (font_data, index) = skia_font.typeface().unwrap().to_font_data().unwrap();
        let swash_font = SwashFont::from_data(font_data, index)?;

        Some(Self {
            skia_font,
            swash_font,
        })
    }
}

impl PartialEq for FontPair {
    fn eq(&self, other: &Self) -> bool {
        self.swash_font.key == other.swash_font.key
    }
}

pub struct FontLoader {
    font_mgr: FontMgr,
    cache: LruCache<FontKey, Arc<FontPair>>,
    font_size: f32,
}

#[derive(Debug, Hash, PartialEq, Eq, Clone)]
pub enum FontKey {
    Default,
    Name(String),
    Character(char),
}

impl From<&str> for FontKey {
    fn from(string: &str) -> FontKey {
        let string = string.to_string();
        FontKey::Name(string)
    }
}

impl From<&String> for FontKey {
    fn from(string: &String) -> FontKey {
        let string = string.to_owned();
        FontKey::Name(string)
    }
}

impl From<String> for FontKey {
    fn from(string: String) -> FontKey {
        FontKey::Name(string)
    }
}

impl From<char> for FontKey {
    fn from(character: char) -> FontKey {
        FontKey::Character(character)
    }
}

impl FontLoader {
    pub fn new(font_size: f32) -> FontLoader {
        FontLoader {
            font_mgr: FontMgr::new(),
            cache: LruCache::new(10),
            font_size,
        }
    }

    fn load(&mut self, font_key: FontKey) -> Option<FontPair> {
        match font_key {
            FontKey::Default => {
                let default_font_data = Asset::get(DEFAULT_FONT).unwrap();
                let data = Data::new_copy(&default_font_data);
                let typeface = Typeface::from_data(data, 0).unwrap();
                FontPair::new(Font::from_typeface(typeface, self.font_size))
            }
            FontKey::Name(name) => {
                let font_style = FontStyle::normal();
                let typeface = self.font_mgr.match_family_style(name, font_style)?;
                FontPair::new(Font::from_typeface(typeface, self.font_size))
            }
            FontKey::Character(character) => {
                let font_style = FontStyle::normal();
                let typeface = self.font_mgr.match_family_style_character(
                    "",
                    font_style,
                    &[],
                    character as i32,
                )?;
                FontPair::new(Font::from_typeface(typeface, self.font_size))
            }
        }
    }

    pub fn get_or_load(&mut self, font_key: FontKey) -> Option<Arc<FontPair>> {
        if let Some(cached) = self.cache.get(&font_key) {
            return Some(cached.clone());
        }

        let loaded_font = self.load(font_key.clone())?;

        let font_arc = Arc::new(loaded_font);
        self.cache.put(font_key, font_arc.clone());

        Some(font_arc)
    }
}
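A hedged usage sketch for the loader above; the font names are illustrative, and `get_or_load` returns `None` when no matching family is installed:

// Sketch only; assumes this module's FontLoader and FontKey are in scope.
fn demo(loader: &mut FontLoader) {
    let _default = loader.get_or_load(FontKey::Default); // embedded FiraCode
    let _named = loader.get_or_load(FontKey::from("Menlo")); // system family lookup
    let _fallback = loader.get_or_load(FontKey::from('字')); // per-character fallback
    // A second lookup with the same key hits the LruCache and clones the Arc.
}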
26.449612
87
0.575029
4a3fe8c17cc078acce32594f89e8bc0544effd3b
7,849
use crate::deprecation::DeprecationStrategy;
use crate::fragments::GqlFragment;
use crate::normalization::Normalization;
use crate::schema::Schema;
use crate::selection::Selection;
use failure::*;
use proc_macro2::Span;
use proc_macro2::TokenStream;
use quote::quote;
use std::collections::{BTreeMap, BTreeSet};
use syn::{Ident, Path};

/// This holds all the information we need during the code generation phase.
pub(crate) struct QueryContext<'query, 'schema: 'query> {
    pub fragments: BTreeMap<&'query str, GqlFragment<'query>>,
    pub schema: &'schema Schema<'schema>,
    pub deprecation_strategy: DeprecationStrategy,
    pub normalization: Normalization,
    variables_derives: Vec<Ident>,
    response_derives: Vec<Ident>,
    serde_crate_path: Option<Path>,
}

impl<'query, 'schema> QueryContext<'query, 'schema> {
    /// Create a QueryContext with the given Schema.
    pub(crate) fn new(
        schema: &'schema Schema<'schema>,
        deprecation_strategy: DeprecationStrategy,
        normalization: Normalization,
        serde_crate_path: Option<Path>,
    ) -> QueryContext<'query, 'schema> {
        QueryContext {
            fragments: BTreeMap::new(),
            schema,
            deprecation_strategy,
            normalization,
            serde_crate_path,
            variables_derives: vec![Ident::new("Serialize", Span::call_site())],
            response_derives: vec![Ident::new("Deserialize", Span::call_site())],
        }
    }

    /// Mark a fragment as required, so code is actually generated for it.
    pub(crate) fn require_fragment(&self, typename_: &str) {
        if let Some(fragment) = self.fragments.get(typename_) {
            fragment.require(&self);
        }
    }

    /// For testing only. creates an empty QueryContext with an empty Schema.
    #[cfg(test)]
    pub(crate) fn new_empty(schema: &'schema Schema<'_>) -> QueryContext<'query, 'schema> {
        QueryContext {
            fragments: BTreeMap::new(),
            schema,
            deprecation_strategy: DeprecationStrategy::Allow,
            normalization: Normalization::None,
            serde_crate_path: None,
            variables_derives: vec![Ident::new("Serialize", Span::call_site())],
            response_derives: vec![Ident::new("Deserialize", Span::call_site())],
        }
    }

    /// Expand the deserialization data structures for the given field.
    pub(crate) fn maybe_expand_field(
        &self,
        ty: &str,
        selection: &Selection<'_>,
        prefix: &str,
    ) -> Result<Option<TokenStream>, failure::Error> {
        if self.schema.contains_scalar(ty) {
            Ok(None)
        } else if let Some(enm) = self.schema.enums.get(ty) {
            enm.is_required.set(true);
            Ok(None) // we already expand enums separately
        } else if let Some(obj) = self.schema.objects.get(ty) {
            obj.is_required.set(true);
            obj.response_for_selection(self, &selection, prefix)
                .map(Some)
        } else if let Some(iface) = self.schema.interfaces.get(ty) {
            iface.is_required.set(true);
            iface
                .response_for_selection(self, &selection, prefix)
                .map(Some)
        } else if let Some(unn) = self.schema.unions.get(ty) {
            unn.is_required.set(true);
            unn.response_for_selection(self, &selection, prefix)
                .map(Some)
        } else {
            Err(format_err!("Unknown type: {}", ty))
        }
    }

    pub(crate) fn ingest_response_derives(
        &mut self,
        attribute_value: &str,
    ) -> Result<(), failure::Error> {
        if self.response_derives.len() > 1 {
            return Err(format_err!(
                "ingest_response_derives should only be called once"
            ));
        }

        self.response_derives.extend(
            attribute_value
                .split(',')
                .map(str::trim)
                .map(|s| Ident::new(s, Span::call_site())),
        );

        Ok(())
    }

    pub(crate) fn ingest_variables_derives(
        &mut self,
        attribute_value: &str,
    ) -> Result<(), failure::Error> {
        if self.variables_derives.len() > 1 {
            return Err(format_err!(
                "ingest_variables_derives should only be called once"
            ));
        }

        self.variables_derives.extend(
            attribute_value
                .split(',')
                .map(str::trim)
                .map(|s| Ident::new(s, Span::call_site())),
        );

        Ok(())
    }

    pub(crate) fn variables_derives(&self) -> TokenStream {
        let derives: BTreeSet<&Ident> = self.variables_derives.iter().collect();
        let derives = derives.iter();
        let serde_crate_attr = self.serde_crate_attr();

        quote! {
            #[derive( #(#derives),* )]
            #serde_crate_attr
        }
    }

    pub(crate) fn response_derives(&self) -> TokenStream {
        let derives: BTreeSet<&Ident> = self.response_derives.iter().collect();
        let derives = derives.iter();
        let serde_crate_attr = self.serde_crate_attr();

        quote! {
            #[derive( #(#derives),* )]
            #serde_crate_attr
        }
    }

    pub(crate) fn response_enum_derives(&self) -> TokenStream {
        let always_derives = [
            Ident::new("Eq", Span::call_site()),
            Ident::new("PartialEq", Span::call_site()),
        ];
        let mut enum_derives: BTreeSet<_> = self
            .response_derives
            .iter()
            .filter(|derive| {
                // Do not apply the "Default" derive to enums.
                let derive = derive.to_string();
                derive != "Serialize" && derive != "Deserialize" && derive != "Default"
            })
            .collect();
        enum_derives.extend(always_derives.iter());
        quote! {
            #[derive( #(#enum_derives),* )]
        }
    }

    fn serde_crate_attr(&self) -> TokenStream {
        self.serde_crate_path
            .as_ref()
            .map(|path| {
                let path_str = quote!(#path).to_string();
                quote! { #[serde(crate = #path_str)] }
            })
            .unwrap_or_default()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn response_derives_ingestion_works() {
        let schema = crate::schema::Schema::new();
        let mut context = QueryContext::new_empty(&schema);

        context
            .ingest_response_derives("PartialEq, PartialOrd, Serialize")
            .unwrap();

        assert_eq!(
            context.response_derives().to_string(),
            "# [ derive ( Deserialize , PartialEq , PartialOrd , Serialize ) ]"
        );
    }

    #[test]
    fn response_enum_derives_does_not_produce_empty_list() {
        let schema = crate::schema::Schema::new();
        let context = QueryContext::new_empty(&schema);
        assert_eq!(
            context.response_enum_derives().to_string(),
            "# [ derive ( Eq , PartialEq ) ]"
        );
    }

    #[test]
    fn response_enum_derives_works() {
        let schema = crate::schema::Schema::new();
        let mut context = QueryContext::new_empty(&schema);

        context
            .ingest_response_derives("PartialEq, PartialOrd, Serialize")
            .unwrap();

        assert_eq!(
            context.response_enum_derives().to_string(),
            "# [ derive ( Eq , PartialEq , PartialOrd ) ]"
        );
    }

    #[test]
    fn response_derives_fails_when_called_twice() {
        let schema = crate::schema::Schema::new();
        let mut context = QueryContext::new_empty(&schema);

        assert!(context
            .ingest_response_derives("PartialEq, PartialOrd")
            .is_ok());
        assert!(context.ingest_response_derives("Serialize").is_err());
    }
}
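The derive plumbing above relies on `BTreeSet` for deduplication and a stable ordering before interpolating into `quote!`. A standalone sketch of just that step, under the same proc-macro2/quote/syn assumptions as the module (exact token-stream spacing varies between proc-macro2 versions, hence the println rather than an assert):

use proc_macro2::Span;
use quote::quote;
use std::collections::BTreeSet;
use syn::Ident;

fn main() {
    let derives = vec![
        Ident::new("Deserialize", Span::call_site()),
        Ident::new("PartialEq", Span::call_site()),
        Ident::new("PartialEq", Span::call_site()), // duplicate collapses in the set
    ];
    let unique: BTreeSet<&Ident> = derives.iter().collect();
    let unique = unique.iter();
    // Prints a #[derive(...)] attribute with each ident exactly once, sorted.
    println!("{}", quote! { #[derive( #(#unique),* )] });
}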
32.704167
91
0.567206
8acfafebec47930e3d48cd6e59888e7c122f9498
2,920
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use super::Allocator;
use crate::reason::Reason;
use pos::{FilePos, Pos, Positioned, Symbol, TypeName};

impl<R: Reason> Allocator<R> {
    pub fn pos_from_ast(&self, pos: &oxidized::pos::Pos) -> R::Pos {
        R::Pos::mk(|| {
            let pos_file = self.relative_path_from_ast(pos.filename());
            let ((start_lnum, start_bol, start_cnum), (end_lnum, end_bol, end_cnum)) =
                pos.to_start_and_end_lnum_bol_offset();
            let pos_start = FilePos {
                lnum: start_lnum as u64,
                bol: start_bol as u64,
                cnum: start_cnum as u64,
            };
            let pos_end = FilePos {
                lnum: end_lnum as u64,
                bol: end_bol as u64,
                cnum: end_cnum as u64,
            };
            (pos_file, pos_start, pos_end)
        })
    }

    pub fn pos_from_decl(&self, pos: &oxidized_by_ref::pos::Pos<'_>) -> R::Pos {
        R::Pos::mk(|| {
            let pos_file = self.relative_path_from_decl(pos.filename());
            let ((start_lnum, start_bol, start_cnum), (end_lnum, end_bol, end_cnum)) =
                pos.to_start_and_end_lnum_bol_offset();
            let pos_start = FilePos {
                lnum: start_lnum as u64,
                bol: start_bol as u64,
                cnum: start_cnum as u64,
            };
            let pos_end = FilePos {
                lnum: end_lnum as u64,
                bol: end_bol as u64,
                cnum: end_cnum as u64,
            };
            (pos_file, pos_start, pos_end)
        })
    }

    pub fn pos_id_from_ast(&self, pos_id: &oxidized::ast_defs::Id) -> Positioned<Symbol, R::Pos> {
        Positioned::new(self.pos_from_ast(&pos_id.0), self.symbol(&pos_id.1))
    }

    pub fn pos_classname_from_ast(
        &self,
        pos_id: &oxidized::ast_defs::Id,
    ) -> Positioned<TypeName, R::Pos> {
        Positioned::new(
            self.pos_from_ast(&pos_id.0),
            TypeName(self.symbol(&pos_id.1)),
        )
    }

    pub fn pos_id_from_ast_ref(
        &self,
        pos_id: &oxidized_by_ref::ast_defs::Id<'_>,
    ) -> Positioned<Symbol, R::Pos> {
        Positioned::new(self.pos_from_decl(pos_id.0), self.symbol(pos_id.1))
    }

    pub fn pos_classname_from_decl(
        &self,
        pos_id: oxidized_by_ref::typing_defs::PosId<'_>,
    ) -> Positioned<TypeName, R::Pos> {
        Positioned::new(
            self.pos_from_decl(pos_id.0),
            TypeName(self.symbol(pos_id.1)),
        )
    }

    pub fn pos_id_from_decl(
        &self,
        pos_id: oxidized_by_ref::typing_defs::PosId<'_>,
    ) -> Positioned<Symbol, R::Pos> {
        Positioned::new(self.pos_from_decl(pos_id.0), self.symbol(pos_id.1))
    }
}
33.563218
98
0.557534
3a99bc00bcb75ace6e861058f5e5138d2f175c16
1,747
use crate::build_support::Builder;

use std::fmt;
use std::fmt::{Debug, Formatter};
use std::path::PathBuf;
use std::process::Command;

#[derive(Default, Clone)]
pub struct MacBuilder {}

impl MacBuilder {}

impl Debug for MacBuilder {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        self.print_directories(f)
    }
}

impl Builder for MacBuilder {
    fn vm_binary(&self) -> PathBuf {
        self.output_directory().join("libPharoVMCore.a")
    }

    fn generate_sources(&self) {
        Command::new("cmake")
            .arg(self.cmake_build_type())
            .arg("-DFEATURE_LIB_SDL2=OFF")
            .arg("-DFEATURE_LIB_CAIRO=OFF")
            .arg("-DFEATURE_LIB_FREETYPE2=OFF")
            .arg("-DFEATURE_LIB_GIT2=OFF")
            .arg("-DBUILD_BUNDLE=OFF")
            .arg("-DCOMPILE_STATIC_LIBRARIES=ON")
            .arg("-DCOMPILE_EXECUTABLE=OFF")
            .arg("-DPHARO_DEPENDENCIES_PREFER_DOWNLOAD_BINARIES=OFF")
            .arg("-S")
            .arg(self.vm_sources_directory())
            .arg("-B")
            .arg(self.output_directory())
            .status()
            .unwrap();
    }

    fn platform_include_directory(&self) -> PathBuf {
        self.squeak_include_directory().join("osx")
    }

    fn generated_config_directory(&self) -> PathBuf {
        self.output_directory()
            .join("build")
            .join("include")
            .join("pharovm")
    }

    fn link_libraries(&self) {
        println!(
            "cargo:rustc-link-search=native={}",
            self.output_directory().display()
        );
        println!("cargo:rustc-link-lib=static=PharoVMCore");
        println!("cargo:rustc-link-lib=framework=AppKit");
        println!("cargo:rustc-link-lib=framework=CoreGraphics");
    }
}
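A hypothetical `build.rs` sketch wiring the builder above into Cargo; it calls only the two trait methods shown in this file and says nothing about the actual compile step, which lives elsewhere in the crate:

// Sketch only; `Builder` must be in scope for the trait methods to resolve.
fn main() {
    let builder = MacBuilder::default();
    builder.generate_sources(); // configure the CMake tree under output_directory()
    builder.link_libraries();   // emit the cargo:rustc-link-* lines shown above
    println!("cargo:rerun-if-changed=build.rs");
}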
28.177419
89
0.589582
de831da1ab004be7b3f4fdd7b8c42f8afac6b5e8
1,355
use pyo3::prelude::{PyErr, PyModule, PyObject, PyResult, Python};

pub fn get_loop(py: Python) -> PyResult<PyObject> {
    let asyncio = PyModule::import(py, "asyncio")?;
    let loop_ = asyncio.getattr("get_running_loop")?.call0()?;

    Ok(loop_.into())
}

pub fn create_future() -> PyResult<(PyObject, PyObject, PyObject)> {
    let gil = Python::acquire_gil();
    let py = gil.python();

    let loop_ = get_loop(py)?;
    let fut: PyObject = loop_.call_method0(py, "create_future")?;

    Ok((fut.clone_ref(py), fut, loop_))
}

pub fn set_fut_result(loop_: PyObject, fut: PyObject, res: PyObject) -> PyResult<()> {
    let gil = Python::acquire_gil();
    let py = gil.python();

    let sr = fut.getattr(py, "set_result")?;
    loop_.call_method1(py, "call_soon_threadsafe", (sr, res))?;

    Ok(())
}

pub fn set_fut_result_none(loop_: PyObject, fut: PyObject) -> PyResult<()> {
    let gil = Python::acquire_gil();
    let py = gil.python();

    let sr = fut.getattr(py, "set_result")?;
    loop_.call_method1(py, "call_soon_threadsafe", (sr, py.None()))?;

    Ok(())
}

pub fn set_fut_exc(loop_: PyObject, fut: PyObject, exc: PyErr) -> PyResult<()> {
    let gil = Python::acquire_gil();
    let py = gil.python();

    let sr = fut.getattr(py, "set_exception")?;
    loop_.call_method1(py, "call_soon_threadsafe", (sr, exc))?;

    Ok(())
}
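A hedged sketch of the intended call pattern: create the future while the asyncio loop is running, then resolve it from a Rust worker thread; `call_soon_threadsafe` is what makes completion off the event-loop thread safe. This assumes the pyo3 0.13-era `acquire_gil` API, matching the file above:

// Sketch only; returns the future object for Python code to `await`.
fn complete_later() -> PyResult<PyObject> {
    let (fut_for_caller, fut, loop_) = create_future()?;
    std::thread::spawn(move || {
        let gil = Python::acquire_gil();
        let py = gil.python();
        // Resolve the asyncio future with None from outside the loop thread.
        let _ = set_fut_result(loop_, fut, py.None());
    });
    Ok(fut_for_caller)
}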
27.1
86
0.63321
2826e0b32b46b30e0614e16d3b2a65d515e4cb2c
297,653
#![doc = "generated by AutoRust"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::models; #[derive(Clone)] pub struct Client { endpoint: String, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, pipeline: azure_core::Pipeline, } #[derive(Clone)] pub struct ClientBuilder { credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, endpoint: Option<String>, scopes: Option<Vec<String>>, } pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD; impl ClientBuilder { pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self { Self { credential, endpoint: None, scopes: None, } } pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self { self.endpoint = Some(endpoint.into()); self } pub fn scopes(mut self, scopes: &[&str]) -> Self { self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect()); self } pub fn build(self) -> Client { let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned()); let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]); Client::new(endpoint, self.credential, scopes) } } impl Client { pub(crate) fn endpoint(&self) -> &str { self.endpoint.as_str() } pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential { self.credential.as_ref() } pub(crate) fn scopes(&self) -> Vec<&str> { self.scopes.iter().map(String::as_str).collect() } pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> { let mut context = azure_core::Context::default(); let mut request = request.into(); self.pipeline.send(&mut context, &mut request).await } pub fn new( endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>, scopes: Vec<String>, ) -> Self { let endpoint = endpoint.into(); let pipeline = azure_core::Pipeline::new( option_env!("CARGO_PKG_NAME"), option_env!("CARGO_PKG_VERSION"), azure_core::ClientOptions::default(), Vec::new(), Vec::new(), ); Self { endpoint, credential, scopes, pipeline, } } pub fn alerts(&self) -> alerts::Client { alerts::Client(self.clone()) } pub fn bandwidth_schedules(&self) -> bandwidth_schedules::Client { bandwidth_schedules::Client(self.clone()) } pub fn containers(&self) -> containers::Client { containers::Client(self.clone()) } pub fn devices(&self) -> devices::Client { devices::Client(self.clone()) } pub fn jobs(&self) -> jobs::Client { jobs::Client(self.clone()) } pub fn nodes(&self) -> nodes::Client { nodes::Client(self.clone()) } pub fn operations(&self) -> operations::Client { operations::Client(self.clone()) } pub fn operations_status(&self) -> operations_status::Client { operations_status::Client(self.clone()) } pub fn orders(&self) -> orders::Client { orders::Client(self.clone()) } pub fn roles(&self) -> roles::Client { roles::Client(self.clone()) } pub fn shares(&self) -> shares::Client { shares::Client(self.clone()) } pub fn skus(&self) -> skus::Client { skus::Client(self.clone()) } pub fn storage_account_credentials(&self) -> storage_account_credentials::Client { storage_account_credentials::Client(self.clone()) } pub fn storage_accounts(&self) -> storage_accounts::Client { storage_accounts::Client(self.clone()) } pub fn triggers(&self) -> triggers::Client { triggers::Client(self.clone()) } pub fn users(&self) -> users::Client { users::Client(self.clone()) } } #[non_exhaustive] #[derive(Debug, thiserror :: Error)] 
#[allow(non_camel_case_types)] pub enum Error { #[error(transparent)] Operations_List(#[from] operations::list::Error), #[error(transparent)] Devices_ListBySubscription(#[from] devices::list_by_subscription::Error), #[error(transparent)] Devices_ListByResourceGroup(#[from] devices::list_by_resource_group::Error), #[error(transparent)] Devices_Get(#[from] devices::get::Error), #[error(transparent)] Devices_CreateOrUpdate(#[from] devices::create_or_update::Error), #[error(transparent)] Devices_Update(#[from] devices::update::Error), #[error(transparent)] Devices_Delete(#[from] devices::delete::Error), #[error(transparent)] Alerts_ListByDataBoxEdgeDevice(#[from] alerts::list_by_data_box_edge_device::Error), #[error(transparent)] Alerts_Get(#[from] alerts::get::Error), #[error(transparent)] BandwidthSchedules_ListByDataBoxEdgeDevice(#[from] bandwidth_schedules::list_by_data_box_edge_device::Error), #[error(transparent)] BandwidthSchedules_Get(#[from] bandwidth_schedules::get::Error), #[error(transparent)] BandwidthSchedules_CreateOrUpdate(#[from] bandwidth_schedules::create_or_update::Error), #[error(transparent)] BandwidthSchedules_Delete(#[from] bandwidth_schedules::delete::Error), #[error(transparent)] Devices_DownloadUpdates(#[from] devices::download_updates::Error), #[error(transparent)] Devices_GetExtendedInformation(#[from] devices::get_extended_information::Error), #[error(transparent)] Devices_InstallUpdates(#[from] devices::install_updates::Error), #[error(transparent)] Jobs_Get(#[from] jobs::get::Error), #[error(transparent)] Devices_GetNetworkSettings(#[from] devices::get_network_settings::Error), #[error(transparent)] Nodes_ListByDataBoxEdgeDevice(#[from] nodes::list_by_data_box_edge_device::Error), #[error(transparent)] OperationsStatus_Get(#[from] operations_status::get::Error), #[error(transparent)] Orders_ListByDataBoxEdgeDevice(#[from] orders::list_by_data_box_edge_device::Error), #[error(transparent)] Orders_Get(#[from] orders::get::Error), #[error(transparent)] Orders_CreateOrUpdate(#[from] orders::create_or_update::Error), #[error(transparent)] Orders_Delete(#[from] orders::delete::Error), #[error(transparent)] Roles_ListByDataBoxEdgeDevice(#[from] roles::list_by_data_box_edge_device::Error), #[error(transparent)] Roles_Get(#[from] roles::get::Error), #[error(transparent)] Roles_CreateOrUpdate(#[from] roles::create_or_update::Error), #[error(transparent)] Roles_Delete(#[from] roles::delete::Error), #[error(transparent)] Devices_ScanForUpdates(#[from] devices::scan_for_updates::Error), #[error(transparent)] Devices_CreateOrUpdateSecuritySettings(#[from] devices::create_or_update_security_settings::Error), #[error(transparent)] Shares_ListByDataBoxEdgeDevice(#[from] shares::list_by_data_box_edge_device::Error), #[error(transparent)] Shares_Get(#[from] shares::get::Error), #[error(transparent)] Shares_CreateOrUpdate(#[from] shares::create_or_update::Error), #[error(transparent)] Shares_Delete(#[from] shares::delete::Error), #[error(transparent)] Shares_Refresh(#[from] shares::refresh::Error), #[error(transparent)] StorageAccountCredentials_ListByDataBoxEdgeDevice(#[from] storage_account_credentials::list_by_data_box_edge_device::Error), #[error(transparent)] StorageAccountCredentials_Get(#[from] storage_account_credentials::get::Error), #[error(transparent)] StorageAccountCredentials_CreateOrUpdate(#[from] storage_account_credentials::create_or_update::Error), #[error(transparent)] StorageAccountCredentials_Delete(#[from] storage_account_credentials::delete::Error), 
#[error(transparent)] StorageAccounts_ListByDataBoxEdgeDevice(#[from] storage_accounts::list_by_data_box_edge_device::Error), #[error(transparent)] StorageAccounts_Get(#[from] storage_accounts::get::Error), #[error(transparent)] StorageAccounts_CreateOrUpdate(#[from] storage_accounts::create_or_update::Error), #[error(transparent)] StorageAccounts_Delete(#[from] storage_accounts::delete::Error), #[error(transparent)] Containers_ListByStorageAccount(#[from] containers::list_by_storage_account::Error), #[error(transparent)] Containers_Get(#[from] containers::get::Error), #[error(transparent)] Containers_CreateOrUpdate(#[from] containers::create_or_update::Error), #[error(transparent)] Containers_Delete(#[from] containers::delete::Error), #[error(transparent)] Containers_Refresh(#[from] containers::refresh::Error), #[error(transparent)] Triggers_ListByDataBoxEdgeDevice(#[from] triggers::list_by_data_box_edge_device::Error), #[error(transparent)] Triggers_Get(#[from] triggers::get::Error), #[error(transparent)] Triggers_CreateOrUpdate(#[from] triggers::create_or_update::Error), #[error(transparent)] Triggers_Delete(#[from] triggers::delete::Error), #[error(transparent)] Devices_GetUpdateSummary(#[from] devices::get_update_summary::Error), #[error(transparent)] Devices_UploadCertificate(#[from] devices::upload_certificate::Error), #[error(transparent)] Users_ListByDataBoxEdgeDevice(#[from] users::list_by_data_box_edge_device::Error), #[error(transparent)] Users_Get(#[from] users::get::Error), #[error(transparent)] Users_CreateOrUpdate(#[from] users::create_or_update::Error), #[error(transparent)] Users_Delete(#[from] users::delete::Error), #[error(transparent)] Skus_List(#[from] skus::list::Error), } pub mod operations { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "List all the supported operations."] pub fn list(&self) -> list::Builder { list::Builder { client: self.0.clone() } } } pub mod list { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationsList, Error>> { Box::pin(async move { let url_str = &format!("{}/providers/Microsoft.DataBoxEdge/operations", self.client.endpoint(),); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); 
url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::OperationsList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod devices { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder { list_by_subscription::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), expand: None, } } pub fn list_by_resource_group( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_resource_group::Builder { list_by_resource_group::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), expand: None, } } pub fn get( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn create_or_update( &self, device_name: impl Into<String>, data_box_edge_device: impl Into<models::DataBoxEdgeDevice>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), data_box_edge_device: data_box_edge_device.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn update( &self, device_name: impl Into<String>, parameters: impl Into<models::DataBoxEdgeDevicePatch>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> update::Builder { update::Builder { client: self.0.clone(), device_name: device_name.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn delete( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Downloads the updates on a Data Box Edge/Data Box Gateway device."] pub fn download_updates( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> download_updates::Builder { download_updates::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn 
get_extended_information( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get_extended_information::Builder { get_extended_information::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Installs the updates on the Data Box Edge/Data Box Gateway device."] pub fn install_updates( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> install_updates::Builder { install_updates::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn get_network_settings( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get_network_settings::Builder { get_network_settings::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Scans for updates on a Data Box Edge/Data Box Gateway device."] pub fn scan_for_updates( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> scan_for_updates::Builder { scan_for_updates::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn create_or_update_security_settings( &self, device_name: impl Into<String>, security_settings: impl Into<models::SecuritySettings>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update_security_settings::Builder { create_or_update_security_settings::Builder { client: self.0.clone(), device_name: device_name.into(), security_settings: security_settings.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Gets information about the availability of updates based on the last scan of the device. 
It also gets information about any ongoing download or install jobs on the device."] pub fn get_update_summary( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get_update_summary::Builder { get_update_summary::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn upload_certificate( &self, device_name: impl Into<String>, parameters: impl Into<models::UploadCertificateRequest>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> upload_certificate::Builder { upload_certificate::Builder { client: self.0.clone(), device_name: device_name.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_subscription { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) expand: Option<String>, } impl Builder { pub fn expand(mut self, expand: impl Into<String>) -> Self { self.expand = Some(expand.into()); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DataBoxEdgeDeviceList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices", self.client.endpoint(), &self.subscription_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); if let Some(expand) = &self.expand { url.query_pairs_mut().append_pair("$expand", expand); } let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::DataBoxEdgeDeviceList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = 
azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod list_by_resource_group { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) expand: Option<String>, } impl Builder { pub fn expand(mut self, expand: impl Into<String>) -> Self { self.expand = Some(expand.into()); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DataBoxEdgeDeviceList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices", self.client.endpoint(), &self.subscription_id, &self.resource_group_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); if let Some(expand) = &self.expand { url.query_pairs_mut().append_pair("$expand", expand); } let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::DataBoxEdgeDeviceList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] 
ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DataBoxEdgeDevice, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::DataBoxEdgeDevice = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) data_box_edge_device: models::DataBoxEdgeDevice, pub(crate) 
subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DataBoxEdgeDevice, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.data_box_edge_device).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::DataBoxEdgeDevice = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod update { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) parameters: models::DataBoxEdgeDevicePatch, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DataBoxEdgeDevice, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = 
req_builder.method(http::Method::PATCH); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::DataBoxEdgeDevice = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = 
self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod download_updates { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/downloadUpdates", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get_extended_information { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { 
status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future( self, ) -> futures::future::BoxFuture<'static, std::result::Result<models::DataBoxEdgeDeviceExtendedInfo, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/getExtendedInformation", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::DataBoxEdgeDeviceExtendedInfo = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod install_updates { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError),
#[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/installUpdates", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get_network_settings { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::NetworkSettings, Error>> { Box::pin(async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/networkSettings/default" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . 
device_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::NetworkSettings = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod scan_for_updates { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/scanForUpdates", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update_security_settings { use super::models; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) security_settings: models::SecuritySettings, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/securitySettings/default/update", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.security_settings).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get_update_summary { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::UpdateSummary, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/updateSummary/default", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status,
rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::UpdateSummary = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod upload_certificate { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) parameters: models::UploadCertificateRequest, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::UploadCertificateResponse, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/uploadCertificate", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::UploadCertificateResponse = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = 
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod alerts { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Gets an alert by name."] pub fn get( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::AlertList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/alerts", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::AlertList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { 
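// Any non-OK status falls through to this arm and its body is decoded as the service's CloudError model.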
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Alert, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/alerts/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Alert = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod bandwidth_schedules { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: 
subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn get( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn create_or_update( &self, device_name: impl Into<String>, name: impl Into<String>, parameters: impl Into<models::BandwidthSchedule>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), parameters: parameters.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn delete( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BandwidthSchedulesList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/bandwidthSchedules", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = 
azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::BandwidthSchedulesList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BandwidthSchedule, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/bandwidthSchedules/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::BandwidthSchedule = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::BandwidthSchedule), 
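// A 202 Accepted response carries no schedule payload, so the variant below records only the status.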
Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) parameters: models::BandwidthSchedule, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/bandwidthSchedules/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::BandwidthSchedule = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] 
Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/bandwidthSchedules/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod jobs { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "Gets the details of a specified job on a Data Box Edge/Data Box Gateway device."] pub fn get( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), 
#[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Job, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/jobs/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Job = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod nodes { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, 
pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::NodeList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/nodes", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::NodeList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod operations_status { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "Gets the details of a specified job on a Data Box Edge/Data Box Gateway device."] pub fn get( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Job, 
Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/operationsStatus/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Job = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod orders { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "Lists all the orders related to a Data Box Edge/Data Box Gateway device."] pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Gets a specific order by name."] pub fn get( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Creates or updates an order."] pub fn create_or_update( &self, device_name: impl Into<String>, order: impl Into<models::Order>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), order: order.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Deletes the order related to the device."] pub fn delete( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { 
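// DefaultResponse wraps the service's CloudError for any non-success status; the remaining variants cover client-side failures (URL parsing, request building, auth, transport, serialization).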
#[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OrderList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/orders", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::OrderList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { 
pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Order, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/orders/default", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Order = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::Order), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) order: models::Order, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/orders/default", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); 
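// The order is a singleton resource: this PUT to .../orders/default creates it or replaces the existing one.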
req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.order).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Order = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/orders/default", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod roles { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn get( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn create_or_update( &self, device_name: impl Into<String>, name: impl Into<String>, role: impl Into<models::Role>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), role: role.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn delete( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::RoleList, Error>> { Box::pin(async move { let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/roles", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::RoleList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Role, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/roles/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = 
azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Role = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::Role), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) role: models::Role, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/roles/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.role).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Role = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, 
rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/roles/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod shares { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "Lists all the shares in a Data Box Edge/Data Box Gateway device."] pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> 
list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Gets a share by name."] pub fn get( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Creates a new share or updates an existing share on the device."] pub fn create_or_update( &self, device_name: impl Into<String>, name: impl Into<String>, share: impl Into<models::Share>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), share: share.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn delete( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Refreshes the share metadata with the data from the cloud."] pub fn refresh( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> refresh::Builder { refresh::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ShareList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/shares", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = 
self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::ShareList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Share, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/shares/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = 
azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Share = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::Share), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) share: models::Share, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/shares/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.share).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Share = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| 
Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/shares/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod refresh { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] 
azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/shares/{}/refresh", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod storage_account_credentials { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "Gets all the storage account credentials in a Data Box Edge/Data Box Gateway device."] pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn get( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn create_or_update( &self, device_name: impl Into<String>, name: impl Into<String>, storage_account_credential: impl Into<models::StorageAccountCredential>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), storage_account_credential: storage_account_credential.into(), subscription_id: 
subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn delete( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } }
pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::StorageAccountCredentialList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccountCredentials", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::StorageAccountCredentialList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } }
pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::StorageAccountCredential, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccountCredentials/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::StorageAccountCredential = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } }
pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::StorageAccountCredential), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) storage_account_credential: models::StorageAccountCredential, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccountCredentials/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.storage_account_credential).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::StorageAccountCredential = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } }
pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccountCredentials/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } }
pub mod storage_accounts { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "Lists all the storage accounts in a Data Box Edge/Data Box Gateway device."] pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Gets a StorageAccount by name."] pub fn get( &self, device_name: impl Into<String>, storage_account_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), storage_account_name: storage_account_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Creates a new StorageAccount or updates an existing StorageAccount on the device."] pub fn create_or_update( &self, device_name: impl Into<String>, storage_account_name: impl Into<String>, storage_account: impl Into<models::StorageAccount>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), storage_account_name: storage_account_name.into(), storage_account: storage_account.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn delete( &self, device_name: impl Into<String>, storage_account_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), storage_account_name: storage_account_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)]
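// Catch-all variant: any status not matched explicitly is surfaced with the parsed CloudError payload.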
DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::StorageAccountList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccounts", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::StorageAccountList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: 
super::super::Client, pub(crate) device_name: String, pub(crate) storage_account_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::StorageAccount, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccounts/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.storage_account_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::StorageAccount = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::StorageAccount), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) storage_account_name: String, pub(crate) storage_account: models::StorageAccount, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccounts/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, 
&self.device_name, &self.storage_account_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.storage_account).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::StorageAccount = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) storage_account_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccounts/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.storage_account_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = 
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod containers { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "Lists all the containers of a storage Account in a Data Box Edge/Data Box Gateway device."] pub fn list_by_storage_account( &self, device_name: impl Into<String>, storage_account_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_storage_account::Builder { list_by_storage_account::Builder { client: self.0.clone(), device_name: device_name.into(), storage_account_name: storage_account_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Gets a container by name."] pub fn get( &self, device_name: impl Into<String>, storage_account_name: impl Into<String>, container_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), storage_account_name: storage_account_name.into(), container_name: container_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Creates a new container or updates an existing container on the device."] pub fn create_or_update( &self, device_name: impl Into<String>, storage_account_name: impl Into<String>, container_name: impl Into<String>, container: impl Into<models::Container>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), storage_account_name: storage_account_name.into(), container_name: container_name.into(), container: container.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn delete( &self, device_name: impl Into<String>, storage_account_name: impl Into<String>, container_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), storage_account_name: storage_account_name.into(), container_name: container_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } #[doc = "Refreshes the container metadata with the data from the cloud."] pub fn refresh( &self, device_name: impl Into<String>, storage_account_name: impl Into<String>, container_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> 
refresh::Builder { refresh::Builder { client: self.0.clone(), device_name: device_name.into(), storage_account_name: storage_account_name.into(), container_name: container_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_storage_account { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) storage_account_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ContainerList, Error>> { Box::pin(async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccounts/{}/containers" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . device_name , & self . storage_account_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::ContainerList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] 
BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) storage_account_name: String, pub(crate) container_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Container, Error>> { Box::pin(async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccounts/{}/containers/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . device_name , & self . storage_account_name , & self . container_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Container = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::Container), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } 
#[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) storage_account_name: String, pub(crate) container_name: String, pub(crate) container: models::Container, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccounts/{}/containers/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . device_name , & self . storage_account_name , & self . container_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.container).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Container = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) storage_account_name: String, pub(crate) container_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn 
into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccounts/{}/containers/{}" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . device_name , & self . storage_account_name , & self . container_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod refresh { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) storage_account_name: String, pub(crate) container_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/storageAccounts/{}/containers/{}/refresh" , self . client . endpoint () , & self . subscription_id , & self . resource_group_name , & self . device_name , & self . storage_account_name , & self . 
container_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod triggers { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), filter: None, } } pub fn get( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn create_or_update( &self, device_name: impl Into<String>, name: impl Into<String>, trigger: impl Into<models::Trigger>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), trigger: trigger.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn delete( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] 
azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) filter: Option<String>, } impl Builder { pub fn filter(mut self, filter: impl Into<String>) -> Self { self.filter = Some(filter.into()); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::TriggerList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/triggers", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); if let Some(filter) = &self.filter { url.query_pairs_mut().append_pair("$filter", filter); } let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::TriggerList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder 
{ pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Trigger, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/triggers/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Trigger = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::Trigger), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) trigger: models::Trigger, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/triggers/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response 
= credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.trigger).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Trigger = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/triggers/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = 
self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod users { use super::models; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_data_box_edge_device( &self, device_name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> list_by_data_box_edge_device::Builder { list_by_data_box_edge_device::Builder { client: self.0.clone(), device_name: device_name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), filter: None, } } pub fn get( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn create_or_update( &self, device_name: impl Into<String>, name: impl Into<String>, user: impl Into<models::User>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), user: user.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } pub fn delete( &self, device_name: impl Into<String>, name: impl Into<String>, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), device_name: device_name.into(), name: name.into(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), } } } pub mod list_by_data_box_edge_device { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) filter: Option<String>, } impl Builder { pub fn filter(mut self, filter: impl Into<String>) -> Self { self.filter = Some(filter.into()); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, 
std::result::Result<models::UserList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/users", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); if let Some(filter) = &self.filter { url.query_pairs_mut().append_pair("$filter", filter); } let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::UserList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::User, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/users/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = 
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::User = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::models; #[derive(Debug)] pub enum Response { Ok200(models::User), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) user: models::User, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/users/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.user).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = 
azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::User = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::models; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) device_name: String, pub(crate) name: String, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{}/users/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.device_name, &self.name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::ACCEPTED => Ok(Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod skus { use super::models; pub struct Client(pub(crate) super::Client); impl Client { #[doc = "List all the available Skus in the 
region and information related to them"] pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder { list::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), filter: None, } } } pub mod list { use super::models; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::CloudError, }, #[error("Failed to parse request URL")] ParseUrl(#[source] url::ParseError), #[error("Failed to build request")] BuildRequest(#[source] http::Error), #[error("Failed to serialize request body")] Serialize(#[source] serde_json::Error), #[error("Failed to get access token")] GetToken(#[source] azure_core::Error), #[error("Failed to execute request")] SendRequest(#[source] azure_core::Error), #[error("Failed to get response bytes")] ResponseBytes(#[source] azure_core::StreamError), #[error("Failed to deserialize response, body: {1:?}")] Deserialize(#[source] serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) filter: Option<String>, } impl Builder { pub fn filter(mut self, filter: impl Into<String>) -> Self { self.filter = Some(filter.into()); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::SkuInformationList, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.DataBoxEdge/skus", self.client.endpoint(), &self.subscription_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", "2019-08-01"); if let Some(filter) = &self.filter { url.query_pairs_mut().append_pair("$filter", filter); } let req_body = azure_core::EMPTY_BODY; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::SkuInformationList = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::CloudError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } }
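Several of the generated list operations above (triggers, users, skus) carry an optional OData filter, applied through the builder's `filter` setter before `into_future()`. A minimal sketch against the `skus` module just above — the `client` handle and the filter string are assumed for illustration:

// Sketch: optional $filter on a generated list builder.
// `client` is an already-built skus::Client (construction elided);
// the subscription id and filter expression are placeholders.
async fn list_gateway_skus(
    client: &skus::Client,
) -> Result<models::SkuInformationList, skus::list::Error> {
    client
        .list("sub-id")                 // subscription_id (placeholder)
        .filter("location eq 'westus'") // appended to the URL as $filter=...
        .into_future()
        .await
}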
50.621259
352
0.518258
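The generated `create_or_update` operations model their long-running results as a two-arm `Response` enum (`Ok200(...)` when the service returns the resource inline, `Accepted202` when it completes asynchronously). A sketch of consuming that shape via the `users` module above, with the `client` handle and `user` payload assumed:

// Sketch: handling both success arms of a generated create_or_update.
// `client` (users::Client) and `user` (models::User) are assumed to exist.
async fn upsert_user(
    client: &users::Client,
    user: models::User,
) -> Result<(), users::create_or_update::Error> {
    let rsp = client
        .create_or_update("my-device", "alice", user, "sub-id", "my-rg")
        .into_future()
        .await?;
    match rsp {
        // 200: the service returned the stored user inline.
        users::create_or_update::Response::Ok200(_updated) => {}
        // 202: accepted; the operation completes asynchronously.
        users::create_or_update::Response::Accepted202 => {}
    }
    Ok(())
}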
e5a7c6b9dc3a9e98e26ce5924709aa47d8504fad
2,834
/**
 * Copyright (c) 2019, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 */

use crate::lexable_trivia::LexableTrivia;
use crate::source_text::SourceText;
use crate::trivia_kind::TriviaKind;

#[derive(Debug, Clone, PartialEq)]
pub struct PositionedTrivia {
    pub kind: TriviaKind,
    pub offset: usize,
    pub width: usize,
}

impl LexableTrivia for PositionedTrivia {
    fn kind(&self) -> TriviaKind {
        self.kind
    }

    fn width(&self) -> usize {
        self.width
    }

    fn make_whitespace(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::WhiteSpace,
            offset,
            width,
        }
    }

    fn make_eol(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::EndOfLine,
            offset,
            width,
        }
    }

    fn make_single_line_comment(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::SingleLineComment,
            offset,
            width,
        }
    }

    fn make_fallthrough(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::FallThrough,
            offset,
            width,
        }
    }

    fn make_unsafe(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::Unsafe,
            offset,
            width,
        }
    }

    fn make_unsafe_expression(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::UnsafeExpression,
            offset,
            width,
        }
    }

    fn make_fix_me(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::FixMe,
            offset,
            width,
        }
    }

    fn make_ignore_error(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::IgnoreError,
            offset,
            width,
        }
    }

    fn make_extra_token_error(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::ExtraTokenError,
            offset,
            width,
        }
    }

    fn make_delimited_comment(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::DelimitedComment,
            offset,
            width,
        }
    }

    fn make_after_halt_compiler(_source: &SourceText, offset: usize, width: usize) -> Self {
        Self {
            kind: TriviaKind::AfterHaltCompiler,
            offset,
            width,
        }
    }
}
24.222222
92
0.54446
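The `LexableTrivia` impl above is a pure factory: each `make_*` constructor ignores the source text and records only a kind, an offset, and a width. A small sketch of using those constructors — the `&SourceText` handle is assumed to come from the surrounding parser environment, and the assertions assume `TriviaKind` derives `PartialEq`/`Debug` as the struct's own derives suggest:

// Sketch: building positioned trivia via the factory methods above.
// PositionedTrivia never reads `source`; it only stores kind/offset/width.
fn leading_newline(source: &SourceText, offset: usize) -> PositionedTrivia {
    let eol = PositionedTrivia::make_eol(source, offset, 1);
    debug_assert_eq!(eol.kind(), TriviaKind::EndOfLine);
    debug_assert_eq!(eol.width(), 1);
    eol
}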
ddb98aaf506e17050adc810cde9c6da7a22bd2a8
13,648
/*
use crate::error::RatsioError;
use crate::ops::Op;
use futures::{future::{self, Either}, prelude::*, SinkExt};
use std::{
    net::{SocketAddr, ToSocketAddrs},
    str::FromStr,
    sync::Arc,
};
use futures::channel::mpsc::UnboundedSender;

pub(crate) type ReconnectHandler = UnboundedSender<Arc<NatsConnection>>;

use std::task::{Context, Poll};
use std::pin::Pin;
use futures::executor::LocalPool;
use futures::task::{LocalSpawnExt, SpawnExt};
use std::sync::RwLock;
use crate::net::nats_tcp_stream::NatsTcpStream;
use futures::stream::{SplitSink, SplitStream};
use futures::task;
use url::Url;
use futures_timer::Delay;
use std::ops::Add;
use pin_project::{pin_project, project};

/// State of the raw connection
#[derive(Debug, Clone, Copy, PartialEq)]
pub(crate) enum NatsConnectionState {
    Connected,
    Reconnecting,
    Disconnected,
}

/// Represents a connection to a NATS server. Implements `Sink` and `Stream`
#[derive(Debug)]
pub struct NatsConnection {
    /// indicates if the connection is made over TLS
    pub(crate) is_tls: bool,
    /// Inner dual `Stream`/`Sink` of the TCP connection
    pub(crate) inner: Arc<RwLock<(Url, NatsConnectionInner)>>,
    /// Current state of the connection, and connect version.
    /// Version only increments on a successful reconnect.
    pub(crate) state: Arc<RwLock<(NatsConnectionState, u64)>>,
    /// Reconnect trigger
    pub(crate) reconnect_handler: ReconnectHandler,
    pub(crate) init_hosts: Vec<String>,
    pub(crate) reconnect_hosts: RwLock<Vec<String>>,
    pub(crate) reconnect_timeout: u64,
}

pub struct NatsConnSinkStream {
    /// Inner dual `Stream`/`Sink` of the TCP connection
    pub(crate) inner: Arc<RwLock<(Url, NatsConnectionInner)>>,
    /// Current state of the connection, and connect version.
    /// Version only increments on a successful reconnect.
    pub(crate) state: Arc<RwLock<(NatsConnectionState, u64)>>,
    /// Reconnect trigger
    pub(crate) reconnect_trigger: Box<dyn Fn() + Sync + Send>,
}

type NatsConnectionInner = (SplitSink<NatsTcpStream, Op>, SplitStream<NatsTcpStream>);

impl NatsConnection {
    /// Connect to a raw TCP socket
    async fn connect(addr: SocketAddr) -> Result<NatsConnectionInner, RatsioError> {
        let tcp_stream = tokio::net::TcpStream::connect(&addr).await?;
        let (write_stream, read_stream) = NatsTcpStream::new(tcp_stream).await.split();
        debug!(target: "ratsio", "Got a socket successfully.");
        Ok((write_stream, read_stream).into())
    }

    /// Connect to a TLS over TCP socket. Upgrade is performed automatically
    async fn connect_tls(host: String, addr: SocketAddr) -> Result<NatsConnectionInner, RatsioError> {
        let tcp_stream = tokio::net::TcpStream::connect(&addr).await?;
        let (write_stream, read_stream) = NatsTcpStream::new(tcp_stream).await.split();
        debug!(target: "ratsio", "Got a socket successfully.");
        Ok((write_stream, read_stream).into())
    }

    /// Tries to reconnect once to the server; only used internally. Blocks polling during reconnecting
    /// by forcing the object to return `Async::NotReady`/`AsyncSink::NotReady`
    pub(crate) fn trigger_reconnect(conn: Arc<Self>) {
        trace!(target: "ratsio", "Trigger reconnection");
        let connect_version = conn.state.read().unwrap().1;
        {
            let mut state_guard = conn.state.write().unwrap();
            if state_guard.0 == NatsConnectionState::Reconnecting {
                // Another thread is busy reconnecting...
                trace!(target: "ratsio", "Already reconnecting, nothing to do");
                return;
            } else if state_guard.0 == NatsConnectionState::Connected && state_guard.1 > connect_version {
                // Another thread has already reconnected ...
                trace!(target: "ratsio", "Another thread has reconnected, nothing to do");
                return;
            } else {
                let current_version = state_guard.1;
                *state_guard = (NatsConnectionState::Disconnected, current_version);
            }
        }
        NatsConnection::reconnect(conn);
    }

    fn reconnect(conn: Arc<Self>) {
        trace!(target: "ratsio", "Reconnecting");
        {
            let mut state_guard = conn.state.write().unwrap();
            if state_guard.0 == NatsConnectionState::Disconnected {
                *state_guard = (NatsConnectionState::Reconnecting, state_guard.1);
            } else {
                return;
            }
        }
        let cluster_addrs: Vec<_> = NatsConnection::parse_uris(&conn.reconnect_hosts.read().unwrap());
        trace!(target: "ratsio", "Retrying {:?}", &*conn.reconnect_hosts.read().unwrap());

        let mut executor = LocalPool::new();
        let spawner = executor.spawner();
        spawner.spawn(async move {
            let inner_result = NatsConnection::get_conn_inner(cluster_addrs, conn.is_tls).await;
            let connect_version = (*conn.state.read().unwrap()).1;
            let retry_conn = conn.clone();
            match inner_result {
                Ok(new_inner) => {
                    *conn.inner.write().unwrap() = new_inner;
                    *conn.state.write().unwrap() = (NatsConnectionState::Connected, connect_version + 1);
                    let _ = conn.reconnect_handler.unbounded_send(conn.clone());
                    debug!(target: "ratsio", "Got a connection");
                }
                Err(err) => {
                    error!(target: "ratsio", "Error reconnecting :: {:?}", err);
                    *retry_conn.state.write().unwrap() = (NatsConnectionState::Disconnected, connect_version);
                    // Reschedule another attempt
                    use std::time::{Instant, Duration};
                    let _ = Delay::new(Duration::from_millis(retry_conn.reconnect_timeout)).await;
                    NatsConnection::trigger_reconnect(retry_conn);
                }
            };
        });
    }

    pub async fn create_connection(
        reconnect_handler: ReconnectHandler,
        reconnect_timeout: u64,
        cluster_uris: &[String],
        tls_required: bool,
    ) -> Result<NatsConnection, RatsioError> {
        let cluster_addrs = NatsConnection::parse_uris(cluster_uris);
        let init_hosts = cluster_uris.to_vec();
        let inner = NatsConnection::get_conn_inner(cluster_addrs, tls_required).await?;
        Ok(NatsConnection {
            is_tls: tls_required,
            state: Arc::new(RwLock::new((NatsConnectionState::Connected, 0))),
            inner: Arc::new(RwLock::new(inner)),
            init_hosts: init_hosts.clone(),
            reconnect_hosts: RwLock::new(init_hosts),
            reconnect_handler,
            reconnect_timeout,
        })
    }

    pub fn parse_uris(cluster_uris: &[String]) -> Vec<(Url, SocketAddr)> {
        cluster_uris.iter().map(|cluster_uri| {
            let formatted_url = if cluster_uri.starts_with("nats://") {
                cluster_uri.clone()
            } else {
                format!("nats://{}", cluster_uri)
            };
            let node_url = Url::parse(&formatted_url);
            match node_url {
                Ok(node_url) => match node_url.host_str() {
                    Some(host) => {
                        let host_and_port = format!("{}:{}", &host, node_url.port().unwrap_or(4222));
                        match SocketAddr::from_str(&host_and_port) {
                            Ok(sock_addr) => {
                                info!(" Resolved {} to {}", &host, &sock_addr);
                                vec![(node_url.clone(), sock_addr)]
                            }
                            Err(_) => match host_and_port.to_socket_addrs() {
                                Ok(ips_iter) => ips_iter.map(|x| {
                                    info!(" Resolved {} to {}", &host, &x);
                                    (node_url.clone(), x)
                                }).collect::<Vec<_>>(),
                                Err(err) => {
                                    error!("Unable to resolve url => {} to ip address => {}", cluster_uri, err);
                                    Vec::new()
                                }
                            },
                        }
                    }
                    _ => Vec::new(),
                },
                Err(err) => {
                    error!("Unable to parse url => {} => {}", cluster_uri, err);
                    Vec::new()
                }
            }
        }).flatten().collect()
    }

    async fn get_conn_inner(
        cluster_addrs: Vec<(Url, SocketAddr)>,
        tls_required: bool,
    ) -> Result<(Url, NatsConnectionInner), RatsioError> {
        if cluster_addrs.is_empty() {
            warn!("No addresses to connect to.");
            return Err(RatsioError::NoRouteToHostError);
        }

        async fn get_conn_step(
            cluster_addrs: &[(Url, SocketAddr)],
            tls_required: bool,
        ) -> Result<(Url, NatsConnectionInner), RatsioError> {
            if cluster_addrs.is_empty() {
                Err(RatsioError::NoRouteToHostError)
            } else {
                let (node_url, node_addr) = cluster_addrs[0].clone();
                if tls_required {
                    match node_url.host_str() {
                        Some(host) => {
                            let conn = NatsConnection::connect_tls(host.to_string(), node_addr).await?;
                            Ok((node_url.clone(), conn))
                        }
                        None => Err(RatsioError::NoRouteToHostError),
                    }
                } else {
                    let conn = NatsConnection::connect(node_addr).await?;
                    Ok((node_url.clone(), conn))
                }
            }
        }

        let mut cluster_addrs = cluster_addrs;
        loop {
            match get_conn_step(&cluster_addrs[..], tls_required).await {
                Ok(inner) => {
                    return Ok(inner);
                }
                Err(_err) => {
                    let rem_addrs = Vec::from(&cluster_addrs[1..]).clone();
                    if rem_addrs.is_empty() {
                        return Err(RatsioError::NoRouteToHostError);
                    } else {
                        cluster_addrs = rem_addrs
                    }
                }
            }
        }
    }
}

impl Sink<Op> for NatsConnSinkStream {
    type Error = RatsioError;

    fn start_send(self: Pin<&mut Self>, item: Op) -> Result<(), Self::Error> {
        if match self.state.try_read() {
            Ok(state) => (*state).0 != NatsConnectionState::Connected,
            _ => true,
        } {
            return Err(RatsioError::ServerDisconnected(None));
        }

        if let Ok(mut inner) = self.inner.write() {
            //let mut sink = &mut ((*inner).1).0;
            match Sink::start_send(Pin::new(&mut ((*inner).1).0), item) {
                Err(RatsioError::ServerDisconnected(_)) => {
                    (*self.reconnect_trigger)();
                    Ok(())
                }
                poll_res => poll_res,
            }
        } else {
            Err(RatsioError::ServerDisconnected(None))
        }
    }

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        match self.state.try_read() {
            Ok(state) => {
                if (*state).0 == NatsConnectionState::Connected {
                    // TODO: also check the underlying connection for readiness, e.g.
                    // Sink::poll_ready(Pin::new(&mut ((*inner).1).0), cx)
                    Poll::Ready(Ok(()))
                } else {
                    Poll::Pending
                }
            }
            _ => Poll::Pending,
        }
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        if let Ok(mut inner) = self.inner.write() {
            match Sink::poll_flush(Pin::new(&mut ((*inner).1).0), cx) {
                Poll::Ready(Err(RatsioError::ServerDisconnected(_))) => {
                    (*self.reconnect_trigger)();
                    Poll::Pending
                }
                poll_res => poll_res,
            }
        } else {
            Poll::Pending
        }
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        if let Ok(mut inner) = self.inner.try_write() {
            match Sink::poll_close(Pin::new(&mut ((*inner).1).0), cx) {
                Poll::Ready(Err(RatsioError::ServerDisconnected(_))) => {
                    (*self.reconnect_trigger)();
                    Poll::Pending
                }
                poll_res => poll_res,
            }
        } else {
            Poll::Pending
        }
    }
}

impl Stream for NatsConnSinkStream {
    type Item = Op;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if match self.state.try_read() {
            Ok(state) => (*state).0 != NatsConnectionState::Connected,
            _ => true,
        } {
            return Poll::Pending;
        }

        if let Ok(mut inner) = self.inner.try_write() {
            (Pin::new(&mut ((*inner).1).1)).poll_next(cx)
        } else {
            Poll::Pending
        }
    }
}
*/
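The `(state, version)` guard inside `trigger_reconnect` above is the subtle part of this (commented-out) module, so here is a minimal, runnable sketch of the same idea; all names are illustrative stand-ins, not the crate's API. The version only increments on a successful reconnect, so a thread that observes a newer version than the one it started with knows another thread already did the work.

use std::sync::{Arc, RwLock};

#[derive(Debug, Clone, Copy, PartialEq)]
enum ConnState {
    Connected,
    Reconnecting,
    Disconnected,
}

/// Returns true when the caller won the race and should attempt the reconnect.
fn trigger_reconnect(state: &Arc<RwLock<(ConnState, u64)>>, seen_version: u64) -> bool {
    let mut guard = state.write().unwrap();
    if guard.0 == ConnState::Reconnecting {
        return false; // another thread is already reconnecting
    }
    if guard.0 == ConnState::Connected && guard.1 > seen_version {
        return false; // another thread already reconnected
    }
    guard.0 = ConnState::Reconnecting;
    true
}

fn main() {
    let state = Arc::new(RwLock::new((ConnState::Connected, 1)));
    // A thread that saw version 0 loses the race: the version is already 1.
    assert!(!trigger_reconnect(&state, 0));
    // A thread that saw the current version wins and flips the state.
    assert!(trigger_reconnect(&state, 1));
    assert_eq!(state.read().unwrap().0, ConnState::Reconnecting);
}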
39.906433
120
0.520223
69fd28d1723685067d77df6a650b693f6f7b09c2
43,149
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // // ignore-lexer-test FIXME #15679 //! An owned, growable string that enforces that its contents are valid UTF-8. #![stable(feature = "rust1", since = "1.0.0")] use core::prelude::*; use core::borrow::{Cow, IntoCow}; use core::default::Default; use core::error::Error; use core::fmt; use core::hash; use core::iter::FromIterator; use core::mem; use core::ops::{self, Deref, Add, Index}; use core::ptr; use core::raw::Slice as RawSlice; use unicode::str as unicode_str; use unicode::str::Utf16Item; use str::{self, CharRange, FromStr, Utf8Error}; use vec::{DerefVec, Vec, as_vec}; /// A growable string stored as a UTF-8 encoded buffer. #[derive(Clone, PartialOrd, Eq, Ord)] #[stable(feature = "rust1", since = "1.0.0")] pub struct String { vec: Vec<u8>, } /// A possible error value from the `String::from_utf8` function. #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug)] pub struct FromUtf8Error { bytes: Vec<u8>, error: Utf8Error, } /// A possible error value from the `String::from_utf16` function. #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug)] pub struct FromUtf16Error(()); impl String { /// Creates a new string buffer initialized with the empty string. /// /// # Examples /// /// ``` /// let mut s = String::new(); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn new() -> String { String { vec: Vec::new(), } } /// Creates a new string buffer with the given capacity. /// The string will be able to hold exactly `capacity` bytes without /// reallocating. If `capacity` is 0, the string will not allocate. /// /// # Examples /// /// ``` /// let mut s = String::with_capacity(10); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn with_capacity(capacity: usize) -> String { String { vec: Vec::with_capacity(capacity), } } /// Creates a new string buffer from the given string. /// /// # Examples /// /// ``` /// let s = String::from_str("hello"); /// assert_eq!(s.as_slice(), "hello"); /// ``` #[inline] #[unstable(feature = "collections", reason = "needs investigation to see if to_string() can match perf")] pub fn from_str(string: &str) -> String { String { vec: ::slice::SliceExt::to_vec(string.as_bytes()) } } /// Returns the vector as a string buffer, if possible, taking care not to /// copy it. /// /// # Failure /// /// If the given vector is not valid UTF-8, then the original vector and the /// corresponding error is returned. /// /// # Examples /// /// ```rust /// use std::str::Utf8Error; /// /// let hello_vec = vec![104, 101, 108, 108, 111]; /// let s = String::from_utf8(hello_vec).unwrap(); /// assert_eq!(s, "hello"); /// /// let invalid_vec = vec![240, 144, 128]; /// let s = String::from_utf8(invalid_vec).err().unwrap(); /// assert_eq!(s.utf8_error(), Utf8Error::TooShort); /// assert_eq!(s.into_bytes(), vec![240, 144, 128]); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf8(vec: Vec<u8>) -> Result<String, FromUtf8Error> { match str::from_utf8(&vec) { Ok(..) 
=> Ok(String { vec: vec }), Err(e) => Err(FromUtf8Error { bytes: vec, error: e }) } } /// Converts a vector of bytes to a new UTF-8 string. /// Any invalid UTF-8 sequences are replaced with U+FFFD REPLACEMENT CHARACTER. /// /// # Examples /// /// ```rust /// let input = b"Hello \xF0\x90\x80World"; /// let output = String::from_utf8_lossy(input); /// assert_eq!(output.as_slice(), "Hello \u{FFFD}World"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf8_lossy<'a>(v: &'a [u8]) -> CowString<'a> { let mut i = 0; match str::from_utf8(v) { Ok(s) => return Cow::Borrowed(s), Err(e) => { if let Utf8Error::InvalidByte(firstbad) = e { i = firstbad; } } } static TAG_CONT_U8: u8 = 128u8; static REPLACEMENT: &'static [u8] = b"\xEF\xBF\xBD"; // U+FFFD in UTF-8 let total = v.len(); fn unsafe_get(xs: &[u8], i: usize) -> u8 { unsafe { *xs.get_unchecked(i) } } fn safe_get(xs: &[u8], i: usize, total: usize) -> u8 { if i >= total { 0 } else { unsafe_get(xs, i) } } let mut res = String::with_capacity(total); if i > 0 { unsafe { res.as_mut_vec().push_all(&v[..i]) }; } // subseqidx is the index of the first byte of the subsequence we're looking at. // It's used to copy a bunch of contiguous good codepoints at once instead of copying // them one by one. let mut subseqidx = i; while i < total { let i_ = i; let byte = unsafe_get(v, i); i += 1; macro_rules! error { () => ({ unsafe { if subseqidx != i_ { res.as_mut_vec().push_all(&v[subseqidx..i_]); } subseqidx = i; res.as_mut_vec().push_all(REPLACEMENT); } })} if byte < 128u8 { // subseqidx handles this } else { let w = unicode_str::utf8_char_width(byte); match w { 2 => { if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 { error!(); continue; } i += 1; } 3 => { match (byte, safe_get(v, i, total)) { (0xE0 , 0xA0 ... 0xBF) => (), (0xE1 ... 0xEC, 0x80 ... 0xBF) => (), (0xED , 0x80 ... 0x9F) => (), (0xEE ... 0xEF, 0x80 ... 0xBF) => (), _ => { error!(); continue; } } i += 1; if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 { error!(); continue; } i += 1; } 4 => { match (byte, safe_get(v, i, total)) { (0xF0 , 0x90 ... 0xBF) => (), (0xF1 ... 0xF3, 0x80 ... 0xBF) => (), (0xF4 , 0x80 ... 0x8F) => (), _ => { error!(); continue; } } i += 1; if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 { error!(); continue; } i += 1; if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 { error!(); continue; } i += 1; } _ => { error!(); continue; } } } } if subseqidx < total { unsafe { res.as_mut_vec().push_all(&v[subseqidx..total]) }; } Cow::Owned(res) } /// Decode a UTF-16 encoded vector `v` into a `String`, returning `None` /// if `v` contains any invalid data. /// /// # Examples /// /// ```rust /// // 𝄞music /// let mut v = &mut [0xD834, 0xDD1E, 0x006d, 0x0075, /// 0x0073, 0x0069, 0x0063]; /// assert_eq!(String::from_utf16(v).unwrap(), /// "𝄞music".to_string()); /// /// // 𝄞mu<invalid>ic /// v[4] = 0xD800; /// assert!(String::from_utf16(v).is_err()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf16(v: &[u16]) -> Result<String, FromUtf16Error> { let mut s = String::with_capacity(v.len()); for c in unicode_str::utf16_items(v) { match c { Utf16Item::ScalarValue(c) => s.push(c), Utf16Item::LoneSurrogate(_) => return Err(FromUtf16Error(())), } } Ok(s) } /// Decode a UTF-16 encoded vector `v` into a string, replacing /// invalid data with the replacement character (U+FFFD). 
/// /// # Examples /// /// ```rust /// // 𝄞mus<invalid>ic<invalid> /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075, /// 0x0073, 0xDD1E, 0x0069, 0x0063, /// 0xD834]; /// /// assert_eq!(String::from_utf16_lossy(v), /// "𝄞mus\u{FFFD}ic\u{FFFD}".to_string()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf16_lossy(v: &[u16]) -> String { unicode_str::utf16_items(v).map(|c| c.to_char_lossy()).collect() } /// Creates a new `String` from a length, capacity, and pointer. /// /// This is unsafe because: /// * We call `Vec::from_raw_parts` to get a `Vec<u8>`; /// * We assume that the `Vec` contains valid UTF-8. #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> String { String { vec: Vec::from_raw_parts(buf, length, capacity), } } /// Converts a vector of bytes to a new `String` without checking if /// it contains valid UTF-8. This is unsafe because it assumes that /// the UTF-8-ness of the vector has already been validated. #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn from_utf8_unchecked(bytes: Vec<u8>) -> String { String { vec: bytes } } /// Return the underlying byte buffer, encoded as UTF-8. /// /// # Examples /// /// ``` /// let s = String::from_str("hello"); /// let bytes = s.into_bytes(); /// assert_eq!(bytes, vec![104, 101, 108, 108, 111]); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn into_bytes(self) -> Vec<u8> { self.vec } /// Pushes the given string onto this string buffer. /// /// # Examples /// /// ``` /// let mut s = String::from_str("foo"); /// s.push_str("bar"); /// assert_eq!(s.as_slice(), "foobar"); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn push_str(&mut self, string: &str) { self.vec.push_all(string.as_bytes()) } /// Returns the number of bytes that this string buffer can hold without /// reallocating. /// /// # Examples /// /// ``` /// let s = String::with_capacity(10); /// assert!(s.capacity() >= 10); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn capacity(&self) -> usize { self.vec.capacity() } /// Reserves capacity for at least `additional` more bytes to be inserted /// in the given `String`. The collection may reserve more space to avoid /// frequent reallocations. /// /// # Panics /// /// Panics if the new capacity overflows `usize`. /// /// # Examples /// /// ``` /// let mut s = String::new(); /// s.reserve(10); /// assert!(s.capacity() >= 10); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn reserve(&mut self, additional: usize) { self.vec.reserve(additional) } /// Reserves the minimum capacity for exactly `additional` more bytes to be /// inserted in the given `String`. Does nothing if the capacity is already /// sufficient. /// /// Note that the allocator may give the collection more space than it /// requests. Therefore capacity can not be relied upon to be precisely /// minimal. Prefer `reserve` if future insertions are expected. /// /// # Panics /// /// Panics if the new capacity overflows `usize`. /// /// # Examples /// /// ``` /// let mut s = String::new(); /// s.reserve(10); /// assert!(s.capacity() >= 10); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn reserve_exact(&mut self, additional: usize) { self.vec.reserve_exact(additional) } /// Shrinks the capacity of this string buffer to match its length. 
/// /// # Examples /// /// ``` /// let mut s = String::from_str("foo"); /// s.reserve(100); /// assert!(s.capacity() >= 100); /// s.shrink_to_fit(); /// assert_eq!(s.capacity(), 3); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn shrink_to_fit(&mut self) { self.vec.shrink_to_fit() } /// Adds the given character to the end of the string. /// /// # Examples /// /// ``` /// let mut s = String::from_str("abc"); /// s.push('1'); /// s.push('2'); /// s.push('3'); /// assert_eq!(s.as_slice(), "abc123"); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn push(&mut self, ch: char) { if (ch as u32) < 0x80 { self.vec.push(ch as u8); return; } let cur_len = self.len(); // This may use up to 4 bytes. self.vec.reserve(4); unsafe { // Attempt to not use an intermediate buffer by just pushing bytes // directly onto this string. let slice = RawSlice { data: self.vec.as_ptr().offset(cur_len as isize), len: 4, }; let used = ch.encode_utf8(mem::transmute(slice)).unwrap_or(0); self.vec.set_len(cur_len + used); } } /// Works with the underlying buffer as a byte slice. /// /// # Examples /// /// ``` /// let s = String::from_str("hello"); /// let b: &[_] = &[104, 101, 108, 108, 111]; /// assert_eq!(s.as_bytes(), b); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn as_bytes(&self) -> &[u8] { &self.vec } /// Shortens a string to the specified length. /// /// # Panics /// /// Panics if `new_len` > current length, /// or if `new_len` is not a character boundary. /// /// # Examples /// /// ``` /// let mut s = String::from_str("hello"); /// s.truncate(2); /// assert_eq!(s.as_slice(), "he"); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn truncate(&mut self, new_len: usize) { assert!(self.is_char_boundary(new_len)); self.vec.truncate(new_len) } /// Removes the last character from the string buffer and returns it. /// Returns `None` if this string buffer is empty. /// /// # Examples /// /// ``` /// let mut s = String::from_str("foo"); /// assert_eq!(s.pop(), Some('o')); /// assert_eq!(s.pop(), Some('o')); /// assert_eq!(s.pop(), Some('f')); /// assert_eq!(s.pop(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn pop(&mut self) -> Option<char> { let len = self.len(); if len == 0 { return None } let CharRange {ch, next} = self.char_range_at_reverse(len); unsafe { self.vec.set_len(next); } Some(ch) } /// Removes the character from the string buffer at byte position `idx` and /// returns it. /// /// # Warning /// /// This is an O(n) operation as it requires copying every element in the /// buffer. /// /// # Panics /// /// If `idx` does not lie on a character boundary, or if it is out of /// bounds, then this function will panic. /// /// # Examples /// /// ``` /// let mut s = String::from_str("foo"); /// assert_eq!(s.remove(0), 'f'); /// assert_eq!(s.remove(1), 'o'); /// assert_eq!(s.remove(0), 'o'); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn remove(&mut self, idx: usize) -> char { let len = self.len(); assert!(idx <= len); let CharRange { ch, next } = self.char_range_at(idx); unsafe { ptr::copy_memory(self.vec.as_mut_ptr().offset(idx as isize), self.vec.as_ptr().offset(next as isize), len - next); self.vec.set_len(len - (next - idx)); } ch } /// Insert a character into the string buffer at byte position `idx`. /// /// # Warning /// /// This is an O(n) operation as it requires copying every element in the /// buffer. 
/// /// # Panics /// /// If `idx` does not lie on a character boundary or is out of bounds, then /// this function will panic. #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn insert(&mut self, idx: usize, ch: char) { let len = self.len(); assert!(idx <= len); assert!(self.is_char_boundary(idx)); self.vec.reserve(4); let mut bits = [0; 4]; let amt = ch.encode_utf8(&mut bits).unwrap(); unsafe { ptr::copy_memory(self.vec.as_mut_ptr().offset((idx + amt) as isize), self.vec.as_ptr().offset(idx as isize), len - idx); ptr::copy_memory(self.vec.as_mut_ptr().offset(idx as isize), bits.as_ptr(), amt); self.vec.set_len(len + amt); } } /// Views the string buffer as a mutable sequence of bytes. /// /// This is unsafe because it does not check /// to ensure that the resulting string will be valid UTF-8. /// /// # Examples /// /// ``` /// let mut s = String::from_str("hello"); /// unsafe { /// let vec = s.as_mut_vec(); /// assert!(vec == &mut vec![104, 101, 108, 108, 111]); /// vec.reverse(); /// } /// assert_eq!(s.as_slice(), "olleh"); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8> { &mut self.vec } /// Return the number of bytes in this string. /// /// # Examples /// /// ``` /// let a = "foo".to_string(); /// assert_eq!(a.len(), 3); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { self.vec.len() } /// Returns true if the string contains no bytes /// /// # Examples /// /// ``` /// let mut v = String::new(); /// assert!(v.is_empty()); /// v.push('a'); /// assert!(!v.is_empty()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Truncates the string, returning it to 0 length. /// /// # Examples /// /// ``` /// let mut s = "foo".to_string(); /// s.clear(); /// assert!(s.is_empty()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn clear(&mut self) { self.vec.clear() } } impl FromUtf8Error { /// Consume this error, returning the bytes that were attempted to make a /// `String` with. #[stable(feature = "rust1", since = "1.0.0")] pub fn into_bytes(self) -> Vec<u8> { self.bytes } /// Access the underlying UTF8-error that was the cause of this error. 
#[stable(feature = "rust1", since = "1.0.0")] pub fn utf8_error(&self) -> Utf8Error { self.error } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for FromUtf8Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.error, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl Error for FromUtf8Error { fn description(&self) -> &str { "invalid utf-8" } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for FromUtf16Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt("invalid utf-16: lone surrogate found", f) } } #[stable(feature = "rust1", since = "1.0.0")] impl Error for FromUtf16Error { fn description(&self) -> &str { "invalid utf-16" } } #[stable(feature = "rust1", since = "1.0.0")] impl FromIterator<char> for String { fn from_iter<I:Iterator<Item=char>>(iterator: I) -> String { let mut buf = String::new(); buf.extend(iterator); buf } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a> FromIterator<&'a str> for String { fn from_iter<I:Iterator<Item=&'a str>>(iterator: I) -> String { let mut buf = String::new(); buf.extend(iterator); buf } } #[unstable(feature = "collections", reason = "waiting on Extend stabilization")] impl Extend<char> for String { fn extend<I:Iterator<Item=char>>(&mut self, iterator: I) { let (lower_bound, _) = iterator.size_hint(); self.reserve(lower_bound); for ch in iterator { self.push(ch) } } } #[unstable(feature = "collections", reason = "waiting on Extend stabilization")] impl<'a> Extend<&'a str> for String { fn extend<I: Iterator<Item=&'a str>>(&mut self, iterator: I) { // A guess that at least one byte per iterator element will be needed. let (lower_bound, _) = iterator.size_hint(); self.reserve(lower_bound); for s in iterator { self.push_str(s) } } } #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for String { #[inline] fn eq(&self, other: &String) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &String) -> bool { PartialEq::ne(&**self, &**other) } } macro_rules! impl_eq { ($lhs:ty, $rhs: ty) => { #[stable(feature = "rust1", since = "1.0.0")] impl<'a> PartialEq<$rhs> for $lhs { #[inline] fn eq(&self, other: &$rhs) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &$rhs) -> bool { PartialEq::ne(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a> PartialEq<$lhs> for $rhs { #[inline] fn eq(&self, other: &$lhs) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &$lhs) -> bool { PartialEq::ne(&**self, &**other) } } } } impl_eq! { String, &'a str } impl_eq! 
{ CowString<'a>, String } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, 'b> PartialEq<&'b str> for CowString<'a> { #[inline] fn eq(&self, other: &&'b str) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &&'b str) -> bool { PartialEq::ne(&**self, &**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, 'b> PartialEq<CowString<'a>> for &'b str { #[inline] fn eq(&self, other: &CowString<'a>) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &CowString<'a>) -> bool { PartialEq::ne(&**self, &**other) } } #[unstable(feature = "collections", reason = "waiting on Str stabilization")] impl Str for String { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn as_slice(&self) -> &str { unsafe { mem::transmute(&*self.vec) } } } #[stable(feature = "rust1", since = "1.0.0")] impl Default for String { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn default() -> String { String::new() } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for String { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for String { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } #[unstable(feature = "collections", reason = "waiting on Hash stabilization")] impl<H: hash::Writer + hash::Hasher> hash::Hash<H> for String { #[inline] fn hash(&self, hasher: &mut H) { (**self).hash(hasher) } } #[unstable(feature = "collections", reason = "recent addition, needs more experience")] impl<'a> Add<&'a str> for String { type Output = String; #[inline] fn add(mut self, other: &str) -> String { self.push_str(other); self } } #[stable(feature = "rust1", since = "1.0.0")] impl ops::Index<ops::Range<usize>> for String { type Output = str; #[inline] fn index(&self, index: &ops::Range<usize>) -> &str { &self[][*index] } } #[stable(feature = "rust1", since = "1.0.0")] impl ops::Index<ops::RangeTo<usize>> for String { type Output = str; #[inline] fn index(&self, index: &ops::RangeTo<usize>) -> &str { &self[][*index] } } #[stable(feature = "rust1", since = "1.0.0")] impl ops::Index<ops::RangeFrom<usize>> for String { type Output = str; #[inline] fn index(&self, index: &ops::RangeFrom<usize>) -> &str { &self[][*index] } } #[stable(feature = "rust1", since = "1.0.0")] impl ops::Index<ops::RangeFull> for String { type Output = str; #[inline] fn index(&self, _index: &ops::RangeFull) -> &str { unsafe { mem::transmute(&*self.vec) } } } #[stable(feature = "rust1", since = "1.0.0")] impl ops::Deref for String { type Target = str; #[inline] fn deref(&self) -> &str { unsafe { mem::transmute(&self.vec[]) } } } /// Wrapper type providing a `&String` reference via `Deref`. #[unstable(feature = "collections")] pub struct DerefString<'a> { x: DerefVec<'a, u8> } impl<'a> Deref for DerefString<'a> { type Target = String; #[inline] fn deref<'b>(&'b self) -> &'b String { unsafe { mem::transmute(&*self.x) } } } /// Convert a string slice to a wrapper type providing a `&String` reference. 
/// /// # Examples /// /// ``` /// use std::string::as_string; /// /// fn string_consumer(s: String) { /// assert_eq!(s, "foo".to_string()); /// } /// /// let string = as_string("foo").clone(); /// string_consumer(string); /// ``` #[unstable(feature = "collections")] pub fn as_string<'a>(x: &'a str) -> DerefString<'a> { DerefString { x: as_vec(x.as_bytes()) } } #[unstable(feature = "collections", reason = "associated error type may change")] impl FromStr for String { type Err = (); #[inline] fn from_str(s: &str) -> Result<String, ()> { Ok(String::from_str(s)) } } /// A generic trait for converting a value to a string #[stable(feature = "rust1", since = "1.0.0")] pub trait ToString { /// Converts the value of `self` to an owned string #[stable(feature = "rust1", since = "1.0.0")] fn to_string(&self) -> String; } #[stable(feature = "rust1", since = "1.0.0")] impl<T: fmt::Display + ?Sized> ToString for T { #[inline] fn to_string(&self) -> String { use core::fmt::Write; let mut buf = String::new(); let _ = buf.write_fmt(format_args!("{}", self)); buf.shrink_to_fit(); buf } } impl IntoCow<'static, String, str> for String { #[inline] fn into_cow(self) -> CowString<'static> { Cow::Owned(self) } } impl<'a> IntoCow<'a, String, str> for &'a str { #[inline] fn into_cow(self) -> CowString<'a> { Cow::Borrowed(self) } } /// A clone-on-write string #[stable(feature = "rust1", since = "1.0.0")] pub type CowString<'a> = Cow<'a, String, str>; impl<'a> Str for CowString<'a> { #[inline] fn as_slice<'b>(&'b self) -> &'b str { &**self } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Write for String { #[inline] fn write_str(&mut self, s: &str) -> fmt::Result { self.push_str(s); Ok(()) } } #[cfg(test)] mod tests { use prelude::*; use test::Bencher; use str::Utf8Error; use core::iter::repeat; use super::{as_string, CowString}; #[test] fn test_as_string() { let x = "foo"; assert_eq!(x, &**as_string(x)); } #[test] fn test_from_str() { let owned: Option<::std::string::String> = "string".parse().ok(); assert_eq!(owned.as_ref().map(|s| &**s), Some("string")); } #[test] fn test_unsized_to_string() { let s: &str = "abc"; let _: String = (*s).to_string(); } #[test] fn test_from_utf8() { let xs = b"hello".to_vec(); assert_eq!(String::from_utf8(xs).unwrap(), String::from_str("hello")); let xs = "ศไทย中华Việt Nam".as_bytes().to_vec(); assert_eq!(String::from_utf8(xs).unwrap(), String::from_str("ศไทย中华Việt Nam")); let xs = b"hello\xFF".to_vec(); let err = String::from_utf8(xs).err().unwrap(); assert_eq!(err.utf8_error(), Utf8Error::TooShort); assert_eq!(err.into_bytes(), b"hello\xff".to_vec()); } #[test] fn test_from_utf8_lossy() { let xs = b"hello"; let ys: CowString = "hello".into_cow(); assert_eq!(String::from_utf8_lossy(xs), ys); let xs = "ศไทย中华Việt Nam".as_bytes(); let ys: CowString = "ศไทย中华Việt Nam".into_cow(); assert_eq!(String::from_utf8_lossy(xs), ys); let xs = b"Hello\xC2 There\xFF Goodbye"; assert_eq!(String::from_utf8_lossy(xs), String::from_str("Hello\u{FFFD} There\u{FFFD} Goodbye").into_cow()); let xs = b"Hello\xC0\x80 There\xE6\x83 Goodbye"; assert_eq!(String::from_utf8_lossy(xs), String::from_str("Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye").into_cow()); let xs = b"\xF5foo\xF5\x80bar"; assert_eq!(String::from_utf8_lossy(xs), String::from_str("\u{FFFD}foo\u{FFFD}\u{FFFD}bar").into_cow()); let xs = b"\xF1foo\xF1\x80bar\xF1\x80\x80baz"; assert_eq!(String::from_utf8_lossy(xs), String::from_str("\u{FFFD}foo\u{FFFD}bar\u{FFFD}baz").into_cow()); let xs = b"\xF4foo\xF4\x80bar\xF4\xBFbaz"; 
assert_eq!(String::from_utf8_lossy(xs), String::from_str("\u{FFFD}foo\u{FFFD}bar\u{FFFD}\u{FFFD}baz").into_cow()); let xs = b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar"; assert_eq!(String::from_utf8_lossy(xs), String::from_str("\u{FFFD}\u{FFFD}\u{FFFD}\u{FFFD}\ foo\u{10000}bar").into_cow()); // surrogates let xs = b"\xED\xA0\x80foo\xED\xBF\xBFbar"; assert_eq!(String::from_utf8_lossy(xs), String::from_str("\u{FFFD}\u{FFFD}\u{FFFD}foo\ \u{FFFD}\u{FFFD}\u{FFFD}bar").into_cow()); } #[test] fn test_from_utf16() { let pairs = [(String::from_str("𐍅𐌿𐌻𐍆𐌹𐌻𐌰\n"), vec![0xd800_u16, 0xdf45_u16, 0xd800_u16, 0xdf3f_u16, 0xd800_u16, 0xdf3b_u16, 0xd800_u16, 0xdf46_u16, 0xd800_u16, 0xdf39_u16, 0xd800_u16, 0xdf3b_u16, 0xd800_u16, 0xdf30_u16, 0x000a_u16]), (String::from_str("𐐒𐑉𐐮𐑀𐐲𐑋 𐐏𐐲𐑍\n"), vec![0xd801_u16, 0xdc12_u16, 0xd801_u16, 0xdc49_u16, 0xd801_u16, 0xdc2e_u16, 0xd801_u16, 0xdc40_u16, 0xd801_u16, 0xdc32_u16, 0xd801_u16, 0xdc4b_u16, 0x0020_u16, 0xd801_u16, 0xdc0f_u16, 0xd801_u16, 0xdc32_u16, 0xd801_u16, 0xdc4d_u16, 0x000a_u16]), (String::from_str("𐌀𐌖𐌋𐌄𐌑𐌉·𐌌𐌄𐌕𐌄𐌋𐌉𐌑\n"), vec![0xd800_u16, 0xdf00_u16, 0xd800_u16, 0xdf16_u16, 0xd800_u16, 0xdf0b_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf11_u16, 0xd800_u16, 0xdf09_u16, 0x00b7_u16, 0xd800_u16, 0xdf0c_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf15_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf0b_u16, 0xd800_u16, 0xdf09_u16, 0xd800_u16, 0xdf11_u16, 0x000a_u16 ]), (String::from_str("𐒋𐒘𐒈𐒑𐒛𐒒 𐒕𐒓 𐒈𐒚𐒍 𐒏𐒜𐒒𐒖𐒆 𐒕𐒆\n"), vec![0xd801_u16, 0xdc8b_u16, 0xd801_u16, 0xdc98_u16, 0xd801_u16, 0xdc88_u16, 0xd801_u16, 0xdc91_u16, 0xd801_u16, 0xdc9b_u16, 0xd801_u16, 0xdc92_u16, 0x0020_u16, 0xd801_u16, 0xdc95_u16, 0xd801_u16, 0xdc93_u16, 0x0020_u16, 0xd801_u16, 0xdc88_u16, 0xd801_u16, 0xdc9a_u16, 0xd801_u16, 0xdc8d_u16, 0x0020_u16, 0xd801_u16, 0xdc8f_u16, 0xd801_u16, 0xdc9c_u16, 0xd801_u16, 0xdc92_u16, 0xd801_u16, 0xdc96_u16, 0xd801_u16, 0xdc86_u16, 0x0020_u16, 0xd801_u16, 0xdc95_u16, 0xd801_u16, 0xdc86_u16, 0x000a_u16 ]), // Issue #12318, even-numbered non-BMP planes (String::from_str("\u{20000}"), vec![0xD840, 0xDC00])]; for p in &pairs { let (s, u) = (*p).clone(); let s_as_utf16 = s.utf16_units().collect::<Vec<u16>>(); let u_as_string = String::from_utf16(&u).unwrap(); assert!(::unicode::str::is_utf16(&u)); assert_eq!(s_as_utf16, u); assert_eq!(u_as_string, s); assert_eq!(String::from_utf16_lossy(&u), s); assert_eq!(String::from_utf16(&s_as_utf16).unwrap(), s); assert_eq!(u_as_string.utf16_units().collect::<Vec<u16>>(), u); } } #[test] fn test_utf16_invalid() { // completely positive cases tested above. // lead + eof assert!(String::from_utf16(&[0xD800]).is_err()); // lead + lead assert!(String::from_utf16(&[0xD800, 0xD800]).is_err()); // isolated trail assert!(String::from_utf16(&[0x0061, 0xDC00]).is_err()); // general assert!(String::from_utf16(&[0xD800, 0xd801, 0xdc8b, 0xD800]).is_err()); } #[test] fn test_from_utf16_lossy() { // completely positive cases tested above. 
// lead + eof assert_eq!(String::from_utf16_lossy(&[0xD800]), String::from_str("\u{FFFD}")); // lead + lead assert_eq!(String::from_utf16_lossy(&[0xD800, 0xD800]), String::from_str("\u{FFFD}\u{FFFD}")); // isolated trail assert_eq!(String::from_utf16_lossy(&[0x0061, 0xDC00]), String::from_str("a\u{FFFD}")); // general assert_eq!(String::from_utf16_lossy(&[0xD800, 0xd801, 0xdc8b, 0xD800]), String::from_str("\u{FFFD}𐒋\u{FFFD}")); } #[test] fn test_push_bytes() { let mut s = String::from_str("ABC"); unsafe { let mv = s.as_mut_vec(); mv.push_all(&[b'D']); } assert_eq!(s, "ABCD"); } #[test] fn test_push_str() { let mut s = String::new(); s.push_str(""); assert_eq!(&s[0..], ""); s.push_str("abc"); assert_eq!(&s[0..], "abc"); s.push_str("ประเทศไทย中华Việt Nam"); assert_eq!(&s[0..], "abcประเทศไทย中华Việt Nam"); } #[test] fn test_push() { let mut data = String::from_str("ประเทศไทย中"); data.push('华'); data.push('b'); // 1 byte data.push('¢'); // 2 byte data.push('€'); // 3 byte data.push('𤭢'); // 4 byte assert_eq!(data, "ประเทศไทย中华b¢€𤭢"); } #[test] fn test_pop() { let mut data = String::from_str("ประเทศไทย中华b¢€𤭢"); assert_eq!(data.pop().unwrap(), '𤭢'); // 4 bytes assert_eq!(data.pop().unwrap(), '€'); // 3 bytes assert_eq!(data.pop().unwrap(), '¢'); // 2 bytes assert_eq!(data.pop().unwrap(), 'b'); // 1 bytes assert_eq!(data.pop().unwrap(), '华'); assert_eq!(data, "ประเทศไทย中"); } #[test] fn test_str_truncate() { let mut s = String::from_str("12345"); s.truncate(5); assert_eq!(s, "12345"); s.truncate(3); assert_eq!(s, "123"); s.truncate(0); assert_eq!(s, ""); let mut s = String::from_str("12345"); let p = s.as_ptr(); s.truncate(3); s.push_str("6"); let p_ = s.as_ptr(); assert_eq!(p_, p); } #[test] #[should_fail] fn test_str_truncate_invalid_len() { let mut s = String::from_str("12345"); s.truncate(6); } #[test] #[should_fail] fn test_str_truncate_split_codepoint() { let mut s = String::from_str("\u{FC}"); // ü s.truncate(1); } #[test] fn test_str_clear() { let mut s = String::from_str("12345"); s.clear(); assert_eq!(s.len(), 0); assert_eq!(s, ""); } #[test] fn test_str_add() { let a = String::from_str("12345"); let b = a + "2"; let b = b + "2"; assert_eq!(b.len(), 7); assert_eq!(b, "1234522"); } #[test] fn remove() { let mut s = "ศไทย中华Việt Nam; foobar".to_string();; assert_eq!(s.remove(0), 'ศ'); assert_eq!(s.len(), 33); assert_eq!(s, "ไทย中华Việt Nam; foobar"); assert_eq!(s.remove(17), 'ệ'); assert_eq!(s, "ไทย中华Vit Nam; foobar"); } #[test] #[should_fail] fn remove_bad() { "ศ".to_string().remove(1); } #[test] fn insert() { let mut s = "foobar".to_string(); s.insert(0, 'ệ'); assert_eq!(s, "ệfoobar"); s.insert(6, 'ย'); assert_eq!(s, "ệfooยbar"); } #[test] #[should_fail] fn insert_bad1() { "".to_string().insert(1, 't'); } #[test] #[should_fail] fn insert_bad2() { "ệ".to_string().insert(1, 't'); } #[test] fn test_slicing() { let s = "foobar".to_string(); assert_eq!("foobar", &s[]); assert_eq!("foo", &s[..3]); assert_eq!("bar", &s[3..]); assert_eq!("oob", &s[1..4]); } #[test] fn test_simple_types() { assert_eq!(1.to_string(), "1"); assert_eq!((-1).to_string(), "-1"); assert_eq!(200.to_string(), "200"); assert_eq!(2u8.to_string(), "2"); assert_eq!(true.to_string(), "true"); assert_eq!(false.to_string(), "false"); assert_eq!(("hi".to_string()).to_string(), "hi"); } #[test] fn test_vectors() { let x: Vec<i32> = vec![]; assert_eq!(format!("{:?}", x), "[]"); assert_eq!(format!("{:?}", vec![1]), "[1]"); assert_eq!(format!("{:?}", vec![1, 2, 3]), "[1, 2, 3]"); assert!(format!("{:?}", vec![vec![], vec![1], vec![1, 1]]) 
== "[[], [1], [1, 1]]"); } #[test] fn test_from_iterator() { let s = "ศไทย中华Việt Nam".to_string(); let t = "ศไทย中华"; let u = "Việt Nam"; let a: String = s.chars().collect(); assert_eq!(s, a); let mut b = t.to_string(); b.extend(u.chars()); assert_eq!(s, b); let c: String = vec![t, u].into_iter().collect(); assert_eq!(s, c); let mut d = t.to_string(); d.extend(vec![u].into_iter()); assert_eq!(s, d); } #[bench] fn bench_with_capacity(b: &mut Bencher) { b.iter(|| { String::with_capacity(100) }); } #[bench] fn bench_push_str(b: &mut Bencher) { let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb"; b.iter(|| { let mut r = String::new(); r.push_str(s); }); } const REPETITIONS: u64 = 10_000; #[bench] fn bench_push_str_one_byte(b: &mut Bencher) { b.bytes = REPETITIONS; b.iter(|| { let mut r = String::new(); for _ in 0..REPETITIONS { r.push_str("a") } }); } #[bench] fn bench_push_char_one_byte(b: &mut Bencher) { b.bytes = REPETITIONS; b.iter(|| { let mut r = String::new(); for _ in 0..REPETITIONS { r.push('a') } }); } #[bench] fn bench_push_char_two_bytes(b: &mut Bencher) { b.bytes = REPETITIONS * 2; b.iter(|| { let mut r = String::new(); for _ in 0..REPETITIONS { r.push('â') } }); } #[bench] fn from_utf8_lossy_100_ascii(b: &mut Bencher) { let s = b"Hello there, the quick brown fox jumped over the lazy dog! \ Lorem ipsum dolor sit amet, consectetur. "; assert_eq!(100, s.len()); b.iter(|| { let _ = String::from_utf8_lossy(s); }); } #[bench] fn from_utf8_lossy_100_multibyte(b: &mut Bencher) { let s = "𐌀𐌖𐌋𐌄𐌑𐌉ปรدولة الكويتทศไทย中华𐍅𐌿𐌻𐍆𐌹𐌻𐌰".as_bytes(); assert_eq!(100, s.len()); b.iter(|| { let _ = String::from_utf8_lossy(s); }); } #[bench] fn from_utf8_lossy_invalid(b: &mut Bencher) { let s = b"Hello\xC0\x80 There\xE6\x83 Goodbye"; b.iter(|| { let _ = String::from_utf8_lossy(s); }); } #[bench] fn from_utf8_lossy_100_invalid(b: &mut Bencher) { let s = repeat(0xf5u8).take(100).collect::<Vec<_>>(); b.iter(|| { let _ = String::from_utf8_lossy(&s); }); } #[bench] fn bench_exact_size_shrink_to_fit(b: &mut Bencher) { let s = "Hello there, the quick brown fox jumped over the lazy dog! \ Lorem ipsum dolor sit amet, consectetur. "; // ensure our operation produces an exact-size string before we benchmark it let mut r = String::with_capacity(s.len()); r.push_str(s); assert_eq!(r.len(), r.capacity()); b.iter(|| { let mut r = String::with_capacity(s.len()); r.push_str(s); r.shrink_to_fit(); r }); } }
29.943789
99
0.50912
915986059edb08c6f13efe963aa3974e9f1c9412
134
use compiler::{load_config, run};

#[tokio::main]
async fn main() {
    let config = load_config().unwrap();
    run(config).await;
}
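A common refinement of the `unwrap()` above is a fallible `main` that propagates the error instead of panicking. The sketch below is self-contained so it compiles on its own: `Config`, `load_config`, and `run` are hypothetical stand-ins, since the real `compiler` crate's signatures are not shown here.

use std::error::Error;

// Hypothetical stand-ins for the `compiler` crate's API.
#[derive(Debug)]
struct Config {
    jobs: usize,
}

fn load_config() -> Result<Config, Box<dyn Error>> {
    Ok(Config { jobs: 4 }) // a real loader would read a file or the environment
}

async fn run(config: Config) {
    println!("running with {:?}", config);
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let config = load_config()?; // propagate the error instead of panicking
    run(config).await;
    Ok(())
}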
16.75
40
0.634328
0e493c4afc3b89294049e7614c4848b561bbdd45
51,060
use clap::{ crate_description, crate_name, value_t, value_t_or_exit, values_t, values_t_or_exit, App, Arg, ArgMatches, }; use log::*; use rand::{thread_rng, Rng}; use solana_clap_utils::{ input_parsers::{keypair_of, keypairs_of, pubkey_of}, input_validators::{ is_keypair_or_ask_keyword, is_parsable, is_pubkey, is_pubkey_or_keypair, is_slot, }, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, }; use solana_client::rpc_client::RpcClient; use solana_core::ledger_cleanup_service::{ DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS, }; use solana_core::{ cluster_info::{ClusterInfo, Node, MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}, contact_info::ContactInfo, gossip_service::GossipService, rpc::JsonRpcConfig, validator::{Validator, ValidatorConfig}, }; use solana_download_utils::{download_genesis_if_missing, download_snapshot}; use solana_perf::recycler::enable_recycler_warming; use solana_runtime::{ bank_forks::{CompressionType, SnapshotConfig, SnapshotVersion}, hardened_unpack::{unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, }; use solana_sdk::{ clock::Slot, commitment_config::CommitmentConfig, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey, signature::{Keypair, Signer}, }; use std::{ collections::HashSet, env, fs::{self, File}, net::{SocketAddr, TcpListener, UdpSocket}, path::PathBuf, process::exit, str::FromStr, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, thread::{sleep, JoinHandle}, time::{Duration, Instant}, }; fn port_validator(port: String) -> Result<(), String> { port.parse::<u16>() .map(|_| ()) .map_err(|e| format!("{:?}", e)) } fn port_range_validator(port_range: String) -> Result<(), String> { if let Some((start, end)) = solana_net_utils::parse_port_range(&port_range) { if end - start < MINIMUM_VALIDATOR_PORT_RANGE_WIDTH { Err(format!( "Port range is too small. Try --dynamic-port-range {}-{}", start, start + MINIMUM_VALIDATOR_PORT_RANGE_WIDTH )) } else { Ok(()) } } else { Err("Invalid port range".to_string()) } } fn hash_validator(hash: String) -> Result<(), String> { Hash::from_str(&hash) .map(|_| ()) .map_err(|e| format!("{:?}", e)) } fn get_shred_rpc_peers( cluster_info: &ClusterInfo, expected_shred_version: Option<u16>, ) -> Vec<ContactInfo> { let rpc_peers = cluster_info.all_rpc_peers(); match expected_shred_version { Some(expected_shred_version) => { // Filter out rpc peers that don't match the expected shred version rpc_peers .into_iter() .filter(|contact_info| contact_info.shred_version == expected_shred_version) .collect::<Vec<_>>() } None => { if !rpc_peers .iter() .all(|contact_info| contact_info.shred_version == rpc_peers[0].shred_version) { eprintln!( "Multiple shred versions observed in gossip. 
Restart with --expected-shred-version" ); exit(1); } rpc_peers } } } fn is_trusted_validator(id: &Pubkey, trusted_validators: &Option<HashSet<Pubkey>>) -> bool { if let Some(trusted_validators) = trusted_validators { trusted_validators.contains(id) } else { false } } fn get_trusted_snapshot_hashes( cluster_info: &ClusterInfo, trusted_validators: &Option<HashSet<Pubkey>>, ) -> Option<HashSet<(Slot, Hash)>> { if let Some(trusted_validators) = trusted_validators { let mut trusted_snapshot_hashes = HashSet::new(); for trusted_validator in trusted_validators { cluster_info.get_snapshot_hash_for_node(trusted_validator, |snapshot_hashes| { for snapshot_hash in snapshot_hashes { trusted_snapshot_hashes.insert(*snapshot_hash); } }); } Some(trusted_snapshot_hashes) } else { None } } fn start_gossip_node( identity_keypair: &Arc<Keypair>, entrypoint_gossip: &SocketAddr, gossip_addr: &SocketAddr, gossip_socket: UdpSocket, expected_shred_version: Option<u16>, ) -> (Arc<ClusterInfo>, Arc<AtomicBool>, GossipService) { let cluster_info = ClusterInfo::new( ClusterInfo::gossip_contact_info( &identity_keypair.pubkey(), *gossip_addr, expected_shred_version.unwrap_or(0), ), identity_keypair.clone(), ); cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint_gossip)); let cluster_info = Arc::new(cluster_info); let gossip_exit_flag = Arc::new(AtomicBool::new(false)); let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &gossip_exit_flag); (cluster_info, gossip_exit_flag, gossip_service) } fn get_rpc_node( cluster_info: &ClusterInfo, validator_config: &ValidatorConfig, blacklisted_rpc_nodes: &mut HashSet<Pubkey>, snapshot_not_required: bool, no_untrusted_rpc: bool, ) -> (ContactInfo, Option<(Slot, Hash)>) { let mut blacklist_timeout = Instant::now(); loop { info!( "Searching for an RPC service, shred version={:?}...", validator_config.expected_shred_version ); sleep(Duration::from_secs(1)); info!("\n{}", cluster_info.contact_info_trace()); let rpc_peers = get_shred_rpc_peers(&cluster_info, validator_config.expected_shred_version); let rpc_peers_total = rpc_peers.len(); // Filter out blacklisted nodes let rpc_peers: Vec<_> = rpc_peers .into_iter() .filter(|rpc_peer| !blacklisted_rpc_nodes.contains(&rpc_peer.id)) .collect(); let rpc_peers_blacklisted = rpc_peers_total - rpc_peers.len(); let rpc_peers_trusted = rpc_peers .iter() .filter(|rpc_peer| { is_trusted_validator(&rpc_peer.id, &validator_config.trusted_validators) }) .count(); info!( "Total {} RPC nodes found. 
{} trusted, {} blacklisted ", rpc_peers_total, rpc_peers_trusted, rpc_peers_blacklisted ); if rpc_peers_blacklisted == rpc_peers_total { // If all nodes are blacklisted and no additional nodes are discovered after 60 seconds, // remove the blacklist and try them all again if blacklist_timeout.elapsed().as_secs() > 60 { info!("Blacklist timeout expired"); blacklisted_rpc_nodes.clear(); } continue; } blacklist_timeout = Instant::now(); let mut highest_snapshot_hash: Option<(Slot, Hash)> = None; let eligible_rpc_peers = if snapshot_not_required { rpc_peers } else { let trusted_snapshot_hashes = get_trusted_snapshot_hashes(&cluster_info, &validator_config.trusted_validators); let mut eligible_rpc_peers = vec![]; for rpc_peer in rpc_peers.iter() { if no_untrusted_rpc && !is_trusted_validator(&rpc_peer.id, &validator_config.trusted_validators) { continue; } cluster_info.get_snapshot_hash_for_node(&rpc_peer.id, |snapshot_hashes| { for snapshot_hash in snapshot_hashes { if let Some(ref trusted_snapshot_hashes) = trusted_snapshot_hashes { if !trusted_snapshot_hashes.contains(snapshot_hash) { // Ignore all untrusted snapshot hashes continue; } } if highest_snapshot_hash.is_none() || snapshot_hash.0 > highest_snapshot_hash.unwrap().0 { // Found a higher snapshot, remove all nodes with a lower snapshot eligible_rpc_peers.clear(); highest_snapshot_hash = Some(*snapshot_hash) } if Some(*snapshot_hash) == highest_snapshot_hash { eligible_rpc_peers.push(rpc_peer.clone()); } } }); } match highest_snapshot_hash { None => { assert!(eligible_rpc_peers.is_empty()); info!("No snapshots available"); } Some(highest_snapshot_hash) => { info!( "Highest available snapshot slot is {}, available from {} node{}: {:?}", highest_snapshot_hash.0, eligible_rpc_peers.len(), if eligible_rpc_peers.len() > 1 { "s" } else { "" }, eligible_rpc_peers .iter() .map(|contact_info| contact_info.id) .collect::<Vec<_>>() ); } } eligible_rpc_peers }; if !eligible_rpc_peers.is_empty() { let contact_info = &eligible_rpc_peers[thread_rng().gen_range(0, eligible_rpc_peers.len())]; return (contact_info.clone(), highest_snapshot_hash); } } } fn check_vote_account( rpc_client: &RpcClient, identity_pubkey: &Pubkey, vote_account_address: &Pubkey, authorized_voter_pubkeys: &[Pubkey], ) -> Result<(), String> { let vote_account = rpc_client .get_account_with_commitment(vote_account_address, CommitmentConfig::root()) .map_err(|err| format!("failed to fetch vote account: {}", err.to_string()))? .value .ok_or_else(|| format!("vote account does not exist: {}", vote_account_address))?; if vote_account.owner != solana_vote_program::id() { return Err(format!( "not a vote account (owned by {}): {}", vote_account.owner, vote_account_address )); } let identity_account = rpc_client .get_account_with_commitment(identity_pubkey, CommitmentConfig::root()) .map_err(|err| format!("failed to fetch identity account: {}", err.to_string()))? 
.value .ok_or_else(|| format!("identity account does not exist: {}", identity_pubkey))?; let vote_state = solana_vote_program::vote_state::VoteState::from(&vote_account); if let Some(vote_state) = vote_state { if vote_state.authorized_voters().is_empty() { return Err("Vote account not yet initialized".to_string()); } if vote_state.node_pubkey != *identity_pubkey { return Err(format!( "vote account's identity ({}) does not match the validator's identity {}).", vote_state.node_pubkey, identity_pubkey )); } for (_, vote_account_authorized_voter_pubkey) in vote_state.authorized_voters().iter() { if !authorized_voter_pubkeys.contains(&vote_account_authorized_voter_pubkey) { return Err(format!( "authorized voter {} not available", vote_account_authorized_voter_pubkey )); } } } else { return Err(format!( "invalid vote account data for {}", vote_account_address )); } // Maybe we can calculate minimum voting fee; rather than 1 lamport if identity_account.lamports <= 1 { return Err(format!( "underfunded identity account ({}): only {} lamports available", identity_pubkey, identity_account.lamports )); } Ok(()) } // This function is duplicated in ledger-tool/src/main.rs... fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> { if matches.is_present(name) { Some(values_t_or_exit!(matches, name, Slot)) } else { None } } fn check_genesis_hash( genesis_config: &GenesisConfig, expected_genesis_hash: Option<Hash>, ) -> Result<(), String> { let genesis_hash = genesis_config.hash(); if let Some(expected_genesis_hash) = expected_genesis_hash { if expected_genesis_hash != genesis_hash { return Err(format!( "Genesis hash mismatch: expected {} but downloaded genesis hash is {}", expected_genesis_hash, genesis_hash, )); } } Ok(()) } fn download_then_check_genesis_hash( rpc_addr: &SocketAddr, ledger_path: &std::path::Path, expected_genesis_hash: Option<Hash>, max_genesis_archive_unpacked_size: u64, ) -> Result<Hash, String> { let genesis_package = ledger_path.join("genesis.tar.bz2"); let genesis_config = if let Ok(tmp_genesis_package) = download_genesis_if_missing(rpc_addr, &genesis_package) { unpack_genesis_archive( &tmp_genesis_package, &ledger_path, max_genesis_archive_unpacked_size, ) .map_err(|err| format!("Failed to unpack downloaded genesis config: {}", err))?; let downloaded_genesis = GenesisConfig::load(&ledger_path) .map_err(|err| format!("Failed to load downloaded genesis config: {}", err))?; check_genesis_hash(&downloaded_genesis, expected_genesis_hash)?; std::fs::rename(tmp_genesis_package, genesis_package) .map_err(|err| format!("Unable to rename: {:?}", err))?; downloaded_genesis } else { let existing_genesis = GenesisConfig::load(&ledger_path) .map_err(|err| format!("Failed to load genesis config: {}", err))?; check_genesis_hash(&existing_genesis, expected_genesis_hash)?; existing_genesis }; Ok(genesis_config.hash()) } fn is_snapshot_config_invalid( snapshot_interval_slots: u64, accounts_hash_interval_slots: u64, ) -> bool { snapshot_interval_slots != 0 && (snapshot_interval_slots < accounts_hash_interval_slots || snapshot_interval_slots % accounts_hash_interval_slots != 0) } #[cfg(unix)] fn redirect_stderr(filename: &str) { use std::{fs::OpenOptions, os::unix::io::AsRawFd}; match OpenOptions::new() .write(true) .create(true) .append(true) .open(filename) { Ok(file) => unsafe { libc::dup2(file.as_raw_fd(), libc::STDERR_FILENO); }, Err(err) => eprintln!("Unable to open {}: {}", filename, err), } } fn start_logger(logfile: Option<String>) -> Option<JoinHandle<()>> { let 
logger_thread = match logfile { None => None, Some(logfile) => { #[cfg(unix)] { let signals = signal_hook::iterator::Signals::new(&[signal_hook::SIGUSR1]) .unwrap_or_else(|err| { eprintln!("Unable to register SIGUSR1 handler: {:?}", err); exit(1); }); redirect_stderr(&logfile); Some(std::thread::spawn(move || { for signal in signals.forever() { info!( "received SIGUSR1 ({}), reopening log file: {:?}", signal, logfile ); redirect_stderr(&logfile); } })) } #[cfg(not(unix))] { println!("logging to a file is not supported on this platform"); () } } }; solana_logger::setup_with_default( &[ "solana=info", /* info logging for all solana modules */ "rpc=trace", /* json_rpc request/response logging */ ] .join(","), ); logger_thread } #[allow(clippy::cognitive_complexity)] pub fn main() { let default_dynamic_port_range = &format!("{}-{}", VALIDATOR_PORT_RANGE.0, VALIDATOR_PORT_RANGE.1); let default_limit_ledger_size = &DEFAULT_MAX_LEDGER_SHREDS.to_string(); let default_genesis_archive_unpacked_size = &MAX_GENESIS_ARCHIVE_UNPACKED_SIZE.to_string(); let matches = App::new(crate_name!()).about(crate_description!()) .version(solana_version::version!()) .arg( Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name) .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), ) .arg( Arg::with_name("identity") .short("i") .long("identity") .value_name("PATH") .takes_value(true) .validator(is_keypair_or_ask_keyword) .help("Validator identity keypair"), ) .arg( Arg::with_name("authorized_voter_keypairs") .long("authorized-voter") .value_name("PATH") .takes_value(true) .validator(is_keypair_or_ask_keyword) .requires("vote_account") .multiple(true) .help("Include an additional authorized voter keypair. \ May be specified multiple times. \ [default: the --identity keypair]"), ) .arg( Arg::with_name("vote_account") .long("vote-account") .value_name("PUBKEY") .takes_value(true) .validator(is_pubkey_or_keypair) .requires("identity") .help("Validator vote account public key. \ If unspecified voting will be disabled. 
\ The authorized voter for the account must either be the \ --identity keypair or with the --authorized-voter argument") ) .arg( Arg::with_name("init_complete_file") .long("init-complete-file") .value_name("FILE") .takes_value(true) .help("Create this file if it doesn't already exist \ once node initialization is complete"), ) .arg( Arg::with_name("ledger_path") .short("l") .long("ledger") .value_name("DIR") .takes_value(true) .required(true) .help("Use DIR as persistent ledger location"), ) .arg( Arg::with_name("entrypoint") .short("n") .long("entrypoint") .value_name("HOST:PORT") .takes_value(true) .validator(solana_net_utils::is_host_port) .help("Rendezvous with the cluster at this gossip entrypoint"), ) .arg( Arg::with_name("no_snapshot_fetch") .long("no-snapshot-fetch") .takes_value(false) .requires("entrypoint") .help("Do not attempt to fetch a snapshot from the cluster, \ start from a local snapshot if present"), ) .arg( Arg::with_name("no_genesis_fetch") .long("no-genesis-fetch") .takes_value(false) .requires("entrypoint") .help("Do not fetch genesis from the cluster"), ) .arg( Arg::with_name("no_voting") .long("no-voting") .takes_value(false) .help("Launch node without voting"), ) .arg( Arg::with_name("no_check_vote_account") .long("no-check-vote-account") .takes_value(false) .conflicts_with("no_voting") .requires("entrypoint") .help("Skip the RPC vote account sanity check") ) .arg( Arg::with_name("dev_halt_at_slot") .long("dev-halt-at-slot") .value_name("SLOT") .validator(is_slot) .takes_value(true) .help("Halt the validator when it reaches the given slot"), ) .arg( Arg::with_name("rpc_port") .long("rpc-port") .value_name("PORT") .takes_value(true) .validator(port_validator) .help("Use this port for JSON RPC, and the next port for the RPC websocket"), ) .arg( Arg::with_name("private_rpc") .long("--private-rpc") .takes_value(false) .help("Do not publish the RPC port for use by other nodes") ) .arg( Arg::with_name("enable_rpc_exit") .long("enable-rpc-exit") .takes_value(false) .help("Enable the JSON RPC 'validatorExit' API. \ Only enable in a debug environment"), ) .arg( Arg::with_name("enable_rpc_set_log_filter") .long("enable-rpc-set-log-filter") .takes_value(false) .help("Enable the JSON RPC 'setLogFilter' API. \ Only enable in a debug environment"), ) .arg( Arg::with_name("enable_rpc_transaction_history") .long("enable-rpc-transaction-history") .takes_value(false) .help("Enable historical transaction info over JSON RPC, \ including the 'getConfirmedBlock' API. \ This will cause an increase in disk usage and IOPS"), ) .arg( Arg::with_name("health_check_slot_distance") .long("health-check-slot-distance") .value_name("SLOT_DISTANCE") .takes_value(true) .default_value("150") .help("If --trusted-validators are specified, report this validator healthy \ if its latest account hash is no further behind than this number of \ slots from the latest trusted validator account hash. 
\ If no --trusted-validators are specified, the validator will always \ report itself to be healthy") ) .arg( Arg::with_name("rpc_faucet_addr") .long("rpc-faucet-address") .value_name("HOST:PORT") .takes_value(true) .validator(solana_net_utils::is_host_port) .help("Enable the JSON RPC 'requestAirdrop' API with this faucet address."), ) .arg( Arg::with_name("signer_addr") .long("vote-signer-address") .value_name("HOST:PORT") .takes_value(true) .hidden(true) // Don't document this argument to discourage its use .validator(solana_net_utils::is_host_port) .help("Rendezvous with the vote signer at this RPC end point"), ) .arg( Arg::with_name("account_paths") .long("accounts") .value_name("PATHS") .takes_value(true) .help("Comma separated persistent accounts location"), ) .arg( Arg::with_name("gossip_port") .long("gossip-port") .value_name("PORT") .takes_value(true) .help("Gossip port number for the node"), ) .arg( Arg::with_name("gossip_host") .long("gossip-host") .value_name("HOST") .takes_value(true) .conflicts_with("entrypoint") .validator(solana_net_utils::is_host) .help("IP address for the node to advertise in gossip when \ --entrypoint is not provided [default: 127.0.0.1]"), ) .arg( Arg::with_name("dynamic_port_range") .long("dynamic-port-range") .value_name("MIN_PORT-MAX_PORT") .takes_value(true) .default_value(default_dynamic_port_range) .validator(port_range_validator) .help("Range to use for dynamically assigned ports"), ) .arg( Arg::with_name("snapshot_interval_slots") .long("snapshot-interval-slots") .value_name("SNAPSHOT_INTERVAL_SLOTS") .takes_value(true) .default_value("100") .help("Number of slots between generating snapshots, \ 0 to disable snapshots"), ) .arg( Arg::with_name("accounts_hash_interval_slots") .long("accounts-hash-slots") .value_name("ACCOUNTS_HASH_INTERVAL_SLOTS") .takes_value(true) .default_value("100") .help("Number of slots between generating accounts hash."), ) .arg( Arg::with_name("snapshot_version") .long("snapshot-version") .value_name("SNAPSHOT_VERSION") .validator(is_parsable::<SnapshotVersion>) .takes_value(true) .default_value(SnapshotVersion::default().into()) .help("Output snapshot version"), ) .arg( Arg::with_name("limit_ledger_size") .long("limit-ledger-size") .value_name("SHRED_COUNT") .takes_value(true) .min_values(0) .max_values(1) .default_value(default_limit_ledger_size) .help("Keep this amount of shreds in root slots."), ) .arg( Arg::with_name("skip_poh_verify") .long("skip-poh-verify") .takes_value(false) .help("Skip ledger verification at node bootup"), ) .arg( Arg::with_name("cuda") .long("cuda") .takes_value(false) .help("Use CUDA"), ) .arg( Arg::with_name("expected_genesis_hash") .long("expected-genesis-hash") .value_name("HASH") .takes_value(true) .validator(hash_validator) .help("Require the genesis have this hash"), ) .arg( Arg::with_name("expected_shred_version") .long("expected-shred-version") .value_name("VERSION") .takes_value(true) .help("Require the shred version be this value"), ) .arg( Arg::with_name("logfile") .short("o") .long("log") .value_name("FILE") .takes_value(true) .help("Redirect logging to the specified file, '-' for standard error. 
\ Sending the SIGUSR1 signal to the validator process will cause it \ to re-open the log file"), ) .arg( Arg::with_name("wait_for_supermajority") .long("wait-for-supermajority") .value_name("SLOT") .validator(is_slot) .help("After processing the ledger and the next slot is SLOT, wait until a supermajority of stake is visible on gossip before starting PoH"), ) .arg( Arg::with_name("hard_forks") .long("hard-fork") .value_name("SLOT") .validator(is_slot) .multiple(true) .takes_value(true) .help("Add a hard fork at this slot"), ) .arg( Arg::with_name("trusted_validators") .long("trusted-validator") .validator(is_pubkey) .value_name("PUBKEY") .multiple(true) .takes_value(true) .help("A snapshot hash must be published in gossip by this validator to be accepted. \ May be specified multiple times. If unspecified any snapshot hash will be accepted"), ) .arg( Arg::with_name("no_untrusted_rpc") .long("no-untrusted-rpc") .takes_value(false) .help("Use the RPC service of trusted validators only") ) .arg( Arg::with_name("no_rocksdb_compaction") .long("no-rocksdb-compaction") .takes_value(false) .help("Disable manual compaction of the ledger database. May increase storage requirements.") ) .arg( Arg::with_name("bind_address") .long("bind-address") .value_name("HOST") .takes_value(true) .validator(solana_net_utils::is_host) .default_value("0.0.0.0") .help("IP address to bind the validator ports"), ) .arg( Arg::with_name("rpc_bind_address") .long("rpc-bind-address") .value_name("HOST") .takes_value(true) .validator(solana_net_utils::is_host) .help("IP address to bind the RPC port [default: use --bind-address]"), ) .arg( Arg::with_name("halt_on_trusted_validators_accounts_hash_mismatch") .long("halt-on-trusted-validators-accounts-hash-mismatch") .requires("trusted_validators") .takes_value(false) .help("Abort the validator if a bank hash mismatch is detected within trusted validator set"), ) .arg( Arg::with_name("frozen_accounts") .long("frozen-account") .validator(is_pubkey) .value_name("PUBKEY") .multiple(true) .takes_value(true) .help("Freeze the specified account. 
This will cause the validator to \ intentionally crash should any transaction modify the frozen account in any way \ other than increasing the account balance"), ) .arg( Arg::with_name("snapshot_compression") .long("snapshot-compression") .possible_values(&["bz2", "gzip", "zstd", "none"]) .value_name("COMPRESSION_TYPE") .takes_value(true) .help("Type of snapshot compression to use."), ) .arg( Arg::with_name("max_genesis_archive_unpacked_size") .long("max-genesis-archive-unpacked-size") .value_name("NUMBER") .takes_value(true) .default_value(&default_genesis_archive_unpacked_size) .help( "maximum total uncompressed file size of downloaded genesis archive", ), ) .get_matches(); let identity_keypair = Arc::new(keypair_of(&matches, "identity").unwrap_or_else(Keypair::new)); let authorized_voter_keypairs = keypairs_of(&matches, "authorized_voter_keypairs") .map(|keypairs| keypairs.into_iter().map(Arc::new).collect()) .unwrap_or_else(|| vec![identity_keypair.clone()]); let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap()); let init_complete_file = matches.value_of("init_complete_file"); let skip_poh_verify = matches.is_present("skip_poh_verify"); let cuda = matches.is_present("cuda"); let no_genesis_fetch = matches.is_present("no_genesis_fetch"); let no_snapshot_fetch = matches.is_present("no_snapshot_fetch"); let no_check_vote_account = matches.is_present("no_check_vote_account"); let private_rpc = matches.is_present("private_rpc"); let no_rocksdb_compaction = matches.is_present("no_rocksdb_compaction"); // Canonicalize ledger path to avoid issues with symlink creation let _ = fs::create_dir_all(&ledger_path); let ledger_path = fs::canonicalize(&ledger_path).unwrap_or_else(|err| { eprintln!("Unable to access ledger path: {:?}", err); exit(1); }); let no_untrusted_rpc = matches.is_present("no_untrusted_rpc"); let trusted_validators = if matches.is_present("trusted_validators") { let trusted_validators: HashSet<_> = values_t_or_exit!(matches, "trusted_validators", Pubkey) .into_iter() .collect(); if trusted_validators.contains(&identity_keypair.pubkey()) { eprintln!( "The validator's identity pubkey cannot be a --trusted-validator: {}", identity_keypair.pubkey() ); exit(1); } Some(trusted_validators) } else { None }; let mut validator_config = ValidatorConfig { dev_halt_at_slot: value_t!(matches, "dev_halt_at_slot", Slot).ok(), expected_genesis_hash: matches .value_of("expected_genesis_hash") .map(|s| Hash::from_str(&s).unwrap()), expected_shred_version: value_t!(matches, "expected_shred_version", u16).ok(), new_hard_forks: hardforks_of(&matches, "hard_forks"), rpc_config: JsonRpcConfig { enable_validator_exit: matches.is_present("enable_rpc_exit"), enable_set_log_filter: matches.is_present("enable_rpc_set_log_filter"), enable_rpc_transaction_history: matches.is_present("enable_rpc_transaction_history"), identity_pubkey: identity_keypair.pubkey(), faucet_addr: matches.value_of("rpc_faucet_addr").map(|address| { solana_net_utils::parse_host_port(address).expect("failed to parse faucet address") }), health_check_slot_distance: value_t_or_exit!( matches, "health_check_slot_distance", u64 ), }, rpc_ports: value_t!(matches, "rpc_port", u16) .ok() .map(|rpc_port| (rpc_port, rpc_port + 1)), voting_disabled: matches.is_present("no_voting"), wait_for_supermajority: value_t!(matches, "wait_for_supermajority", Slot).ok(), trusted_validators, frozen_accounts: values_t!(matches, "frozen_accounts", Pubkey).unwrap_or_default(), no_rocksdb_compaction, ..ValidatorConfig::default() }; let 
vote_account = pubkey_of(&matches, "vote_account").unwrap_or_else(|| { warn!("--vote-account not specified, validator will not vote"); validator_config.voting_disabled = true; Keypair::new().pubkey() }); let dynamic_port_range = solana_net_utils::parse_port_range(matches.value_of("dynamic_port_range").unwrap()) .expect("invalid dynamic_port_range"); let bind_address = solana_net_utils::parse_host(matches.value_of("bind_address").unwrap()) .expect("invalid bind_address"); let rpc_bind_address = if matches.is_present("rpc_bind_address") { solana_net_utils::parse_host(matches.value_of("rpc_bind_address").unwrap()) .expect("invalid rpc_bind_address") } else { bind_address }; let account_paths = if let Some(account_paths) = matches.value_of("account_paths") { account_paths.split(',').map(PathBuf::from).collect() } else { vec![ledger_path.join("accounts")] }; // Create and canonicalize account paths to avoid issues with symlink creation validator_config.account_paths = account_paths .into_iter() .map(|account_path| { match fs::create_dir_all(&account_path).and_then(|_| fs::canonicalize(&account_path)) { Ok(account_path) => account_path, Err(err) => { eprintln!( "Unable to access account path: {:?}, err: {:?}", account_path, err ); exit(1); } } }) .collect(); let snapshot_interval_slots = value_t_or_exit!(matches, "snapshot_interval_slots", u64); let snapshot_path = ledger_path.join("snapshot"); fs::create_dir_all(&snapshot_path).unwrap_or_else(|err| { eprintln!( "Failed to create snapshots directory {:?}: {}", snapshot_path, err ); exit(1); }); let mut snapshot_compression = CompressionType::Bzip2; if let Ok(compression_str) = value_t!(matches, "snapshot_compression", String) { match compression_str.as_str() { "bz2" => snapshot_compression = CompressionType::Bzip2, "gzip" => snapshot_compression = CompressionType::Gzip, "zstd" => snapshot_compression = CompressionType::Zstd, "none" => snapshot_compression = CompressionType::NoCompression, _ => panic!("Compression type not recognized: {}", compression_str), } } let snapshot_version = matches .value_of("snapshot_version") .map_or(SnapshotVersion::default(), |s| { s.parse::<SnapshotVersion>().unwrap_or_else(|err| { eprintln!("Error: {}", err); exit(1) }) }); validator_config.snapshot_config = Some(SnapshotConfig { snapshot_interval_slots: if snapshot_interval_slots > 0 { snapshot_interval_slots } else { std::u64::MAX }, snapshot_path, snapshot_package_output_path: ledger_path.clone(), compression: snapshot_compression, snapshot_version, }); validator_config.accounts_hash_interval_slots = value_t_or_exit!(matches, "accounts_hash_interval_slots", u64); if validator_config.accounts_hash_interval_slots == 0 { eprintln!("Accounts hash interval should not be 0."); exit(1); } if is_snapshot_config_invalid( snapshot_interval_slots, validator_config.accounts_hash_interval_slots, ) { eprintln!("Invalid snapshot interval provided ({}), must be a multiple of accounts_hash_interval_slots ({})", snapshot_interval_slots, validator_config.accounts_hash_interval_slots, ); exit(1); } if matches.is_present("limit_ledger_size") { let limit_ledger_size = value_t_or_exit!(matches, "limit_ledger_size", u64); if limit_ledger_size < DEFAULT_MIN_MAX_LEDGER_SHREDS { eprintln!( "The provided --limit-ledger-size value was too small, the minimum value is {}", DEFAULT_MIN_MAX_LEDGER_SHREDS ); exit(1); } validator_config.max_ledger_shreds = Some(limit_ledger_size); } if matches.is_present("halt_on_trusted_validators_accounts_hash_mismatch") { 
validator_config.halt_on_trusted_validators_accounts_hash_mismatch = true; } if matches.value_of("signer_addr").is_some() { warn!("--vote-signer-address ignored"); } let logfile = { let logfile = matches .value_of("logfile") .map(|s| s.into()) .unwrap_or_else(|| format!("solana-validator-{}.log", identity_keypair.pubkey())); if logfile == "-" { None } else { println!("log file: {}", logfile); Some(logfile) } }; let _logger_thread = start_logger(logfile); // Default to RUST_BACKTRACE=1 for more informative validator logs if env::var_os("RUST_BACKTRACE").is_none() { env::set_var("RUST_BACKTRACE", "1") } info!("{} {}", crate_name!(), solana_version::version!()); info!("Starting validator with: {:#?}", std::env::args_os()); solana_metrics::set_host_id(identity_keypair.pubkey().to_string()); solana_metrics::set_panic_hook("validator"); if cuda { solana_perf::perf_libs::init_cuda(); enable_recycler_warming(); } solana_ledger::entry::init_poh(); let entrypoint_addr = matches.value_of("entrypoint").map(|entrypoint| { solana_net_utils::parse_host_port(entrypoint).unwrap_or_else(|e| { eprintln!("failed to parse entrypoint address: {}", e); exit(1); }) }); let gossip_host = if let Some(entrypoint_addr) = entrypoint_addr { let ip_addr = solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap_or_else(|err| { eprintln!( "Failed to contact cluster entrypoint {}: {}", entrypoint_addr, err ); exit(1); }); info!( "{} reports the IP address for this machine as {}", entrypoint_addr, ip_addr ); ip_addr } else { solana_net_utils::parse_host(matches.value_of("gossip_host").unwrap_or("127.0.0.1")) .unwrap_or_else(|err| { eprintln!("Error: {}", err); exit(1); }) }; let gossip_addr = SocketAddr::new( gossip_host, value_t!(matches, "gossip_port", u16).unwrap_or_else(|_| { solana_net_utils::find_available_port_in_range(bind_address, (0, 1)).unwrap_or_else( |err| { eprintln!("Unable to find an available gossip port: {}", err); exit(1); }, ) }), ); let max_genesis_archive_unpacked_size = value_t_or_exit!(matches, "max_genesis_archive_unpacked_size", u64); let cluster_entrypoint = entrypoint_addr .as_ref() .map(ContactInfo::new_gossip_entry_point); let mut node = Node::new_with_external_ip( &identity_keypair.pubkey(), &gossip_addr, dynamic_port_range, bind_address, ); if !private_rpc { if let Some((rpc_port, rpc_pubsub_port)) = validator_config.rpc_ports { node.info.rpc = SocketAddr::new(node.info.gossip.ip(), rpc_port); node.info.rpc_pubsub = SocketAddr::new(node.info.gossip.ip(), rpc_pubsub_port); } } if let Some(ref cluster_entrypoint) = cluster_entrypoint { let mut udp_sockets = vec![ &node.sockets.gossip, &node.sockets.repair, &node.sockets.serve_repair, ]; udp_sockets.extend(node.sockets.tpu.iter()); udp_sockets.extend(node.sockets.tpu_forwards.iter()); udp_sockets.extend(node.sockets.tvu.iter()); udp_sockets.extend(node.sockets.tvu_forwards.iter()); udp_sockets.extend(node.sockets.broadcast.iter()); udp_sockets.extend(node.sockets.retransmit_sockets.iter()); let mut tcp_listeners = vec![]; if !private_rpc { if let Some((rpc_port, rpc_pubsub_port)) = validator_config.rpc_ports { for (purpose, port) in &[("RPC", rpc_port), ("RPC pubsub", rpc_pubsub_port)] { tcp_listeners.push(( *port, TcpListener::bind(&SocketAddr::from((rpc_bind_address, *port))) .unwrap_or_else(|err| { error!("Unable to bind to tcp/{} for {}: {}", port, purpose, err); exit(1); }), )); } } } if let Some(ip_echo) = &node.sockets.ip_echo { let ip_echo = ip_echo.try_clone().expect("unable to clone tcp_listener"); 
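// Note (added): the ip_echo TCP listener is already bound on the gossip port,
// so cloning it and pushing it below lets verify_reachable_ports() confirm the
// entrypoint can reach this node over TCP on the gossip port as well as on the
// RPC listeners registered above, without binding the port twice.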
tcp_listeners.push((node.info.gossip.port(), ip_echo)); } if !solana_net_utils::verify_reachable_ports( &cluster_entrypoint.gossip, tcp_listeners, &udp_sockets, ) { exit(1); } if !no_genesis_fetch { let (cluster_info, gossip_exit_flag, gossip_service) = start_gossip_node( &identity_keypair, &cluster_entrypoint.gossip, &node.info.gossip, node.sockets.gossip.try_clone().unwrap(), validator_config.expected_shred_version, ); let mut blacklisted_rpc_nodes = HashSet::new(); loop { let (rpc_contact_info, snapshot_hash) = get_rpc_node( &cluster_info, &validator_config, &mut blacklisted_rpc_nodes, no_snapshot_fetch, no_untrusted_rpc, ); info!( "Using RPC service from node {}: {:?}", rpc_contact_info.id, rpc_contact_info.rpc ); let rpc_client = RpcClient::new_socket(rpc_contact_info.rpc); let result = match rpc_client.get_version() { Ok(rpc_version) => { info!("RPC node version: {}", rpc_version.solana_core); Ok(()) } Err(err) => Err(format!("Failed to get RPC node version: {}", err)), } .and_then(|_| { let genesis_hash = download_then_check_genesis_hash( &rpc_contact_info.rpc, &ledger_path, validator_config.expected_genesis_hash, max_genesis_archive_unpacked_size, ); if let Ok(genesis_hash) = genesis_hash { if validator_config.expected_genesis_hash.is_none() { info!("Expected genesis hash set to {}", genesis_hash); validator_config.expected_genesis_hash = Some(genesis_hash); } } genesis_hash }) .and_then(|_| { if let Some(expected_genesis_hash) = validator_config.expected_genesis_hash { // Sanity check that the RPC node is using the expected genesis hash before // downloading a snapshot from it let rpc_genesis_hash = rpc_client .get_genesis_hash() .map_err(|err| format!("Failed to get genesis hash: {}", err))?; if expected_genesis_hash != rpc_genesis_hash { return Err(format!("Genesis hash mismatch: expected {} but RPC node genesis hash is {}", expected_genesis_hash, rpc_genesis_hash)); } } Ok(()) }) .and_then(|_| { if let Some(snapshot_hash) = snapshot_hash { rpc_client.get_slot_with_commitment(CommitmentConfig::root()) .map_err(|err| format!("Failed to get RPC node slot: {}", err)) .and_then(|slot| { info!("RPC node root slot: {}", slot); download_snapshot(&rpc_contact_info.rpc, &ledger_path, snapshot_hash) }) } else { Ok(()) } }) .map(|_| { if !validator_config.voting_disabled && !no_check_vote_account { check_vote_account( &rpc_client, &identity_keypair.pubkey(), &vote_account, &authorized_voter_keypairs.iter().map(|k| k.pubkey()).collect::<Vec<_>>(), ).unwrap_or_else(|err| { // Consider failures here to be more likely due to user error (eg, // incorrect `solana-validator` command-line arguments) rather than the // RPC node failing. 
// // Power users can always use the `--no-check-vote-account` option to // bypass this check entirely error!("{}", err); exit(1); }); } }); if result.is_ok() { break; } warn!("{}", result.unwrap_err()); if let Some(ref trusted_validators) = validator_config.trusted_validators { if trusted_validators.contains(&rpc_contact_info.id) { continue; // Never blacklist a trusted node } } info!( "Excluding {} as a future RPC candidate", rpc_contact_info.id ); blacklisted_rpc_nodes.insert(rpc_contact_info.id); } gossip_exit_flag.store(true, Ordering::Relaxed); gossip_service.join().unwrap(); } } if !ledger_path.is_dir() { error!( "ledger directory does not exist or is not accessible: {:?}", ledger_path ); exit(1); } let validator = Validator::new( node, &identity_keypair, &ledger_path, &vote_account, authorized_voter_keypairs, cluster_entrypoint.as_ref(), !skip_poh_verify, &validator_config, ); if let Some(filename) = init_complete_file { File::create(filename).unwrap_or_else(|_| { error!("Unable to create: {}", filename); exit(1); }); } info!("Validator initialized"); validator.join().expect("validator exit"); info!("Validator exiting.."); } #[cfg(test)] pub mod tests { use super::*; #[test] fn test_interval_check() { assert!(!is_snapshot_config_invalid(0, 100)); assert!(is_snapshot_config_invalid(1, 100)); assert!(is_snapshot_config_invalid(230, 100)); assert!(!is_snapshot_config_invalid(500, 100)); assert!(!is_snapshot_config_invalid(5, 5)); } }
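// A minimal sketch (not part of the original file) of the invariant that
// `is_snapshot_config_invalid` appears to enforce, inferred from the error
// message printed in main() and the `test_interval_check` cases above:
// snapshots are either disabled outright (interval 0) or taken at a multiple
// of the accounts-hash interval. The name below is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
fn snapshot_config_is_invalid_sketch(
    snapshot_interval_slots: u64,
    accounts_hash_interval_slots: u64,
) -> bool {
    // 0 disables snapshots entirely, which is always a valid configuration
    snapshot_interval_slots != 0
        && snapshot_interval_slots % accounts_hash_interval_slots != 0
}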
38.218563
157
0.546357
8fb517fb5bd2f24d937b70c26e78645b71f3f47f
94
#![deny(warnings, rust_2018_idioms)] #![forbid(unsafe_code)] mod gen; pub use self::gen::*;
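// Usage note (added): because of the blanket re-export above, downstream code
// imports generated items from the crate root, e.g. `use the_crate::Foo;`
// (where `the_crate` and `Foo` are hypothetical names), rather than reaching
// into the private `gen` module.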
13.428571
36
0.680851
f70a813586263de672012189b6fa821670bec97d
5,707
// Generated from definition io.k8s.api.core.v1.EndpointsList /// EndpointsList is a list of endpoints. #[derive(Clone, Debug, Default, PartialEq)] pub struct EndpointsList { /// List of endpoints. pub items: Vec<crate::v1_9::api::core::v1::Endpoints>, /// Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds pub metadata: Option<crate::v1_9::apimachinery::pkg::apis::meta::v1::ListMeta>, } impl crate::Resource for EndpointsList { fn api_version() -> &'static str { "v1" } fn group() -> &'static str { "" } fn kind() -> &'static str { "EndpointsList" } fn version() -> &'static str { "v1" } } impl crate::Metadata for EndpointsList { type Ty = crate::v1_9::apimachinery::pkg::apis::meta::v1::ListMeta; fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> { self.metadata.as_ref() } } impl<'de> serde::Deserialize<'de> for EndpointsList { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_api_version, Key_kind, Key_items, Key_metadata, Other, } impl<'de> serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error { Ok(match v { "apiVersion" => Field::Key_api_version, "kind" => Field::Key_kind, "items" => Field::Key_items, "metadata" => Field::Key_metadata, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = EndpointsList; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "struct EndpointsList") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> { let mut value_items: Option<Vec<crate::v1_9::api::core::v1::Endpoints>> = None; let mut value_metadata: Option<crate::v1_9::apimachinery::pkg::apis::meta::v1::ListMeta> = None; while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? 
{ match key { Field::Key_api_version => { let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?; if value_api_version != <Self::Value as crate::Resource>::api_version() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version())); } }, Field::Key_kind => { let value_kind: String = serde::de::MapAccess::next_value(&mut map)?; if value_kind != <Self::Value as crate::Resource>::kind() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind())); } }, Field::Key_items => value_items = Some(serde::de::MapAccess::next_value(&mut map)?), Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(EndpointsList { items: value_items.ok_or_else(|| serde::de::Error::missing_field("items"))?, metadata: value_metadata, }) } } deserializer.deserialize_struct( "EndpointsList", &[ "apiVersion", "kind", "items", "metadata", ], Visitor, ) } } impl serde::Serialize for EndpointsList { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { let mut state = serializer.serialize_struct( "EndpointsList", 3 + self.metadata.as_ref().map_or(0, |_| 1), )?; serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::api_version())?; serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?; serde::ser::SerializeStruct::serialize_field(&mut state, "items", &self.items)?; if let Some(value) = &self.metadata { serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?; } serde::ser::SerializeStruct::end(state) } }
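// A minimal round-trip sketch (not part of the generated file; assumes a
// `serde_json` dev-dependency is available). It exercises the contract the
// hand-written `Deserialize` impl above enforces: `apiVersion` and `kind`
// must match the `crate::Resource` constants, `items` is required, and
// `metadata` may be omitted.
#[cfg(test)]
mod endpoints_list_sketch {
    use super::EndpointsList;

    #[test]
    fn rejects_wrong_kind_and_requires_items() {
        // A mismatched `kind` is rejected by the Field::Key_kind arm above.
        let wrong_kind = r#"{"apiVersion":"v1","kind":"PodList","items":[]}"#;
        assert!(serde_json::from_str::<EndpointsList>(wrong_kind).is_err());

        // `items` present and correct `apiVersion`/`kind` deserializes fine.
        let ok = r#"{"apiVersion":"v1","kind":"EndpointsList","items":[]}"#;
        assert!(serde_json::from_str::<EndpointsList>(ok).is_ok());
    }
}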
39.358621
174
0.511477
eb54da807291976325faefa1c616224d43c8ade7
6,049
#![cfg(feature = "test-bpf")] mod helpers; use { borsh::BorshDeserialize, helpers::*, solana_program::{instruction::InstructionError, pubkey::Pubkey}, solana_program_test::*, solana_sdk::{ signature::{Keypair, Signer}, transaction::TransactionError, }, spl_stake_pool::{error::StakePoolError, state::StakePool}, }; async fn setup() -> ( ProgramTestContext, StakePoolAccounts, Vec<ValidatorStakeAccount>, ) { let mut context = program_test().start_with_context().await; let stake_pool_accounts = StakePoolAccounts::new(); stake_pool_accounts .initialize_stake_pool( &mut context.banks_client, &context.payer, &context.last_blockhash, ) .await .unwrap(); // Add several accounts let mut stake_accounts: Vec<ValidatorStakeAccount> = vec![]; const STAKE_ACCOUNTS: u64 = 3; for _ in 0..STAKE_ACCOUNTS { stake_accounts.push( simple_add_validator_to_pool( &mut context.banks_client, &context.payer, &context.last_blockhash, &stake_pool_accounts, ) .await, ); } (context, stake_pool_accounts, stake_accounts) } #[tokio::test] async fn success() { let (mut context, stake_pool_accounts, stake_accounts) = setup().await; let error = stake_pool_accounts .update_stake_pool_balance( &mut context.banks_client, &context.payer, &context.last_blockhash, ) .await; assert!(error.is_none()); // Add extra funds, simulating rewards const EXTRA_STAKE_AMOUNT: u64 = 1_000_000; for stake_account in &stake_accounts { transfer( &mut context.banks_client, &context.payer, &context.last_blockhash, &stake_account.stake_account, EXTRA_STAKE_AMOUNT, ) .await; } let before_balance = get_validator_list_sum( &mut context.banks_client, &stake_pool_accounts.validator_list.pubkey(), ) .await; // Update epoch context.warp_to_slot(50_000).unwrap(); // Update list and pool let error = stake_pool_accounts .update_validator_list_balance( &mut context.banks_client, &context.payer, &context.last_blockhash, stake_accounts .iter() .map(|v| v.stake_account) .collect::<Vec<Pubkey>>() .as_slice(), ) .await; assert!(error.is_none()); let error = stake_pool_accounts .update_stake_pool_balance( &mut context.banks_client, &context.payer, &context.last_blockhash, ) .await; assert!(error.is_none()); // Check fee let after_balance = get_validator_list_sum( &mut context.banks_client, &stake_pool_accounts.validator_list.pubkey(), ) .await; let actual_fee = get_token_balance( &mut context.banks_client, &stake_pool_accounts.pool_fee_account.pubkey(), ) .await; let pool_token_supply = get_token_supply( &mut context.banks_client, &stake_pool_accounts.pool_mint.pubkey(), ) .await; let stake_pool_info = get_account( &mut context.banks_client, &stake_pool_accounts.stake_pool.pubkey(), ) .await; let stake_pool = StakePool::try_from_slice(&stake_pool_info.data).unwrap(); let expected_fee = stake_pool .calc_fee_amount(after_balance - before_balance) .unwrap(); assert_eq!(actual_fee, expected_fee); assert_eq!(pool_token_supply, stake_pool.pool_token_supply); } #[tokio::test] async fn fail_with_wrong_validator_list() { let (mut banks_client, payer, recent_blockhash) = program_test().start().await; let mut stake_pool_accounts = StakePoolAccounts::new(); stake_pool_accounts .initialize_stake_pool(&mut banks_client, &payer, &recent_blockhash) .await .unwrap(); let wrong_validator_list = Keypair::new(); stake_pool_accounts.validator_list = wrong_validator_list; let error = stake_pool_accounts .update_stake_pool_balance(&mut banks_client, &payer, &recent_blockhash) .await .unwrap() .unwrap(); match error { TransactionError::InstructionError( _, InstructionError::Custom(error_index), 
) => { let program_error = StakePoolError::InvalidValidatorStakeList as u32; assert_eq!(error_index, program_error); } _ => panic!("Wrong error occurred while trying to update pool balance with a wrong validator stake list account"), } } #[tokio::test] async fn fail_with_wrong_pool_fee_account() { let (mut banks_client, payer, recent_blockhash) = program_test().start().await; let mut stake_pool_accounts = StakePoolAccounts::new(); stake_pool_accounts .initialize_stake_pool(&mut banks_client, &payer, &recent_blockhash) .await .unwrap(); let wrong_fee_account = Keypair::new(); stake_pool_accounts.pool_fee_account = wrong_fee_account; let error = stake_pool_accounts .update_stake_pool_balance(&mut banks_client, &payer, &recent_blockhash) .await .unwrap() .unwrap(); match error { TransactionError::InstructionError( _, InstructionError::Custom(error_index), ) => { let program_error = StakePoolError::InvalidFeeAccount as u32; assert_eq!(error_index, program_error); } _ => panic!("Wrong error occurred while trying to update pool balance with a wrong pool fee account"), } } #[tokio::test] async fn test_update_stake_pool_balance_with_uninitialized_validator_list() {} // TODO #[tokio::test] async fn test_update_stake_pool_balance_with_out_of_dated_validators_balances() {} // TODO
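// Worked example for the fee assertion in `success` above (informal note, not
// part of the original tests): each of the three validator stake accounts is
// topped up with EXTRA_STAKE_AMOUNT = 1_000_000 lamports before the epoch warp,
// so `after_balance - before_balance` reflects those 3_000_000 lamports plus
// whatever stake rewards the warp itself accrues; `calc_fee_amount` then maps
// that lamport delta to the number of pool tokens expected in the fee account.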
29.79803
115
0.632336
6a65fb4c92819550da7d5f0df397c7885ad83800
1,089
//! A set of logging macros that print not only the timestamp and log level, //! but also filename, line and column. //! //! They behave just like the usual log::warn, log::info, etc. //! //! //! In fact, printing file, line and column can be done with a custom formatter for env_logger, like so: //! //!```ignore //! use env_logger::Builder; //! use std::io::Write; //! //! env_logger::builder() //! .format(|buf, record| { //! writeln!(buf, "{:?}", record.file()); //! writeln!(buf, "{}", record.args()) //! }) //! .init(); //!``` //! //! But I couldn't easily replicate its default behavior in my custom logger. //! #[macro_export] macro_rules! warn { ($($arg:tt)*) => { log::warn!( "[{}:{}:{}] {}", file!(), line!(), column!(), format!($($arg)*) ); }; } #[macro_export] macro_rules! error { ($($arg:tt)*) => { log::error!( "[{}:{}:{}] {}", file!(), line!(), column!(), format!($($arg)*) ); }; }
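// A short usage sketch (added; the crate name `file_logger` is hypothetical,
// and a `log` backend such as env_logger must be initialized first):
//
//     file_logger::warn!("disk is {}% full", 93);
//     // expands to log::warn!("[{}:{}:{}] {}", file!(), line!(), column!(), ...)
//     // and prints something like: [src/main.rs:7:5] disk is 93% full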
22.22449
104
0.461892
d92e372f06114d4fe215d2cbba697288a135e475
62,910
//! Load commands tell the kernel and dynamic linker anything from how to load this binary into memory, what the entry point is, apple specific information, to which libraries it requires for dynamic linking. use error; use core::fmt::{self, Display}; use scroll::{self, ctx, Endian, Pread}; /////////////////////////////////////// // Load Commands from mach-o/loader.h // with some rusty additions ////////////////////////////////////// #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, SizeWith)] /// Occurs at the beginning of every load command to serve as a sort of tagged union/enum discriminant pub struct LoadCommandHeader { pub cmd: u32, pub cmdsize: u32, } impl Display for LoadCommandHeader { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "LoadCommandHeader: {} size: {}", cmd_to_str(self.cmd), self.cmdsize) } } pub const SIZEOF_LOAD_COMMAND: usize = 8; pub type LcStr = u32; pub const SIZEOF_LC_STR: usize = 4; #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct Section32 { /// name of this section pub sectname: [u8; 16], /// segment this section goes in pub segname: [u8; 16], /// memory address of this section pub addr: u32, /// size in bytes of this section pub size: u32, /// file offset of this section pub offset: u32, /// section alignment (power of 2) pub align: u32, /// file offset of relocation entries pub reloff: u32, /// number of relocation entries pub nreloc: u32, /// flags (section type and attributes) pub flags: u32, /// reserved (for offset or index) pub reserved1: u32, /// reserved (for count or sizeof) pub reserved2: u32, } pub const SIZEOF_SECTION_32: usize = 68; /// for 64-bit architectures #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct Section64 { /// name of this section pub sectname: [u8; 16], /// segment this section goes in pub segname: [u8; 16], /// memory address of this section pub addr: u64, /// size in bytes of this section pub size: u64, /// file offset of this section pub offset: u32, /// section alignment (power of 2) pub align: u32, /// file offset of relocation entries pub reloff: u32, /// number of relocation entries pub nreloc: u32, /// flags (section type and attributes) pub flags: u32, /// reserved (for offset or index) pub reserved1: u32, /// reserved (for count or sizeof) pub reserved2: u32, /// reserved pub reserved3: u32, } pub const SIZEOF_SECTION_64: usize = 80; #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct SegmentCommand32 { pub cmd: u32, pub cmdsize: u32, pub segname: [u8; 16], pub vmaddr: u32, pub vmsize: u32, pub fileoff: u32, pub filesize: u32, pub maxprot: u32, pub initprot: u32, pub nsects: u32, pub flags: u32, } pub const SIZEOF_SEGMENT_COMMAND_32: usize = 56; impl SegmentCommand32 { pub fn name(&self) -> error::Result<&str> { Ok(self.segname.pread::<&str>(0)?) } } #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct SegmentCommand64 { pub cmd: u32, pub cmdsize: u32, pub segname: [u8; 16], pub vmaddr: u64, pub vmsize: u64, pub fileoff: u64, pub filesize: u64, pub maxprot: u32, pub initprot: u32, pub nsects: u32, pub flags: u32, } pub const SIZEOF_SEGMENT_COMMAND_64: usize = 72; impl SegmentCommand64 { pub fn name(&self) -> error::Result<&str> { Ok(self.segname.pread::<&str>(0)?) } } /// Fixed virtual memory shared libraries are identified by two things. 
The /// target pathname (the name of the library as found for execution), and the /// minor version number. The address of where the headers are loaded is in /// header_addr. (THIS IS OBSOLETE and no longer supported). #[repr(packed)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct Fvmlib { /// library's target pathname pub name: u32, /// library's minor version number pub minor_version: u32, /// library's header address pub header_addr: u32, } pub const SIZEOF_FVMLIB: usize = 12; /// A fixed virtual shared library (filetype == MH_FVMLIB in the mach header) /// contains a fvmlib_command (cmd == LC_IDFVMLIB) to identify the library. /// An object that uses a fixed virtual shared library also contains a /// fvmlib_command (cmd == LC_LOADFVMLIB) for each library it uses. /// (THIS IS OBSOLETE and no longer supported). #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct FvmlibCommand { /// LC_IDFVMLIB or LC_LOADFVMLIB pub cmd: u32, /// includes pathname string pub cmdsize: u32, /// the library identification pub fvmlib: Fvmlib, } pub const SIZEOF_FVMLIB_COMMAND: usize = 20; // /// Dynamically linked shared libraries are identified by two things. The // /// pathname (the name of the library as found for execution), and the // /// compatibility version number. The pathname must match and the compatibility // /// number in the user of the library must be greater than or equal to the // /// library being used. The time stamp is used to record the time a library was // /// built and copied into the user so it can be used to determine if the library used // /// at runtime is exactly the same as used to build the program. // struct dylib { // union lc_str name; // library's path name // uint32_t timestamp; // library's build time stamp // uint32_t current_version; // library's current version number // uint32_t compatibility_version; // library's compatibility vers number // } /// A dynamically linked shared library (filetype == MH_DYLIB in the mach header) /// contains a dylib_command (cmd == LC_ID_DYLIB) to identify the library. /// An object that uses a dynamically linked shared library also contains a /// dylib_command (cmd == LC_LOAD_DYLIB, LC_LOAD_WEAK_DYLIB, or /// LC_REEXPORT_DYLIB) for each library it uses. #[repr(packed)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct Dylib { /// library's path name pub name: LcStr, /// library's build time stamp pub timestamp: u32, /// library's current version number pub current_version: u32, /// library's compatibility vers number pub compatibility_version: u32, } pub const SIZEOF_DYLIB: usize = 16; #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct DylibCommand { /// LC_ID_DYLIB, LC_LOAD_DYLIB, LC_LOAD_WEAK_DYLIB, LC_REEXPORT_DYLIB pub cmd: u32, /// includes pathname string pub cmdsize: u32, /// the library identification pub dylib: Dylib, } pub const SIZEOF_DYLIB_COMMAND: usize = 20; /// A dynamically linked shared library may be a subframework of an umbrella /// framework. If so it will be linked with "-umbrella umbrella_name" where /// "umbrella_name" is the name of the umbrella framework. A subframework /// can only be linked against by its umbrella framework or other subframeworks /// that are part of the same umbrella framework. Otherwise the static link /// editor produces an error and states to link against the umbrella framework. 
/// The name of the umbrella framework for subframeworks is recorded in the /// following structure. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct SubFrameworkCommand { /// LC_SUB_FRAMEWORK pub cmd: u32, /// includes umbrella string pub cmdsize: u32, /// the umbrella framework name pub umbrella: u32, } pub const SIZEOF_SUB_FRAMEWORK_COMMAND: usize = 12; /// For dynamically linked shared libraries that are a subframework of an umbrella /// framework they can allow clients other than the umbrella framework or other /// subframeworks in the same umbrella framework. To do this the subframework /// is built with "-allowable_client client_name" and an LC_SUB_CLIENT load /// command is created for each -allowable_client flag. The client_name is /// usually a framework name. It can also be a name used for bundle clients /// where the bundle is built with "-client_name client_name". #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct SubClientCommand { /// LC_SUB_CLIENT pub cmd: u32, /// includes client string pub cmdsize: u32, /// the client name pub client: LcStr, } pub const SIZEOF_SUB_CLIENT_COMMAND: usize = 12; /// A dynamically linked shared library may be a sub_umbrella of an umbrella /// framework. If so it will be linked with "-sub_umbrella umbrella_name" where /// "umbrella_name" is the name of the sub_umbrella framework. When /// statically linking when -twolevel_namespace is in effect a twolevel namespace /// umbrella framework will only cause its subframeworks and those frameworks /// listed as sub_umbrella frameworks to be implicitly linked in. Any other /// dependent dynamic libraries will not be linked in when -twolevel_namespace /// is in effect. The primary library recorded by the static linker when /// resolving a symbol in these libraries will be the umbrella framework. /// Zero or more sub_umbrella frameworks may be used by an umbrella framework. /// The name of a sub_umbrella framework is recorded in the following structure. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct SubUmbrellaCommand { /// LC_SUB_UMBRELLA pub cmd: u32, /// includes sub_umbrella string pub cmdsize: u32, /// the sub_umbrella framework name pub sub_umbrella: LcStr, } pub const SIZEOF_SUB_UMBRELLA_COMMAND: usize = 12; /// A dynamically linked shared library may be a sub_library of another shared /// library. If so it will be linked with "-sub_library library_name" where /// "library_name" is the name of the sub_library shared library. When /// statically linking when -twolevel_namespace is in effect a twolevel namespace /// shared library will only cause its subframeworks and those frameworks /// listed as sub_umbrella frameworks and libraries listed as sub_libraries to /// be implicitly linked in. Any other dependent dynamic libraries will not be /// linked in when -twolevel_namespace is in effect. The primary library /// recorded by the static linker when resolving a symbol in these libraries /// will be the umbrella framework (or dynamic library). Zero or more sub_library /// shared libraries may be used by an umbrella framework (or dynamic library). /// The name of a sub_library framework is recorded in the following structure. /// For example /usr/lib/libobjc_profile.A.dylib would be recorded as "libobjc". 
#[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct SubLibraryCommand { /// LC_SUB_LIBRARY pub cmd: u32, /// includes sub_library string pub cmdsize: u32, /// the sub_library name pub sub_library: LcStr, } pub const SIZEOF_SUB_LIBRARY_COMMAND: usize = 12; /// A program (type == MH_EXECUTE) that is /// prebound to its dynamic libraries has one of these for each library that /// the static linker used in prebinding. It contains a bit vector for the /// modules in the library. The bits indicate which modules are bound (1) and /// which are not (0) from the library. The bit for module 0 is the low bit /// of the first byte. So the bit for the Nth module is: /// (linked_modules[N/8] >> N%8) & 1 #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct PreboundDylibCommand { /// LC_PREBOUND_DYLIB pub cmd: u32, /// includes strings pub cmdsize: u32, /// library's path name pub name: LcStr, /// number of modules in library pub nmodules: u32, /// bit vector of linked modules // TODO: fixme pub linked_modules: LcStr, } pub const SIZEOF_PREBOUND_DYLIB_COMMAND: usize = 20; /// The name of the dynamic linker #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct DylinkerCommand { pub cmd: u32, pub cmdsize: u32, pub name: LcStr, } pub const SIZEOF_DYLINKER_COMMAND: usize = 12; /// Thread commands contain machine-specific data structures suitable for /// use in the thread state primitives. The machine specific data structures /// follow the struct thread_command as follows. /// Each flavor of machine specific data structure is preceded by an unsigned /// long constant for the flavor of that data structure, a uint32_t /// that is the count of longs of the size of the state data structure and then /// the state data structure follows. This triple may be repeated for many /// flavors. The constants for the flavors, counts and state data structure /// definitions are expected to be in the header file <machine/thread_status.h>. /// These machine specific data structures sizes must be multiples of /// 4 bytes. The cmdsize reflects the total size of the thread_command /// and all of the sizes of the constants for the flavors, counts and state /// data structures. /// /// For executable objects that are unix processes there will be one /// thread_command (cmd == LC_UNIXTHREAD) created for it by the link-editor. /// This is the same as an LC_THREAD, except that a stack is automatically /// created (based on the shell's limit for the stack size). Command arguments /// and environment variables are copied onto that stack. // unimplemented, see machine/thread_status.h for rest of values: // uint32_t flavor flavor of thread state // uint32_t count count of longs in thread state // struct XXX_thread_state state thread state for this flavor // ... #[repr(C)] #[derive(Copy)] pub struct ThreadCommand { /// LC_THREAD or LC_UNIXTHREAD pub cmd: u32, /// total size of this command pub cmdsize: u32, /// flavor of thread state (but you also need to know the `cputype`) pub flavor: u32, /// number of elements in `thread_state` that are valid pub count: u32, /// The raw thread state, details of which vary by CPU pub thread_state: [u32; 70], } impl ThreadCommand { pub fn instruction_pointer(&self, cputype: super::cputype::CpuType) -> error::Result<u64> { // The thread command includes a `flavor` value which distinguishes between related thread // states. 
However, `dyld` ignores this entirely, blindly interpreting the thread state // values as a machine-specific set of registers matching the build configuration of the // active `dyld` binary. // // Really the only thing `dyld` cares about is the Mach header's `cputype`, so that's // what we use here. match cputype { super::cputype::CPU_TYPE_X86 => { // struct i386_thread_state_t { // uint32_t eax; // uint32_t ebx; // uint32_t ecx; // uint32_t edx; // uint32_t edi; // uint32_t esi; // uint32_t ebp; // uint32_t esp; // uint32_t ss; // uint32_t eflags; // uint32_t eip; // uint32_t cs; // uint32_t ds; // uint32_t es; // uint32_t fs; // uint32_t gs; // } let eip: u32 = self.thread_state[10]; Ok(eip as u64) }, super::cputype::CPU_TYPE_X86_64 => { // struct x86_thread_state64_t { // uint64_t rax; // uint64_t rbx; // uint64_t rcx; // uint64_t rdx; // uint64_t rdi; // uint64_t rsi; // uint64_t rbp; // uint64_t rsp; // uint64_t r8; // uint64_t r9; // uint64_t r10; // uint64_t r11; // uint64_t r12; // uint64_t r13; // uint64_t r14; // uint64_t r15; // uint64_t rip; // uint64_t rflags; // uint64_t cs; // uint64_t fs; // uint64_t gs; // } let rip: u64 = (self.thread_state[32] as u64) | ((self.thread_state[33] as u64) << 32); Ok(rip) } super::cputype::CPU_TYPE_ARM => { // struct arm_thread_state32_t { // uint32_t r[13]; // uint32_t sp; // uint32_t lr; // uint32_t pc; // uint32_t cpsr; // } let pc: u32 = self.thread_state[15]; Ok(pc as u64) } super::cputype::CPU_TYPE_ARM64 => { // struct arm_thread_state64_t { // uint64_t x[29]; // uint64_t fp; // uint64_t lr; // uint64_t sp; // uint64_t pc; // uint32_t cpsr; // uint32_t pad; // } let pc: u64 = (self.thread_state[64] as u64) | ((self.thread_state[65] as u64) << 32); Ok(pc) } // https://github.com/m4b/goblin/issues/64 // Probably a G4 super::cputype::CPU_TYPE_POWERPC => { Ok(self.thread_state[0] as u64) }, // I think the G5 was the last motorola powerpc processor used by apple before switching to intel cpus. 
// unfortunately I don't have any binaries on hand to see what its thread state looks like :/ // super::cputype::CPU_TYPE_POWERPC64 => { // } // Assuming the above is added, I don't believe apple ever ported mach, the mach kernel // (and hence its binary format), to any other machines except the above, // but I would be happy to learn otherwise _ => { Err(error::Error::Malformed(format!("unable to find instruction pointer for cputype {:?}", cputype))) } } } } impl<'a> ctx::TryFromCtx<'a, Endian> for ThreadCommand { type Error = ::error::Error; type Size = usize; fn try_from_ctx(bytes: &'a [u8], le: Endian) -> error::Result<(Self, Self::Size)> { use scroll::Pread; let lc = bytes.pread_with::<LoadCommandHeader>(0, le)?; // read the thread state flavor and length of the thread state let flavor: u32 = bytes.pread_with(8, le)?; let count: u32 = bytes.pread_with(12, le)?; if count > 70 { return Err(error::Error::Malformed(format!("thread command specifies {} longs for thread state but we handle only 70", count))); } // check the length before slicing, so a truncated command yields an error instead of a panic let thread_state_byte_length = count as usize * 4; if bytes.len() < 16 + thread_state_byte_length { return Err(error::Error::Malformed(format!("thread command specifies {} bytes for thread state but has only {}", thread_state_byte_length, bytes.len().saturating_sub(16)))); } // get a byte slice of the thread state let thread_state_bytes = &bytes[16..16 + thread_state_byte_length]; // read the thread state let mut thread_state: [u32; 70] = [ 0; 70 ]; for i in 0..count as usize { thread_state[i] = thread_state_bytes.pread_with(i*4, le)?; } Ok((ThreadCommand{ cmd: lc.cmd, cmdsize: lc.cmdsize, flavor, count, thread_state, }, lc.cmdsize as _)) } } impl Clone for ThreadCommand { fn clone(&self) -> Self { *self } } impl fmt::Debug for ThreadCommand { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("ThreadCommand") .field("cmd", &self.cmd) .field("cmdsize", &self.cmdsize) .field("flavor", &self.flavor) .field("count", &self.count) .field("thread_state", &&self.thread_state[..]) .finish() } } /// The routines command contains the address of the dynamic shared library /// initialization routine and an index into the module table for the module /// that defines the routine. Before any modules are used from the library the /// dynamic linker fully binds the module that defines the initialization routine /// and then calls it. This gets called before any module initialization /// routines (used for C++ static constructors) in the library. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct RoutinesCommand32 { /// LC_ROUTINES pub cmd: u32, /// total size of this command pub cmdsize: u32, /// address of initialization routine pub init_address: u32, /// index into the module table that the init routine is defined in pub init_module: u32, pub reserved1: u32, pub reserved2: u32, pub reserved3: u32, pub reserved4: u32, pub reserved5: u32, pub reserved6: u32, } /// The 64-bit routines command. Same use as above. 
#[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct RoutinesCommand64 { /// LC_ROUTINES_64 pub cmd: u32, /// total size of this command pub cmdsize: u32, /// address of initialization routine pub init_address: u64, /// index into the module table that the init routine is defined in pub init_module: u64, pub reserved1: u64, pub reserved2: u64, pub reserved3: u64, pub reserved4: u64, pub reserved5: u64, pub reserved6: u64, } #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct SymtabCommand { pub cmd: u32, pub cmdsize: u32, pub symoff: u32, pub nsyms: u32, pub stroff: u32, pub strsize: u32, } impl SymtabCommand { pub fn new() -> Self { SymtabCommand { cmd: LC_SYMTAB, cmdsize: SIZEOF_SYMTAB_COMMAND as u32, symoff: 0, nsyms: 0, stroff: 0, strsize: 0, } } } pub const SIZEOF_SYMTAB_COMMAND: usize = 24; /// This is the second set of the symbolic information which is used to support /// the data structures for the dynamically link editor. /// /// The original set of symbolic information in the symtab_command which contains /// the symbol and string tables must also be present when this load command is /// present. When this load command is present the symbol table is organized /// into three groups of symbols: /// local symbols (static and debugging symbols) - grouped by module /// defined external symbols - grouped by module (sorted by name if not lib) /// undefined external symbols (sorted by name if MH_BINDATLOAD is not set, /// and in the order they were seen by the static /// linker if MH_BINDATLOAD is set) /// In this load command there are offsets and counts to each of the three groups /// of symbols. /// /// This load command contains the offsets and sizes of the following new /// symbolic information tables: /// table of contents /// module table /// reference symbol table /// indirect symbol table /// The first three tables above (the table of contents, module table and /// reference symbol table) are only present if the file is a dynamically linked /// shared library. For executable and object modules, which are files /// containing only one module, the information that would be in these three /// tables is determined as follows: /// table of contents - the defined external symbols are sorted by name /// module table - the file contains only one module so everything in the /// file is part of the module. /// reference symbol table - is the defined and undefined external symbols /// /// For dynamically linked shared library files this load command also contains /// offsets and sizes to the pool of relocation entries for all sections /// separated into two groups: /// external relocation entries /// local relocation entries /// For executable and object modules the relocation entries continue to hang /// off the section structures. 
#[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct DysymtabCommand { pub cmd: u32, pub cmdsize: u32, /// index to local symbols pub ilocalsym: u32, /// number of local symbols pub nlocalsym: u32, /// index to externally defined symbols pub iextdefsym: u32, /// number of externally defined symbols pub nextdefsym: u32, /// index to undefined symbols pub iundefsym: u32, /// number of undefined symbols pub nundefsym: u32, /// file offset to table of contents pub tocoff: u32, /// number of entries in table of contents pub ntoc: u32, /// file offset to module table pub modtaboff: u32, /// number of module table entries pub nmodtab: u32, /// offset to referenced symbol table pub extrefsymoff: u32, /// number of referenced symbol table entries pub nextrefsyms: u32, /// file offset to the indirect symbol table pub indirectsymoff: u32, /// number of indirect symbol table entries pub nindirectsyms: u32, /// offset to external relocation entries pub extreloff: u32, /// number of external relocation entries pub nextrel: u32, /// offset to local relocation entries pub locreloff: u32, /// number of local relocation entries pub nlocrel: u32, } impl DysymtabCommand { pub fn new() -> Self { DysymtabCommand { cmd: LC_DYSYMTAB, cmdsize: SIZEOF_DYSYMTAB_COMMAND as u32, ilocalsym: 0, nlocalsym: 0, iextdefsym: 0, nextdefsym: 0, iundefsym: 0, nundefsym: 0, tocoff: 0, ntoc: 0, modtaboff: 0, nmodtab: 0, extrefsymoff: 0, nextrefsyms: 0, indirectsymoff: 0, nindirectsyms: 0, extreloff: 0, nextrel: 0, locreloff: 0, nlocrel: 0, } } } pub const SIZEOF_DYSYMTAB_COMMAND: usize = 80; // TODO: unimplemented /// a table of contents entry #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct DylibTableOfContents { /// the defined external symbol (index into the symbol table) pub symbol_index: u32, /// index into the module table this symbol is defined in pub module_index: u32, } // TODO: unimplemented /// a module table entry #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct DylibModule { /// the module name (index into string table) pub module_name: u32, /// index into externally defined symbols pub iextdefsym: u32, /// number of externally defined symbols pub nextdefsym: u32, /// index into reference symbol table pub irefsym: u32, /// number of reference symbol table entries pub nrefsym: u32, /// index into symbols for local symbols pub ilocalsym: u32, /// number of local symbols pub nlocalsym: u32, /// index into external relocation entries pub iextrel: u32, /// number of external relocation entries pub nextrel: u32, /// low 16 bits are the index into the init section, high 16 bits are the index into the term section pub iinit_iterm: u32, /// low 16 bits are the number of init section entries, high 16 bits are the number of term section entries pub ninit_nterm: u32, /// the (__OBJC,__module_info) section pub objc_module_info_addr: u32, /// the (__OBJC,__module_info) section pub objc_module_info_size: u32, } // TODO: unimplemented /// a 64-bit module table entry #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct DylibModule64 { /// the module name (index into string table) pub module_name: u32, /// index into externally defined symbols pub iextdefsym: u32, /// number of externally defined symbols pub nextdefsym: u32, /// index into reference symbol table pub irefsym: u32, /// number of reference symbol table entries pub nrefsym: u32, /// index into symbols for 
local symbols pub ilocalsym: u32, /// number of local symbols pub nlocalsym: u32, /// index into external relocation entries pub iextrel: u32, /// number of external relocation entries pub nextrel: u32, /// low 16 bits are the index into the init section, high 16 bits are the index into the term section pub iinit_iterm: u32, /// low 16 bits are the number of init section entries, high 16 bits are the number of term section entries pub ninit_nterm: u32, /// the (__OBJC,__module_info) section pub objc_module_info_size: u32, /// the (__OBJC,__module_info) section pub objc_module_info_addr: u64, } /// The entries in the reference symbol table are used when loading the module /// (both by the static and dynamic link editors) and if the module is unloaded /// or replaced. Therefore all external symbols (defined and undefined) are /// listed in the module's reference table. The flags describe the type of /// reference that is being made. The constants for the flags are defined in /// <mach-o/nlist.h> as they are also used for symbol table entries. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct DylibReference { /// 24 bits bit-field index into the symbol table pub isym: [u8; 24], /// flags to indicate the type of reference pub flags: u64, } /// The twolevel_hints_command contains the offset and number of hints in the /// two-level namespace lookup hints table. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct TwolevelHintsCommand { /// LC_TWOLEVEL_HINTS pub cmd: u32, /// sizeof(struct twolevel_hints_command) pub cmdsize: u32, /// offset to the hint table pub offset: u32, /// number of hints in the hint table pub nhints: u32, } /// The entries in the two-level namespace lookup hints table are twolevel_hint /// structs. These provide hints to the dynamic link editor where to start /// looking for an undefined symbol in a two-level namespace image. The /// isub_image field is an index into the sub-images (sub-frameworks and /// sub-umbrellas list) that made up the two-level image that the undefined /// symbol was found in when it was built by the static link editor. If /// isub_image is 0 then the symbol is expected to be defined in the library and not /// in the sub-images. If isub_image is non-zero it is an index into the array /// of sub-images for the umbrella with the first index in the sub-images being /// 1. The array of sub-images is the ordered list of sub-images of the umbrella /// that would be searched for a symbol that has the umbrella recorded as its /// primary library. The table of contents index is an index into the /// library's table of contents. This is used as the starting point of the /// binary search or a directed linear search. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct TwolevelHint { /// index into the sub images pub isub_image: u64, /// 24 bit field index into the table of contents pub itoc: [u8; 24], } /// The prebind_cksum_command contains the value of the original check sum for /// prebound files or zero. When a prebound file is first created or modified /// for other than updating its prebinding information the value of the check sum /// is set to zero. When the file has its prebinding re-done and if the value of /// the check sum is zero the original check sum is calculated and stored in /// cksum field of this load command in the output file. 
If the prebinding /// is re-done and the cksum field is non-zero it is left unchanged from the /// input file. // TODO: unimplemented #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct PrebindCksumCommand { /// LC_PREBIND_CKSUM pub cmd: u32, /// sizeof(struct prebind_cksum_command) pub cmdsize: u32, /// the check sum or zero pub cksum: u32, } /// The uuid load command contains a single 128-bit unique random number that /// identifies an object produced by the static link editor. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct UuidCommand { /// LC_UUID pub cmd: u32, /// sizeof(struct uuid_command) pub cmdsize: u32, /// 16 bytes the 128-bit uuid pub uuid: [u8; 16], } pub const SIZEOF_UUID_COMMAND: usize = 24; /// The rpath_command contains a path which at runtime should be added to /// the current run path used to find @rpath prefixed dylibs. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct RpathCommand { /// LC_RPATH pub cmd: u32, /// includes string pub cmdsize: u32, /// path to add to run path pub path: LcStr, } pub const SIZEOF_RPATH_COMMAND: usize = 12; /// The linkedit_data_command contains the offsets and sizes of a blob /// of data in the __LINKEDIT segment. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct LinkeditDataCommand { /// LC_CODE_SIGNATURE, LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, LC_DYLIB_CODE_SIGN_DRS or LC_LINKER_OPTIMIZATION_HINT. pub cmd: u32, /// sizeof(struct linkedit_data_command) pub cmdsize: u32, /// file offset of data in __LINKEDIT segment pub dataoff: u32, /// file size of data in __LINKEDIT segment pub datasize: u32, } pub const SIZEOF_LINKEDIT_DATA_COMMAND: usize = 16; /// The encryption_info_command contains the file offset and size /// of an encrypted segment. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct EncryptionInfoCommand32 { /// LC_ENCRYPTION_INFO pub cmd: u32, /// sizeof(struct encryption_info_command) pub cmdsize: u32, /// file offset of encrypted range pub cryptoff: u32, /// file size of encrypted range pub cryptsize: u32, /// which encryption system, 0 means not-encrypted yet pub cryptid: u32, } pub const SIZEOF_ENCRYPTION_INFO_COMMAND_32: usize = 20; /// The encryption_info_command_64 contains the file offset and size /// of an encrypted segment (for use in x86_64 targets). #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct EncryptionInfoCommand64 { /// LC_ENCRYPTION_INFO_64 pub cmd: u32, /// sizeof(struct encryption_info_command_64) pub cmdsize: u32, /// file offset of encrypted range pub cryptoff: u32, /// file size of encrypted range pub cryptsize: u32, /// which encryption system, 0 means not-encrypted yet pub cryptid: u32, /// padding to make this struct's size a multiple of 8 bytes pub pad: u32, } pub const SIZEOF_ENCRYPTION_INFO_COMMAND_64: usize = 24; /// The version_min_command contains the min OS version on which this /// binary was built to run.
/// /// LC_VERSION_MIN_MACOSX or LC_VERSION_MIN_IPHONEOS #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct VersionMinCommand { pub cmd: u32, pub cmdsize: u32, /// X.Y.Z is encoded in nibbles xxxx.yy.zz pub version: u32, /// X.Y.Z is encoded in nibbles xxxx.yy.zz pub sdk: u32, } impl VersionMinCommand { pub fn new(is_ios: bool) -> Self { VersionMinCommand { cmd: if is_ios { LC_VERSION_MIN_IPHONEOS } else { LC_VERSION_MIN_MACOSX }, cmdsize: SIZEOF_VERSION_MIN_COMMAND as u32, version: 0, sdk: 0, } } } pub const SIZEOF_VERSION_MIN_COMMAND: usize = 16; #[repr(C)] #[derive(Default, Debug, Clone, Copy, Pread, Pwrite, SizeWith)] pub struct DyldInfoCommand { /// LC_DYLD_INFO or LC_DYLD_INFO_ONLY pub cmd: u32, /// sizeof(struct dyld_info_command) pub cmdsize: u32, /// file offset to rebase info pub rebase_off: u32, /// size of rebase info pub rebase_size: u32, /// file offset to binding info pub bind_off: u32, /// size of binding info pub bind_size: u32, /// file offset to weak binding info pub weak_bind_off: u32, /// size of weak binding info pub weak_bind_size: u32, /// file offset to lazy binding info pub lazy_bind_off: u32, /// size of lazy binding info pub lazy_bind_size: u32, /// file offset to export info pub export_off: u32, /// size of export info pub export_size: u32, } pub const SIZEOF_DYLIB_INFO_COMMAND: usize = 48; /// The linker_option_command contains linker options embedded in object files. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct LinkerOptionCommand { /// LC_LINKER_OPTION only used in MH_OBJECT filetypes pub cmd: u32, pub cmdsize: u32, /// number of strings concatenation of zero terminated UTF8 strings. Zero filled at end to align pub count: u32, } pub const SIZEOF_LINKER_OPTION_COMMAND: usize = 12; /// The symseg_command contains the offset and size of the GNU style /// symbol table information as described in the header file <symseg.h>. /// The symbol roots of the symbol segments must also be aligned properly /// in the file. So the requirement of keeping the offsets aligned to a /// multiple of 4 bytes translates to the length field of the symbol /// roots also being a multiple of a long. Also the padding must again be /// zeroed. (THIS IS OBSOLETE and no longer supported). #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct SymsegCommand { /// LC_SYMSEG pub cmd: u32, /// sizeof(struct symseg_command) pub cmdsize: u32, /// symbol segment offset pub offset: u32, /// symbol segment size in bytes pub size: u32, } pub const SIZEOF_SYMSEG_COMMAND: usize = 16; /// The ident_command contains a free format string table following the /// ident_command structure. The strings are null terminated and the size of /// the command is padded out with zero bytes to a multiple of 4 bytes. /// (THIS IS OBSOLETE and no longer supported). #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct IdentCommand { /// LC_IDENT pub cmd: u32, /// strings that follow this command pub cmdsize: u32, } pub const SIZEOF_IDENT_COMMAND: usize = 8; /// The fvmfile_command contains a reference to a file to be loaded at the /// specified virtual address. (Presently, this command is reserved for /// internal use. The kernel ignores this command when loading a program into /// memory).
#[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct FvmfileCommand { /// LC_FVMFILE pub cmd: u32, /// includes pathname string pub cmdsize: u32, /// files pathname pub name: LcStr, /// files virtual address pub header_addr: u32, } pub const SIZEOF_FVMFILE_COMMAND: usize = 16; /// The entry_point_command is a replacement for thread_command. /// It is used for main executables to specify the location (file offset) /// of main(). If -stack_size was used at link time, the stacksize /// field will contain the stack size needed for the main thread. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct EntryPointCommand { pub cmd: u32, pub cmdsize: u32, /// uint64_t file __TEXT offset of main pub entryoff: u64, /// uint64_t if not zero, initial stack size pub stacksize: u64, } pub const SIZEOF_ENTRY_POINT_COMMAND: usize = 24; /// The source_version_command is an optional load command containing /// the version of the sources used to build the binary. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct SourceVersionCommand { /// LC_SOURCE_VERSION pub cmd: u32, pub cmdsize: u32, /// A.B.C.D.E packed as a24.b10.c10.d10.e10 pub version: u64, } /// The LC_DATA_IN_CODE load command uses a linkedit_data_command /// to point to an array of data_in_code_entry entries. Each entry /// describes a range of data in a code section. #[repr(C)] #[derive(Debug, Clone, Copy, Pread, Pwrite, IOread, IOwrite, SizeWith)] pub struct DataInCodeEntry { /// from mach_header to start of data range pub offset: u32, /// number of bytes in data range pub length: u16, /// a DICE_KIND_* value pub kind: u16, } /////////////////////////////////////// // Constants, et al.
/////////////////////////////////////// pub const LC_REQ_DYLD: u32 = 0x80000000; pub const LC_LOAD_WEAK_DYLIB: u32 = 0x18 | LC_REQ_DYLD; pub const LC_RPATH: u32 = 0x1c | LC_REQ_DYLD; pub const LC_REEXPORT_DYLIB: u32 = 0x1f | LC_REQ_DYLD; pub const LC_DYLD_INFO_ONLY: u32 = 0x22 | LC_REQ_DYLD; pub const LC_LOAD_UPWARD_DYLIB: u32 = 0x23 | LC_REQ_DYLD; pub const LC_MAIN: u32 = 0x28 | LC_REQ_DYLD; pub const LC_SEGMENT: u32 = 0x1; pub const LC_SYMTAB: u32 = 0x2; pub const LC_SYMSEG: u32 = 0x3; pub const LC_THREAD: u32 = 0x4; pub const LC_UNIXTHREAD: u32 = 0x5; pub const LC_LOADFVMLIB: u32 = 0x6; pub const LC_IDFVMLIB: u32 = 0x7; pub const LC_IDENT: u32 = 0x8; pub const LC_FVMFILE: u32 = 0x9; pub const LC_PREPAGE: u32 = 0xa; pub const LC_DYSYMTAB: u32 = 0xb; pub const LC_LOAD_DYLIB: u32 = 0xc; pub const LC_ID_DYLIB: u32 = 0xd; pub const LC_LOAD_DYLINKER: u32 = 0xe; pub const LC_ID_DYLINKER: u32 = 0xf; pub const LC_PREBOUND_DYLIB: u32 = 0x10; pub const LC_ROUTINES: u32 = 0x11; pub const LC_SUB_FRAMEWORK: u32 = 0x12; pub const LC_SUB_UMBRELLA: u32 = 0x13; pub const LC_SUB_CLIENT: u32 = 0x14; pub const LC_SUB_LIBRARY: u32 = 0x15; pub const LC_TWOLEVEL_HINTS: u32 = 0x16; pub const LC_PREBIND_CKSUM: u32 = 0x17; pub const LC_SEGMENT_64: u32 = 0x19; pub const LC_ROUTINES_64: u32 = 0x1a; pub const LC_UUID: u32 = 0x1b; pub const LC_CODE_SIGNATURE: u32 = 0x1d; pub const LC_SEGMENT_SPLIT_INFO: u32 = 0x1e; pub const LC_LAZY_LOAD_DYLIB: u32 = 0x20; pub const LC_ENCRYPTION_INFO: u32 = 0x21; pub const LC_DYLD_INFO: u32 = 0x22; pub const LC_VERSION_MIN_MACOSX: u32 = 0x24; pub const LC_VERSION_MIN_IPHONEOS: u32 = 0x25; pub const LC_FUNCTION_STARTS: u32 = 0x26; pub const LC_DYLD_ENVIRONMENT: u32 = 0x27; pub const LC_DATA_IN_CODE: u32 = 0x29; pub const LC_SOURCE_VERSION: u32 = 0x2A; pub const LC_DYLIB_CODE_SIGN_DRS: u32 = 0x2B; pub const LC_ENCRYPTION_INFO_64: u32 = 0x2C; pub const LC_LINKER_OPTION: u32 = 0x2D; pub const LC_LINKER_OPTIMIZATION_HINT: u32 = 0x2E; pub fn cmd_to_str(cmd: u32) -> &'static str { match cmd { LC_SEGMENT => "LC_SEGMENT", LC_SYMTAB => "LC_SYMTAB", LC_SYMSEG => "LC_SYMSEG", LC_THREAD => "LC_THREAD", LC_UNIXTHREAD => "LC_UNIXTHREAD", LC_LOADFVMLIB => "LC_LOADFVMLIB", LC_IDFVMLIB => "LC_IDFVMLIB", LC_IDENT => "LC_IDENT", LC_FVMFILE => "LC_FVMFILE", LC_PREPAGE => "LC_PREPAGE", LC_DYSYMTAB => "LC_DYSYMTAB", LC_LOAD_DYLIB => "LC_LOAD_DYLIB", LC_ID_DYLIB => "LC_ID_DYLIB", LC_LOAD_DYLINKER => "LC_LOAD_DYLINKER", LC_ID_DYLINKER => "LC_ID_DYLINKER", LC_PREBOUND_DYLIB => "LC_PREBOUND_DYLIB", LC_ROUTINES => "LC_ROUTINES", LC_SUB_FRAMEWORK => "LC_SUB_FRAMEWORK", LC_SUB_UMBRELLA => "LC_SUB_UMBRELLA", LC_SUB_CLIENT => "LC_SUB_CLIENT", LC_SUB_LIBRARY => "LC_SUB_LIBRARY", LC_TWOLEVEL_HINTS => "LC_TWOLEVEL_HINTS", LC_PREBIND_CKSUM => "LC_PREBIND_CKSUM", LC_LOAD_WEAK_DYLIB => "LC_LOAD_WEAK_DYLIB", LC_SEGMENT_64 => "LC_SEGMENT_64", LC_ROUTINES_64 => "LC_ROUTINES_64", LC_UUID => "LC_UUID", LC_RPATH => "LC_RPATH", LC_CODE_SIGNATURE => "LC_CODE_SIGNATURE", LC_SEGMENT_SPLIT_INFO => "LC_SEGMENT_SPLIT_INFO", LC_REEXPORT_DYLIB => "LC_REEXPORT_DYLIB", LC_LAZY_LOAD_DYLIB => "LC_LAZY_LOAD_DYLIB", LC_ENCRYPTION_INFO => "LC_ENCRYPTION_INFO", LC_DYLD_INFO => "LC_DYLD_INFO", LC_DYLD_INFO_ONLY => "LC_DYLD_INFO_ONLY", LC_LOAD_UPWARD_DYLIB => "LC_LOAD_UPWARD_DYLIB", LC_VERSION_MIN_MACOSX => "LC_VERSION_MIN_MACOSX", LC_VERSION_MIN_IPHONEOS => "LC_VERSION_MIN_IPHONEOS", LC_FUNCTION_STARTS => "LC_FUNCTION_STARTS", LC_DYLD_ENVIRONMENT => "LC_DYLD_ENVIRONMENT", LC_MAIN => "LC_MAIN", LC_DATA_IN_CODE => "LC_DATA_IN_CODE",
LC_SOURCE_VERSION => "LC_SOURCE_VERSION", LC_DYLIB_CODE_SIGN_DRS => "LC_DYLIB_CODE_SIGN_DRS", LC_ENCRYPTION_INFO_64 => "LC_ENCRYPTION_INFO_64", LC_LINKER_OPTION => "LC_LINKER_OPTION", LC_LINKER_OPTIMIZATION_HINT => "LC_LINKER_OPTIMIZATION_HINT", _ => "LC_UNKNOWN", } } /////////////////////////////////////////// // Typesafe Command Variants /////////////////////////////////////////// #[derive(Debug)] /// The various load commands as a cast-free variant/enum pub enum CommandVariant { Segment32 (SegmentCommand32), Segment64 (SegmentCommand64), Uuid (UuidCommand), Symtab (SymtabCommand), Symseg (SymsegCommand), Thread (ThreadCommand), Unixthread (ThreadCommand), LoadFvmlib (FvmlibCommand), IdFvmlib (FvmlibCommand), Ident (IdentCommand), Fvmfile (FvmfileCommand), Prepage (LoadCommandHeader), Dysymtab (DysymtabCommand), LoadDylib (DylibCommand), IdDylib (DylibCommand), LoadDylinker (DylinkerCommand), IdDylinker (DylinkerCommand), PreboundDylib (PreboundDylibCommand), Routines32 (RoutinesCommand32), Routines64 (RoutinesCommand64), SubFramework (SubFrameworkCommand), SubUmbrella (SubUmbrellaCommand), SubClient (SubClientCommand), SubLibrary (SubLibraryCommand), TwolevelHints (TwolevelHintsCommand), PrebindCksum (PrebindCksumCommand), LoadWeakDylib (DylibCommand), Rpath (RpathCommand), CodeSignature (LinkeditDataCommand), SegmentSplitInfo (LinkeditDataCommand), ReexportDylib (DylibCommand), LazyLoadDylib (DylibCommand), EncryptionInfo32 (EncryptionInfoCommand32), EncryptionInfo64 (EncryptionInfoCommand64), DyldInfo (DyldInfoCommand), DyldInfoOnly (DyldInfoCommand), LoadUpwardDylib (DylibCommand), VersionMinMacosx (VersionMinCommand), VersionMinIphoneos (VersionMinCommand), FunctionStarts (LinkeditDataCommand), DyldEnvironment (DylinkerCommand), Main (EntryPointCommand), DataInCode (LinkeditDataCommand), SourceVersion (SourceVersionCommand), DylibCodeSignDrs (LinkeditDataCommand), LinkerOption (LinkeditDataCommand), LinkerOptimizationHint (LinkeditDataCommand), Unimplemented (LoadCommandHeader), } impl<'a> ctx::TryFromCtx<'a, Endian> for CommandVariant { type Error = ::error::Error; type Size = usize; fn try_from_ctx(bytes: &'a [u8], le: Endian) -> error::Result<(Self, Self::Size)> { use scroll::{Pread}; use self::CommandVariant::*; let lc = bytes.pread_with::<LoadCommandHeader>(0, le)?; let size = lc.cmdsize as usize; //println!("offset {:#x} cmd: {:#x} size: {:?} ctx: {:?}", offset, lc.cmd, size, le); if size > bytes.len() { return Err(error::Error::Malformed(format!("{} has size larger than remainder of binary: {:?}", &lc, bytes.len()))) } match lc.cmd { LC_SEGMENT => { let comm = bytes.pread_with::<SegmentCommand32> (0, le)?; Ok((Segment32 (comm), size))}, LC_SEGMENT_64 => { let comm = bytes.pread_with::<SegmentCommand64> (0, le)?; Ok((Segment64 (comm), size))}, LC_DYSYMTAB => { let comm = bytes.pread_with::<DysymtabCommand> (0, le)?; Ok((Dysymtab (comm), size))}, LC_LOAD_DYLINKER => { let comm = bytes.pread_with::<DylinkerCommand> (0, le)?; Ok((LoadDylinker (comm), size))}, LC_ID_DYLINKER => { let comm = bytes.pread_with::<DylinkerCommand> (0, le)?; Ok((IdDylinker (comm), size))}, LC_UUID => { let comm = bytes.pread_with::<UuidCommand> (0, le)?; Ok((Uuid (comm), size))}, LC_SYMTAB => { let comm = bytes.pread_with::<SymtabCommand> (0, le)?; Ok((Symtab (comm), size))}, LC_SYMSEG => { let comm = bytes.pread_with::<SymsegCommand> (0, le)?; Ok((Symseg (comm), size))}, LC_THREAD => { let comm = bytes.pread_with::<ThreadCommand> (0, le)?; Ok((Thread (comm), size))}, LC_UNIXTHREAD => { let comm = 
bytes.pread_with::<ThreadCommand> (0, le)?; Ok((Unixthread (comm), size))}, LC_LOADFVMLIB => { let comm = bytes.pread_with::<FvmlibCommand> (0, le)?; Ok((LoadFvmlib (comm), size))}, LC_IDFVMLIB => { let comm = bytes.pread_with::<FvmlibCommand> (0, le)?; Ok((IdFvmlib (comm), size))}, LC_IDENT => { let comm = bytes.pread_with::<IdentCommand> (0, le)?; Ok((Ident (comm), size))}, LC_FVMFILE => { let comm = bytes.pread_with::<FvmfileCommand> (0, le)?; Ok((Fvmfile (comm), size))}, LC_PREPAGE => { let comm = bytes.pread_with::<LoadCommandHeader> (0, le)?; Ok((Prepage (comm), size))}, LC_LOAD_DYLIB => { let comm = bytes.pread_with::<DylibCommand> (0, le)?; Ok((LoadDylib (comm), size))}, LC_ID_DYLIB => { let comm = bytes.pread_with::<DylibCommand> (0, le)?; Ok((IdDylib (comm), size))}, LC_PREBOUND_DYLIB => { let comm = bytes.pread_with::<PreboundDylibCommand> (0, le)?; Ok((PreboundDylib (comm), size))}, LC_ROUTINES => { let comm = bytes.pread_with::<RoutinesCommand32> (0, le)?; Ok((Routines32 (comm), size))}, LC_ROUTINES_64 => { let comm = bytes.pread_with::<RoutinesCommand64> (0, le)?; Ok((Routines64 (comm), size))}, LC_SUB_FRAMEWORK => { let comm = bytes.pread_with::<SubFrameworkCommand> (0, le)?; Ok((SubFramework (comm), size))}, LC_SUB_UMBRELLA => { let comm = bytes.pread_with::<SubUmbrellaCommand> (0, le)?; Ok((SubUmbrella (comm), size))}, LC_SUB_CLIENT => { let comm = bytes.pread_with::<SubClientCommand> (0, le)?; Ok((SubClient (comm), size))}, LC_SUB_LIBRARY => { let comm = bytes.pread_with::<SubLibraryCommand> (0, le)?; Ok((SubLibrary (comm), size))}, LC_TWOLEVEL_HINTS => { let comm = bytes.pread_with::<TwolevelHintsCommand> (0, le)?; Ok((TwolevelHints (comm), size))}, LC_PREBIND_CKSUM => { let comm = bytes.pread_with::<PrebindCksumCommand> (0, le)?; Ok((PrebindCksum (comm), size))}, LC_LOAD_WEAK_DYLIB => { let comm = bytes.pread_with::<DylibCommand> (0, le)?; Ok((LoadWeakDylib (comm), size))}, LC_RPATH => { let comm = bytes.pread_with::<RpathCommand> (0, le)?; Ok((Rpath (comm), size))}, LC_CODE_SIGNATURE => { let comm = bytes.pread_with::<LinkeditDataCommand> (0, le)?; Ok((CodeSignature (comm), size))}, LC_SEGMENT_SPLIT_INFO => { let comm = bytes.pread_with::<LinkeditDataCommand> (0, le)?; Ok((SegmentSplitInfo (comm), size))}, LC_REEXPORT_DYLIB => { let comm = bytes.pread_with::<DylibCommand> (0, le)?; Ok((ReexportDylib (comm), size))}, LC_LAZY_LOAD_DYLIB => { let comm = bytes.pread_with::<DylibCommand> (0, le)?; Ok((LazyLoadDylib (comm), size))}, LC_ENCRYPTION_INFO => { let comm = bytes.pread_with::<EncryptionInfoCommand32>(0, le)?; Ok((EncryptionInfo32 (comm), size))}, LC_ENCRYPTION_INFO_64 => { let comm = bytes.pread_with::<EncryptionInfoCommand64>(0, le)?; Ok((EncryptionInfo64 (comm), size))}, LC_DYLD_INFO => { let comm = bytes.pread_with::<DyldInfoCommand> (0, le)?; Ok((DyldInfo (comm), size))}, LC_DYLD_INFO_ONLY => { let comm = bytes.pread_with::<DyldInfoCommand> (0, le)?; Ok((DyldInfoOnly (comm), size))}, LC_LOAD_UPWARD_DYLIB => { let comm = bytes.pread_with::<DylibCommand> (0, le)?; Ok((LoadUpwardDylib (comm), size))}, LC_VERSION_MIN_MACOSX => { let comm = bytes.pread_with::<VersionMinCommand> (0, le)?; Ok((VersionMinMacosx (comm), size))}, LC_VERSION_MIN_IPHONEOS => { let comm = bytes.pread_with::<VersionMinCommand> (0, le)?; Ok((VersionMinIphoneos (comm), size))}, LC_FUNCTION_STARTS => { let comm = bytes.pread_with::<LinkeditDataCommand> (0, le)?; Ok((FunctionStarts (comm), size))}, LC_DYLD_ENVIRONMENT => { let comm = bytes.pread_with::<DylinkerCommand> (0, le)?; 
Ok((DyldEnvironment (comm), size))}, LC_MAIN => { let comm = bytes.pread_with::<EntryPointCommand> (0, le)?; Ok((Main (comm), size))}, LC_DATA_IN_CODE => { let comm = bytes.pread_with::<LinkeditDataCommand> (0, le)?; Ok((DataInCode (comm), size))}, LC_SOURCE_VERSION => { let comm = bytes.pread_with::<SourceVersionCommand> (0, le)?; Ok((SourceVersion (comm), size))}, LC_DYLIB_CODE_SIGN_DRS => { let comm = bytes.pread_with::<LinkeditDataCommand> (0, le)?; Ok((DylibCodeSignDrs (comm), size))}, LC_LINKER_OPTION => { let comm = bytes.pread_with::<LinkeditDataCommand> (0, le)?; Ok((LinkerOption (comm), size))}, LC_LINKER_OPTIMIZATION_HINT => {let comm = bytes.pread_with::<LinkeditDataCommand> (0, le)?; Ok((LinkerOptimizationHint (comm), size))}, _ => Ok((Unimplemented (lc.clone()), size)), } } } impl CommandVariant { pub fn cmdsize(&self) -> usize { use self::CommandVariant::*; let cmdsize = match *self { Segment32 (comm) => comm.cmdsize, Segment64 (comm) => comm.cmdsize, Uuid (comm) => comm.cmdsize, Symtab (comm) => comm.cmdsize, Symseg (comm) => comm.cmdsize, Thread (comm) => comm.cmdsize, Unixthread (comm) => comm.cmdsize, LoadFvmlib (comm) => comm.cmdsize, IdFvmlib (comm) => comm.cmdsize, Ident (comm) => comm.cmdsize, Fvmfile (comm) => comm.cmdsize, Prepage (comm) => comm.cmdsize, Dysymtab (comm) => comm.cmdsize, LoadDylib (comm) => comm.cmdsize, IdDylib (comm) => comm.cmdsize, LoadDylinker (comm) => comm.cmdsize, IdDylinker (comm) => comm.cmdsize, PreboundDylib (comm) => comm.cmdsize, Routines32 (comm) => comm.cmdsize, Routines64 (comm) => comm.cmdsize, SubFramework (comm) => comm.cmdsize, SubUmbrella (comm) => comm.cmdsize, SubClient (comm) => comm.cmdsize, SubLibrary (comm) => comm.cmdsize, TwolevelHints (comm) => comm.cmdsize, PrebindCksum (comm) => comm.cmdsize, LoadWeakDylib (comm) => comm.cmdsize, Rpath (comm) => comm.cmdsize, CodeSignature (comm) => comm.cmdsize, SegmentSplitInfo (comm) => comm.cmdsize, ReexportDylib (comm) => comm.cmdsize, LazyLoadDylib (comm) => comm.cmdsize, EncryptionInfo32 (comm) => comm.cmdsize, EncryptionInfo64 (comm) => comm.cmdsize, DyldInfo (comm) => comm.cmdsize, DyldInfoOnly (comm) => comm.cmdsize, LoadUpwardDylib (comm) => comm.cmdsize, VersionMinMacosx (comm) => comm.cmdsize, VersionMinIphoneos (comm) => comm.cmdsize, FunctionStarts (comm) => comm.cmdsize, DyldEnvironment (comm) => comm.cmdsize, Main (comm) => comm.cmdsize, DataInCode (comm) => comm.cmdsize, SourceVersion (comm) => comm.cmdsize, DylibCodeSignDrs (comm) => comm.cmdsize, LinkerOption (comm) => comm.cmdsize, LinkerOptimizationHint (comm) => comm.cmdsize, Unimplemented (comm) => comm.cmdsize, }; cmdsize as usize } pub fn cmd(&self) -> u32 { use self::CommandVariant::*; match *self { Segment32 (comm) => comm.cmd, Segment64 (comm) => comm.cmd, Uuid (comm) => comm.cmd, Symtab (comm) => comm.cmd, Symseg (comm) => comm.cmd, Thread (comm) => comm.cmd, Unixthread (comm) => comm.cmd, LoadFvmlib (comm) => comm.cmd, IdFvmlib (comm) => comm.cmd, Ident (comm) => comm.cmd, Fvmfile (comm) => comm.cmd, Prepage (comm) => comm.cmd, Dysymtab (comm) => comm.cmd, LoadDylib (comm) => comm.cmd, IdDylib (comm) => comm.cmd, LoadDylinker (comm) => comm.cmd, IdDylinker (comm) => comm.cmd, PreboundDylib (comm) => comm.cmd, Routines32 (comm) => comm.cmd, Routines64 (comm) => comm.cmd, SubFramework (comm) => comm.cmd, SubUmbrella (comm) => comm.cmd, SubClient (comm) => comm.cmd, SubLibrary (comm) => comm.cmd, TwolevelHints (comm) => comm.cmd, PrebindCksum (comm) => comm.cmd, LoadWeakDylib (comm) => comm.cmd, Rpath 
(comm) => comm.cmd, CodeSignature (comm) => comm.cmd, SegmentSplitInfo (comm) => comm.cmd, ReexportDylib (comm) => comm.cmd, LazyLoadDylib (comm) => comm.cmd, EncryptionInfo32 (comm) => comm.cmd, EncryptionInfo64 (comm) => comm.cmd, DyldInfo (comm) => comm.cmd, DyldInfoOnly (comm) => comm.cmd, LoadUpwardDylib (comm) => comm.cmd, VersionMinMacosx (comm) => comm.cmd, VersionMinIphoneos (comm) => comm.cmd, FunctionStarts (comm) => comm.cmd, DyldEnvironment (comm) => comm.cmd, Main (comm) => comm.cmd, DataInCode (comm) => comm.cmd, SourceVersion (comm) => comm.cmd, DylibCodeSignDrs (comm) => comm.cmd, LinkerOption (comm) => comm.cmd, LinkerOptimizationHint (comm) => comm.cmd, Unimplemented (comm) => comm.cmd, } } } #[derive(Debug)] /// A tagged LoadCommand union pub struct LoadCommand { /// The offset this load command occurs at pub offset: usize, /// Which load command this is inside a variant pub command: CommandVariant, } impl LoadCommand { /// Parse a load command from `bytes` at `offset` with the `le` endianness pub fn parse(bytes: &[u8], offset: &mut usize, le: scroll::Endian) -> error::Result<Self> { let start = *offset; let command = bytes.pread_with::<CommandVariant>(start, le)?; let size = command.cmdsize(); *offset = start + size; Ok(LoadCommand { offset: start, command }) } }
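// --- Illustrative sketch (not part of the original source) ---
// How the `LoadCommand::parse` helper above might be driven to walk every
// load command in a 64-bit little-endian Mach-O image. The 32-byte size of
// `mach_header_64` is standard Mach-O; `bytes` holding the whole image, the
// function name, and taking `ncmds` as a parameter are assumptions of this
// example (in practice `ncmds` comes from the parsed Mach-O header).
fn walk_load_commands(bytes: &[u8], ncmds: usize) -> error::Result<Vec<LoadCommand>> {
    // Load commands begin immediately after the Mach-O header.
    let mut offset = 32;
    let mut cmds = Vec::with_capacity(ncmds);
    for _ in 0..ncmds {
        // `parse` reads one command and advances `offset` by its `cmdsize`.
        cmds.push(LoadCommand::parse(bytes, &mut offset, scroll::LE)?);
    }
    Ok(cmds)
}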
// Copyright 2018-2019 Cryptape Technologies LLC. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. macro_rules! check_rand { ($name:ident, $uint:ident) => { #[test] fn $name() { let x = nfuint::$uint::thread_random(); let y = nfuint::$uint::thread_random(); // If this test fails, please check if there is a bug or you are just too lucky. assert!(!x.is_zero()); assert!(!y.is_zero()); assert!(x != y); } }; } check_rand!(rand_u128, U128); check_rand!(rand_u160, U160); check_rand!(rand_u224, U224); check_rand!(rand_u256, U256); check_rand!(rand_u384, U384); check_rand!(rand_u512, U512); check_rand!(rand_u520, U520); check_rand!(rand_u1024, U1024); check_rand!(rand_u2048, U2048); check_rand!(rand_u4096, U4096);
use itertools::Itertools; use stdx::to_lower_snake_case; use syntax::ast::VisibilityOwner; use syntax::ast::{self, AstNode, NameOwner}; use crate::{ utils::{add_method_to_adt, find_struct_impl}, AssistContext, AssistId, AssistKind, Assists, }; // Assist: generate_enum_try_into_method // // Generate a `try_into_` method for an enum variant. // // ``` // enum Value { // Number(i32), // Text(String)$0, // } // ``` // -> // ``` // enum Value { // Number(i32), // Text(String), // } // // impl Value { // fn try_into_text(self) -> Result<String, Self> { // if let Self::Text(v) = self { // Ok(v) // } else { // Err(self) // } // } // } // ``` pub(crate) fn generate_enum_try_into_method(acc: &mut Assists, ctx: &AssistContext) -> Option<()> { generate_enum_projection_method( acc, ctx, "generate_enum_try_into_method", "Generate a `try_into_` method for an enum variant", ProjectionProps { fn_name_prefix: "try_into", self_param: "self", return_prefix: "Result<", return_suffix: ", Self>", happy_case: "Ok", sad_case: "Err(self)", }, ) } // Assist: generate_enum_as_method // // Generate an `as_` method for an enum variant. // // ``` // enum Value { // Number(i32), // Text(String)$0, // } // ``` // -> // ``` // enum Value { // Number(i32), // Text(String), // } // // impl Value { // fn as_text(&self) -> Option<&String> { // if let Self::Text(v) = self { // Some(v) // } else { // None // } // } // } // ``` pub(crate) fn generate_enum_as_method(acc: &mut Assists, ctx: &AssistContext) -> Option<()> { generate_enum_projection_method( acc, ctx, "generate_enum_as_method", "Generate an `as_` method for an enum variant", ProjectionProps { fn_name_prefix: "as", self_param: "&self", return_prefix: "Option<&", return_suffix: ">", happy_case: "Some", sad_case: "None", }, ) } struct ProjectionProps { fn_name_prefix: &'static str, self_param: &'static str, return_prefix: &'static str, return_suffix: &'static str, happy_case: &'static str, sad_case: &'static str, } fn generate_enum_projection_method( acc: &mut Assists, ctx: &AssistContext, assist_id: &'static str, assist_description: &str, props: ProjectionProps, ) -> Option<()> { let variant = ctx.find_node_at_offset::<ast::Variant>()?; let variant_name = variant.name()?; let parent_enum = ast::Adt::Enum(variant.parent_enum()); let (pattern_suffix, field_type, bound_name) = match variant.kind() { ast::StructKind::Record(record) => { let (field,) = record.fields().collect_tuple()?; let name = field.name()?.to_string(); let ty = field.ty()?; let pattern_suffix = format!(" {{ {} }}", name); (pattern_suffix, ty, name) } ast::StructKind::Tuple(tuple) => { let (field,) = tuple.fields().collect_tuple()?; let ty = field.ty()?; ("(v)".to_owned(), ty, "v".to_owned()) } ast::StructKind::Unit => return None, }; let fn_name = format!("{}_{}", props.fn_name_prefix, &to_lower_snake_case(variant_name.text())); // Return early if we've found an existing fn let impl_def = find_struct_impl(&ctx, &parent_enum, &fn_name)?; let target = variant.syntax().text_range(); acc.add(AssistId(assist_id, AssistKind::Generate), assist_description, target, |builder| { let vis = parent_enum.visibility().map_or(String::new(), |v| format!("{} ", v)); let method = format!( " {0}fn {1}({2}) -> {3}{4}{5} {{ if let Self::{6}{7} = self {{ {8}({9}) }} else {{ {10} }} }}", vis, fn_name, props.self_param, props.return_prefix, field_type.syntax(), props.return_suffix, variant_name, pattern_suffix, props.happy_case, bound_name, props.sad_case, ); add_method_to_adt(builder, &parent_enum, impl_def, &method); }) }
#[cfg(test)] mod tests { use crate::tests::{check_assist, check_assist_not_applicable}; use super::*; #[test] fn test_generate_enum_try_into_tuple_variant() { check_assist( generate_enum_try_into_method, r#" enum Value { Number(i32), Text(String)$0, }"#, r#"enum Value { Number(i32), Text(String), } impl Value { fn try_into_text(self) -> Result<String, Self> { if let Self::Text(v) = self { Ok(v) } else { Err(self) } } }"#, ); } #[test] fn test_generate_enum_try_into_already_implemented() { check_assist_not_applicable( generate_enum_try_into_method, r#"enum Value { Number(i32), Text(String)$0, } impl Value { fn try_into_text(self) -> Result<String, Self> { if let Self::Text(v) = self { Ok(v) } else { Err(self) } } }"#, ); } #[test] fn test_generate_enum_try_into_unit_variant() { check_assist_not_applicable( generate_enum_try_into_method, r#"enum Value { Number(i32), Text(String), Unit$0, }"#, ); } #[test] fn test_generate_enum_try_into_record_with_multiple_fields() { check_assist_not_applicable( generate_enum_try_into_method, r#"enum Value { Number(i32), Text(String), Both { first: i32, second: String }$0, }"#, ); } #[test] fn test_generate_enum_try_into_tuple_with_multiple_fields() { check_assist_not_applicable( generate_enum_try_into_method, r#"enum Value { Number(i32), Text(String, String)$0, }"#, ); } #[test] fn test_generate_enum_try_into_record_variant() { check_assist( generate_enum_try_into_method, r#"enum Value { Number(i32), Text { text: String }$0, }"#, r#"enum Value { Number(i32), Text { text: String }, } impl Value { fn try_into_text(self) -> Result<String, Self> { if let Self::Text { text } = self { Ok(text) } else { Err(self) } } }"#, ); } #[test] fn test_generate_enum_as_tuple_variant() { check_assist( generate_enum_as_method, r#" enum Value { Number(i32), Text(String)$0, }"#, r#"enum Value { Number(i32), Text(String), } impl Value { fn as_text(&self) -> Option<&String> { if let Self::Text(v) = self { Some(v) } else { None } } }"#, ); } #[test] fn test_generate_enum_as_record_variant() { check_assist( generate_enum_as_method, r#"enum Value { Number(i32), Text { text: String }$0, }"#, r#"enum Value { Number(i32), Text { text: String }, } impl Value { fn as_text(&self) -> Option<&String> { if let Self::Text { text } = self { Some(text) } else { None } } }"#, ); } }
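// --- Illustrative sketch (not part of the original source) ---
// The shape of the code these assists emit, written out by hand for the
// `Value` enum from the doc examples above, plus a caller exercising both
// projections. `demo` is a hypothetical function, not a rust-analyzer API.
enum Value {
    Number(i32),
    Text(String),
}
impl Value {
    // As emitted by `generate_enum_as_method`: borrows, returns Option<&_>.
    fn as_text(&self) -> Option<&String> {
        if let Self::Text(v) = self { Some(v) } else { None }
    }
    // As emitted by `generate_enum_try_into_method`: consumes, returns the
    // payload on success and gives `self` back unchanged on failure.
    fn try_into_text(self) -> Result<String, Self> {
        if let Self::Text(v) = self { Ok(v) } else { Err(self) }
    }
}
fn demo(value: Value) {
    if let Some(text) = value.as_text() {
        println!("borrowed: {}", text);
    }
    match value.try_into_text() {
        Ok(text) => println!("owned: {}", text),
        Err(_other_variant) => println!("not a Text variant"),
    }
}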
mod app; mod challenges; mod colors; mod common; mod cutscene; mod debug; mod devtools; mod edit; mod game; mod helpers; mod info; mod layer; mod managed; mod options; mod pregame; mod render; mod sandbox; use crate::app::Flags; use abstutil::CmdArgs; use sim::SimFlags; fn main() { let mut args = CmdArgs::new(); if args.enabled("--prebake") { challenges::prebake_all(); return; } let mut flags = Flags { sim_flags: SimFlags::from_args(&mut args), draw_lane_markings: !args.enabled("--dont_draw_lane_markings"), num_agents: args.optional_parse("--num_agents", |s| s.parse()), }; let mut opts = options::Options::default(); opts.dev = args.enabled("--dev"); if args.enabled("--lowzoom") { opts.min_zoom_for_detail = 1.0; } if let Some(x) = args.optional("--color_scheme") { let mut ok = false; let mut options = Vec::new(); for c in colors::ColorSchemeChoice::choices() { options.push(c.label.clone()); if c.label == x { opts.color_scheme = c.data; ok = true; break; } } if !ok { panic!( "Invalid --color_scheme={}. Choices: {}", x, options.join(", ") ); } } let mut settings = ezgui::Settings::new("A/B Street", "../data/system/fonts"); settings.window_icon("../data/system/assets/pregame/icon.png"); if args.enabled("--enable_profiler") { settings.enable_profiling(); } if args.enabled("--dump_raw_events") { settings.dump_raw_events(); } if let Some(n) = args.optional_parse("--font_size", |s| s.parse::<usize>()) { settings.default_font_size(n); } if let Some(s) = args.optional_parse("--scale_factor", |s| s.parse::<f64>()) { settings.scale_factor(s); } let mut mode = None; if let Some(x) = args.optional("--challenge") { let mut aliases = Vec::new(); 'OUTER: for (_, stages) in challenges::Challenge::all() { for challenge in stages { if challenge.alias == x { flags.sim_flags.load = challenge.gameplay.map_path(); mode = Some(challenge.gameplay); break 'OUTER; } else { aliases.push(challenge.alias); } } } if mode.is_none() { panic!("Invalid --challenge={}. Choices: {}", x, aliases.join(", ")); } } // TODO Stage only, not part if let Some(n) = args.optional_parse("--tutorial", |s| s.parse::<usize>()) { mode = Some(sandbox::GameplayMode::Tutorial( sandbox::TutorialPointer::new(n - 1, 0), )); } if mode.is_none() && flags.sim_flags.load.contains("scenarios/") { // TODO regex let parts = flags.sim_flags.load.split("/").collect::<Vec<_>>(); let map_path = abstutil::path_map(parts[4]); let scenario = abstutil::basename(parts[5]); flags.sim_flags.load = map_path.clone(); mode = Some(sandbox::GameplayMode::PlayScenario(map_path, scenario)); } let start_with_edits = args.optional("--edits"); args.done(); ezgui::run(settings, |ctx| { game::Game::new(flags, opts, start_with_edits, mode, ctx) }); }
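// --- Illustrative sketch (not part of the original source) ---
// Both --color_scheme and --challenge above follow the same pattern: scan a
// list of labelled choices, take the match, and panic with the valid labels
// otherwise. Factored out, that pattern might look like this; the helper
// name and signature are ours, not part of the codebase.
fn pick_choice<T>(flag: &str, wanted: &str, choices: Vec<(String, T)>) -> T {
    let labels: Vec<String> = choices.iter().map(|(label, _)| label.clone()).collect();
    for (label, value) in choices {
        if label == wanted {
            return value;
        }
    }
    panic!("Invalid {}={}. Choices: {}", flag, wanted, labels.join(", "));
}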
// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. pub use crate::features::entries; use libc::{self, gid_t, lchown, uid_t}; use std::io::Error as IOError; use std::io::Result as IOResult; use std::ffi::CString; use std::fs::Metadata; use std::os::unix::fs::MetadataExt; use std::os::unix::ffi::OsStrExt; use std::path::Path; /// The various levels of verbosity #[derive(PartialEq, Clone, Debug)] pub enum Verbosity { Silent, Changes, Verbose, Normal, } /// Actually perform the change of group on a path fn chgrp<P: AsRef<Path>>(path: P, gid: gid_t, follow: bool) -> IOResult<()> { let path = path.as_ref(); let s = CString::new(path.as_os_str().as_bytes()).unwrap(); let ret = unsafe { if follow { libc::chown(s.as_ptr(), 0_u32.wrapping_sub(1), gid) } else { lchown(s.as_ptr(), 0_u32.wrapping_sub(1), gid) } }; if ret == 0 { Ok(()) } else { Err(IOError::last_os_error()) } } /// Perform the change of group on a path /// with the various options /// and error message management pub fn wrap_chgrp<P: AsRef<Path>>( path: P, meta: &Metadata, dest_gid: Option<gid_t>, follow: bool, verbosity: Verbosity, ) -> Result<String, String> { use self::Verbosity::*; let path = path.as_ref(); let mut out: String = String::new(); let dest_gid = dest_gid.unwrap_or_else(|| meta.gid()); if let Err(e) = chgrp(path, dest_gid, follow) { match verbosity { Silent => (), _ => { out = format!("changing group of '{}': {}", path.display(), e); if verbosity == Verbose { out = format!( "{}\nfailed to change group of '{}' from {} to {}", out, path.display(), entries::gid2grp(meta.gid()).unwrap(), entries::gid2grp(dest_gid).unwrap() ); }; } } return Err(out); } else { let changed = dest_gid != meta.gid(); if changed { match verbosity { Changes | Verbose => { out = format!( "changed group of '{}' from {} to {}", path.display(), entries::gid2grp(meta.gid()).unwrap(), entries::gid2grp(dest_gid).unwrap() ); } _ => (), }; } else if verbosity == Verbose { out = format!( "group of '{}' retained as {}", path.display(), entries::gid2grp(dest_gid).unwrap_or_default() ); } } Ok(out) } /// Actually perform the change of owner on a path fn chown<P: AsRef<Path>>(path: P, uid: uid_t, gid: gid_t, follow: bool) -> IOResult<()> { let path = path.as_ref(); let s = CString::new(path.as_os_str().as_bytes()).unwrap(); let ret = unsafe { if follow { libc::chown(s.as_ptr(), uid, gid) } else { lchown(s.as_ptr(), uid, gid) } }; if ret == 0 { Ok(()) } else { Err(IOError::last_os_error()) } } /// Perform the change of owner on a path /// with the various options /// and error message management pub fn wrap_chown<P: AsRef<Path>>( path: P, meta: &Metadata, dest_uid: Option<u32>, dest_gid: Option<u32>, follow: bool, verbosity: Verbosity, ) -> Result<String, String> { use self::Verbosity::*; let dest_uid = dest_uid.unwrap_or_else(|| meta.uid()); let dest_gid = dest_gid.unwrap_or_else(|| meta.gid()); let path = path.as_ref(); let mut out: String = String::new(); if let Err(e) = chown(path, dest_uid, dest_gid, follow) { match verbosity { Silent => (), _ => { out = format!("changing ownership of '{}': {}", path.display(), e); if verbosity == Verbose { out = format!( "{}\nfailed to change ownership of '{}' from {}:{} to {}:{}", out, path.display(), entries::uid2usr(meta.uid()).unwrap(), entries::gid2grp(meta.gid()).unwrap(), entries::uid2usr(dest_uid).unwrap(), entries::gid2grp(dest_gid).unwrap() ); }; } } return Err(out); } else { let
changed = dest_uid != meta.uid() || dest_gid != meta.gid(); if changed { match verbosity { Changes | Verbose => { out = format!( "changed ownership of '{}' from {}:{} to {}:{}", path.display(), entries::uid2usr(meta.uid()).unwrap(), entries::gid2grp(meta.gid()).unwrap(), entries::uid2usr(dest_uid).unwrap(), entries::gid2grp(dest_gid).unwrap() ); } _ => (), }; } else if verbosity == Verbose { out = format!( "ownership of '{}' retained as {}:{}", path.display(), entries::uid2usr(dest_uid).unwrap(), entries::gid2grp(dest_gid).unwrap() ); } } Ok(out) }
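// --- Illustrative sketch (not part of the original source) ---
// One way a caller might drive `wrap_chown`: stat the path, pass the
// metadata in, and print the returned message. The uid value and the
// function name are made up for the example.
fn example_chown(path: &std::path::Path) -> Result<(), String> {
    // symlink_metadata so a dangling symlink can still be re-owned.
    let meta = std::fs::symlink_metadata(path)
        .map_err(|e| format!("cannot stat '{}': {}", path.display(), e))?;
    let msg = wrap_chown(
        path,
        &meta,
        Some(1000),          // hypothetical target uid
        None,                // keep the current group
        false,               // act on the link itself, like lchown
        Verbosity::Changes,
    )?;
    if !msg.is_empty() {
        println!("{}", msg);
    }
    Ok(())
}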
use super::{DateTime, NodeId, ReactionSummary, Repository, User}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize)] pub struct Issue { pub id: u64, pub node_id: NodeId, pub url: String, pub repository_url: String, pub labels_url: String, pub comments_url: String, pub events_url: String, pub html_url: String, pub number: u64, pub state: State, pub title: String, pub body: Option<String>, pub user: User, pub labels: Vec<Label>, pub assignee: Option<User>, pub assignees: Vec<User>, pub milestone: Option<Milestone>, pub reactions: Option<ReactionSummary>, pub locked: bool, pub active_lock_reason: Option<String>, pub comments: u64, pub pull_request: Option<PullRequestRef>, pub closed_at: Option<DateTime>, pub created_at: DateTime, pub updated_at: DateTime, pub repository: Option<Repository>, } impl Issue { pub fn is_pull_request(&self) -> bool { self.pull_request.is_some() } } #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum State { Open, Closed, } #[derive(Clone, Debug, Deserialize)] pub struct PullRequestRef { pub url: String, pub html_url: String, pub diff_url: String, pub patch_url: String, } #[derive(Clone, Debug, Deserialize)] pub struct Milestone { pub url: String, pub html_url: String, pub labels_url: String, pub id: u64, pub node_id: NodeId, pub number: u64, pub state: State, pub title: String, pub description: Option<String>, pub creator: User, pub open_issues: u64, pub closed_issues: u64, pub created_at: DateTime, pub updated_at: DateTime, pub closed_at: Option<DateTime>, pub due_on: DateTime, } #[derive(Clone, Debug, Deserialize)] pub struct Comment { pub id: u64, pub node_id: NodeId, pub url: String, pub html_url: String, pub body: Option<String>, pub reactions: Option<ReactionSummary>, pub user: User, pub created_at: DateTime, pub updated_at: DateTime, } impl Comment { pub fn body(&self) -> Option<&str> { self.body.as_ref().map(AsRef::as_ref) } } #[derive(Clone, Debug, Deserialize)] pub struct Label { pub id: u64, pub node_id: NodeId, pub url: String, pub name: String, pub description: Option<String>, pub color: String, pub default: bool, }
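// --- Illustrative sketch (not part of the original source) ---
// These are plain serde models, so a GitHub issues API payload can be
// decoded with serde_json (assumed as a dependency here; it is not imported
// by this file). The function name is ours.
fn example_decode(payload: &str) -> serde_json::Result<Issue> {
    let issue: Issue = serde_json::from_str(payload)?;
    // The REST API reports pull requests as issues; the `pull_request`
    // field disambiguates them.
    if issue.is_pull_request() {
        println!("#{} is a pull request", issue.number);
    }
    Ok(issue)
}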
//! A multi-producer, single-consumer queue for sending values across //! asynchronous tasks. //! //! Similarly to the `std`, channel creation provides [`Receiver`] and //! [`Sender`] handles. [`Receiver`] implements [`Stream`] and allows a task to //! read values out of the channel. If there is no message to read from the //! channel, the current task will be notified when a new value is sent. //! [`Sender`] implements the `Sink` trait and allows a task to send messages into //! the channel. If the channel is at capacity, the send will be rejected and //! the task will be notified when additional capacity is available. In other //! words, the channel provides backpressure. //! //! Unbounded channels are also available using the `unbounded` constructor. //! //! # Disconnection //! //! When all [`Sender`] handles have been dropped, it is no longer //! possible to send values into the channel. This is considered the termination //! event of the stream. As such, [`Receiver::poll_next`] //! will return `Ok(Ready(None))`. //! //! If the [`Receiver`] handle is dropped, then messages can no longer //! be read out of the channel. In this case, all further attempts to send will //! result in an error. //! //! # Clean Shutdown //! //! If the [`Receiver`] is simply dropped, then it is possible for //! there to be messages still in the channel that will not be processed. As //! such, it is usually desirable to perform a "clean" shutdown. To do this, the //! receiver will first call `close`, which will prevent any further messages to //! be sent into the channel. Then, the receiver consumes the channel to //! completion, at which point the receiver can be dropped. //! //! [`Sender`]: struct.Sender.html //! [`Receiver`]: struct.Receiver.html //! [`Stream`]: ../../futures_core/stream/trait.Stream.html //! [`Receiver::poll_next`]: //! ../../futures_core/stream/trait.Stream.html#tymethod.poll_next // At the core, the channel uses an atomic FIFO queue for message passing. This // queue is used as the primary coordination primitive. In order to enforce // capacity limits and handle back pressure, a secondary FIFO queue is used to // send parked task handles. // // The general idea is that the channel is created with a `buffer` size of `n`. // The channel capacity is `n + num-senders`. Each sender gets one "guaranteed" // slot to hold a message. This allows `Sender` to know for a fact that a send // will succeed *before* starting to do the actual work of sending the value. // Since most of this work is lock-free, once the work starts, it is impossible // to safely revert. // // If the sender is unable to process a send operation, then the current // task is parked and the handle is sent on the parked task queue. // // Note that the implementation guarantees that the channel capacity will never // exceed the configured limit, however there is no *strict* guarantee that the // receiver will wake up a parked task *immediately* when a slot becomes // available. However, it will almost always unpark a task when a slot becomes // available and it is *guaranteed* that a sender will be unparked when the // message that caused the sender to become parked is read out of the channel. // // The steps for sending a message are roughly: // // 1) Increment the channel message count // 2) If the channel is at capacity, push the task handle onto the wait queue // 3) Push the message onto the message queue. 
// // The steps for receiving a message are roughly: // // 1) Pop a message from the message queue // 2) Pop a task handle from the wait queue // 3) Decrement the channel message count. // // It's important for the order of operations on lock-free structures to happen // in reverse order between the sender and receiver. This makes the message // queue the primary coordination structure and establishes the necessary // happens-before semantics required for the acquire / release semantics used // by the queue structure. use futures_core::stream::Stream; use futures_core::task::{self, Waker, Poll}; use std::any::Any; use std::error::Error; use std::fmt; use std::marker::Unpin; use std::mem::PinMut; use std::sync::{Arc, Mutex}; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::SeqCst; use std::thread; use std::usize; use crate::mpsc::queue::{Queue, PopResult}; mod queue; /// The transmission end of a bounded mpsc channel. /// /// This value is created by the [`channel`](channel) function. #[derive(Debug)] pub struct Sender<T> { // Channel state shared between the sender and receiver. inner: Arc<Inner<T>>, // Handle to the task that is blocked on this sender. This handle is sent // to the receiver half in order to be notified when the sender becomes // unblocked. sender_task: Arc<Mutex<SenderTask>>, // True if the sender might be blocked. This is an optimization to avoid // having to lock the mutex most of the time. maybe_parked: bool, } // We never project PinMut<Sender> to `PinMut<T>` impl<T> Unpin for Sender<T> {} /// The transmission end of an unbounded mpsc channel. /// /// This value is created by the [`unbounded`](unbounded) function. #[derive(Debug)] pub struct UnboundedSender<T>(Sender<T>); trait AssertKinds: Send + Sync + Clone {} impl AssertKinds for UnboundedSender<u32> {} /// The receiving end of a bounded mpsc channel. /// /// This value is created by the [`channel`](channel) function. #[derive(Debug)] pub struct Receiver<T> { inner: Arc<Inner<T>>, } /// The receiving end of an unbounded mpsc channel. /// /// This value is created by the [`unbounded`](unbounded) function. #[derive(Debug)] pub struct UnboundedReceiver<T>(Receiver<T>); // `PinMut<UnboundedReceiver<T>>` is never projected to `PinMut<T>` impl<T> Unpin for UnboundedReceiver<T> {} /// The error type for [`Sender`s](Sender) used as `Sink`s. #[derive(Clone, Debug, PartialEq, Eq)] pub struct SendError { kind: SendErrorKind, } /// The error type returned from [`try_send`](Sender::try_send). #[derive(Clone, PartialEq, Eq)] pub struct TrySendError<T> { err: SendError, val: T, } #[derive(Clone, Debug, PartialEq, Eq)] enum SendErrorKind { Full, Disconnected, } /// The error type returned from [`try_next`](Receiver::try_next). pub struct TryRecvError { _inner: (), } impl fmt::Display for SendError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { if self.is_full() { write!(fmt, "send failed because channel is full") } else { write!(fmt, "send failed because receiver is gone") } } } impl Error for SendError { fn description(&self) -> &str { if self.is_full() { "send failed because channel is full" } else { "send failed because receiver is gone" } } } impl SendError { /// Returns true if this error is a result of the channel being full. pub fn is_full(&self) -> bool { match self.kind { SendErrorKind::Full => true, _ => false, } } /// Returns true if this error is a result of the receiver being dropped. 
pub fn is_disconnected(&self) -> bool { match self.kind { SendErrorKind::Disconnected => true, _ => false, } } } impl<T> fmt::Debug for TrySendError<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("TrySendError") .field("kind", &self.err.kind) .finish() } } impl<T> fmt::Display for TrySendError<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { if self.is_full() { write!(fmt, "send failed because channel is full") } else { write!(fmt, "send failed because receiver is gone") } } } impl<T: Any> Error for TrySendError<T> { fn description(&self) -> &str { if self.is_full() { "send failed because channel is full" } else { "send failed because receiver is gone" } } } impl<T> TrySendError<T> { /// Returns true if this error is a result of the channel being full. pub fn is_full(&self) -> bool { self.err.is_full() } /// Returns true if this error is a result of the receiver being dropped. pub fn is_disconnected(&self) -> bool { self.err.is_disconnected() } /// Returns the message that was attempted to be sent but failed. pub fn into_inner(self) -> T { self.val } /// Drops the message and converts into a `SendError`. pub fn into_send_error(self) -> SendError { self.err } } impl fmt::Debug for TryRecvError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_tuple("TryRecvError") .finish() } } impl fmt::Display for TryRecvError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.write_str(self.description()) } } impl Error for TryRecvError { fn description(&self) -> &str { "receiver channel is empty" } } #[derive(Debug)] struct Inner<T> { // Max buffer size of the channel. If `None` then the channel is unbounded. buffer: Option<usize>, // Internal channel state. Consists of the number of messages stored in the // channel as well as a flag signalling that the channel is closed. state: AtomicUsize, // Atomic, FIFO queue used to send messages to the receiver message_queue: Queue<Option<T>>, // Atomic, FIFO queue used to send parked task handles to the receiver. parked_queue: Queue<Arc<Mutex<SenderTask>>>, // Number of senders in existence num_senders: AtomicUsize, // Handle to the receiver's task. recv_task: Mutex<ReceiverTask>, } // Struct representation of `Inner::state`. #[derive(Debug, Clone, Copy)] struct State { // `true` when the channel is open is_open: bool, // Number of messages in the channel num_messages: usize, } #[derive(Debug)] struct ReceiverTask { unparked: bool, task: Option<Waker>, } // Returned from Receiver::try_park() enum TryPark { Parked, Closed, NotEmpty, } // The `is_open` flag is stored in the left-most bit of `Inner::state` const OPEN_MASK: usize = usize::MAX - (usize::MAX >> 1); // When a new channel is created, it is created in the open state with no // pending messages. const INIT_STATE: usize = OPEN_MASK; // The maximum number of messages that a channel can track is `usize::MAX >> 1` const MAX_CAPACITY: usize = !(OPEN_MASK); // The maximum requested buffer size must be less than the maximum capacity of // a channel. This is because each sender gets a guaranteed slot. const MAX_BUFFER: usize = MAX_CAPACITY >> 1; // Sent to the consumer to wake up blocked producers #[derive(Debug)] struct SenderTask { task: Option<Waker>, is_parked: bool, } impl SenderTask { fn new() -> Self { SenderTask { task: None, is_parked: false, } } fn notify(&mut self) { self.is_parked = false; if let Some(task) = self.task.take() { task.wake(); } } } /// Creates a bounded mpsc channel for communicating between asynchronous tasks. 
/// /// Being bounded, this channel provides backpressure to ensure that the sender /// outpaces the receiver by only a limited amount. The channel's capacity is /// equal to `buffer + num-senders`. In other words, each sender gets a /// guaranteed slot in the channel capacity, and on top of that there are /// `buffer` "first come, first serve" slots available to all senders. /// /// The [`Receiver`](Receiver) returned implements the /// [`Stream`](futures_core::stream::Stream) trait, while [`Sender`](Sender) implements /// `Sink`. pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) { // Check that the requested buffer size does not exceed the maximum buffer // size permitted by the system. assert!(buffer < MAX_BUFFER, "requested buffer size too large"); channel2(Some(buffer)) } /// Creates an unbounded mpsc channel for communicating between asynchronous /// tasks. /// /// A `send` on this channel will always succeed as long as the receive half has /// not been closed. If the receiver falls behind, messages will be arbitrarily /// buffered. /// /// **Note** that the amount of available system memory is an implicit bound to /// the channel. Using an `unbounded` channel has the ability of causing the /// process to run out of memory. In this case, the process will be aborted. pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) { let (tx, rx) = channel2(None); (UnboundedSender(tx), UnboundedReceiver(rx)) } fn channel2<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) { let inner = Arc::new(Inner { buffer, state: AtomicUsize::new(INIT_STATE), message_queue: Queue::new(), parked_queue: Queue::new(), num_senders: AtomicUsize::new(1), recv_task: Mutex::new(ReceiverTask { unparked: false, task: None, }), }); let tx = Sender { inner: inner.clone(), sender_task: Arc::new(Mutex::new(SenderTask::new())), maybe_parked: false, }; let rx = Receiver { inner, }; (tx, rx) } /* * * ===== impl Sender ===== * */ impl<T> Sender<T> { /// Attempts to send a message on this `Sender`, returning the message /// if there was an error. pub fn try_send(&mut self, msg: T) -> Result<(), TrySendError<T>> { // If the sender is currently blocked, reject the message if !self.poll_unparked(None).is_ready() { return Err(TrySendError { err: SendError { kind: SendErrorKind::Full, }, val: msg, }); } // The channel has capacity to accept the message, so send it self.do_send(None, msg) } /// Send a message on the channel. /// /// This function should only be called after /// [`poll_ready`](Sender::poll_ready) has reported that the channel is /// ready to receive a message. pub fn start_send(&mut self, msg: T) -> Result<(), SendError> { self.try_send(msg) .map_err(|e| e.err) } // Do the send without failing // None means close fn do_send(&mut self, cx: Option<&mut task::Context>, msg: T) -> Result<(), TrySendError<T>> { // Anyone calling do_send *should* make sure there is room first, // but assert here for tests as a sanity check. debug_assert!(self.poll_unparked(None).is_ready()); // First, increment the number of messages contained by the channel. // This operation will also atomically determine if the sender task // should be parked. // // None is returned in the case that the channel has been closed by the // receiver. This happens when `Receiver::close` is called or the // receiver is dropped.
let park_self = match self.inc_num_messages(false) { Some(park_self) => park_self, None => return Err(TrySendError { err: SendError { kind: SendErrorKind::Disconnected, }, val: msg, }), }; // If the channel has reached capacity, then the sender task needs to // be parked. This will send the task handle on the parked task queue. // // However, when `do_send` is called while dropping the `Sender`, // `task::current()` can't be called safely. In this case, in order to // maintain internal consistency, a blank message is pushed onto the // parked task queue. if park_self { self.park(cx); } self.queue_push_and_signal(Some(msg)); Ok(()) } // Do the send without parking current task. fn do_send_nb(&self, msg: Option<T>) -> Result<(), TrySendError<T>> { match self.inc_num_messages(msg.is_none()) { Some(park_self) => assert!(!park_self), None => { // The receiver has closed the channel. Only abort if actually // sending a message. It is important that the stream // termination (None) is always sent. This technically means // that it is possible for the queue to contain the following // number of messages: // // num-senders + buffer + 1 // if let Some(msg) = msg { return Err(TrySendError { err: SendError { kind: SendErrorKind::Disconnected, }, val: msg, }); } else { return Ok(()); } }, }; self.queue_push_and_signal(msg); Ok(()) } fn poll_ready_nb(&self) -> Poll<Result<(), SendError>> { let state = decode_state(self.inner.state.load(SeqCst)); if state.is_open { Poll::Ready(Ok(())) } else { Poll::Ready(Err(SendError { kind: SendErrorKind::Full, })) } } // Push message to the queue and signal to the receiver fn queue_push_and_signal(&self, msg: Option<T>) { // Push the message onto the message queue self.inner.message_queue.push(msg); // Signal to the receiver that a message has been enqueued. If the // receiver is parked, this will unpark the task. self.signal(); } // Increment the number of queued messages. Returns if the sender should // block. fn inc_num_messages(&self, close: bool) -> Option<bool> { let mut curr = self.inner.state.load(SeqCst); loop { let mut state = decode_state(curr); // The receiver end closed the channel. if !state.is_open { return None; } // This probably is never hit? Odds are the process will run out of // memory first. It may be worth returning something else in this // case? assert!(state.num_messages < MAX_CAPACITY, "buffer space \ exhausted; sending this message would overflow the state"); state.num_messages += 1; // The channel is closed by all sender handles being dropped. if close { state.is_open = false; } let next = encode_state(&state); match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) { Ok(_) => { // Block if the current number of pending messages has exceeded // the configured buffer size let park_self = !close && match self.inner.buffer { Some(buffer) => state.num_messages > buffer, None => false, }; return Some(park_self) } Err(actual) => curr = actual, } } } // Signal to the receiver task that a message has been enqueued fn signal(&self) { // TODO // This logic can probably be improved by guarding the lock with an // atomic. // // Do this step first so that the lock is dropped when // `unpark` is called let task = { let mut recv_task = self.inner.recv_task.lock().unwrap(); // If the receiver has already been unparked, then there is nothing // more to do if recv_task.unparked { return; } // Setting this flag enables the receiving end to detect that // an unpark event happened in order to avoid unnecessarily // parking.
            recv_task.unparked = true;
            recv_task.task.take()
        };

        if let Some(task) = task {
            task.wake();
        }
    }

    fn park(&mut self, cx: Option<&mut task::Context>) {
        // TODO: clean up internal state if storing the waker fails

        let task = cx.map(|cx| cx.waker().clone());

        {
            let mut sender = self.sender_task.lock().unwrap();
            sender.task = task;
            sender.is_parked = true;
        }

        // Send handle over queue
        let t = self.sender_task.clone();
        self.inner.parked_queue.push(t);

        // Check to make sure we weren't closed after we sent our task on the
        // queue
        let state = decode_state(self.inner.state.load(SeqCst));
        self.maybe_parked = state.is_open;
    }

    /// Polls the channel to determine if there is guaranteed capacity to send
    /// at least one item without waiting.
    ///
    /// # Return value
    ///
    /// This method returns:
    ///
    /// - `Poll::Ready(Ok(()))` if there is sufficient capacity;
    /// - `Poll::Pending` if the channel may not have
    /// capacity, in which case the current task is queued to be notified once
    /// capacity is available;
    /// - `Poll::Ready(Err(SendError))` if the receiver has been dropped.
    pub fn poll_ready(
        &mut self,
        cx: &mut task::Context
    ) -> Poll<Result<(), SendError>> {
        let state = decode_state(self.inner.state.load(SeqCst));
        if !state.is_open {
            return Poll::Ready(Err(SendError {
                kind: SendErrorKind::Disconnected,
            }));
        }

        self.poll_unparked(Some(cx)).map(Ok)
    }

    /// Returns whether this channel is closed without needing a context.
    pub fn is_closed(&self) -> bool {
        !decode_state(self.inner.state.load(SeqCst)).is_open
    }

    /// Closes this channel from the sender side, preventing any new messages.
    pub fn close_channel(&mut self) {
        // There's no need to park this sender, it's closing,
        // and we don't want to check for capacity, so skip
        // that stuff from `do_send`.
        let _ = self.do_send_nb(None);
    }

    fn poll_unparked(&mut self, cx: Option<&mut task::Context>) -> Poll<()> {
        // First check the `maybe_parked` variable. This avoids acquiring the
        // lock in most cases
        if self.maybe_parked {
            // Get a lock on the task handle
            let mut task = self.sender_task.lock().unwrap();

            if !task.is_parked {
                self.maybe_parked = false;
                return Poll::Ready(())
            }

            // At this point, an unpark request is pending, so there will be an
            // unpark sometime in the future. We just need to make sure that
            // the correct task will be notified.
            //
            // Update the task in case the `Sender` has been moved to another
            // task
            task.task = cx.map(|cx| cx.waker().clone());

            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}

impl<T> UnboundedSender<T> {
    /// Check if the channel is ready to receive a message.
    pub fn poll_ready(
        &self,
        _: &mut task::Context,
    ) -> Poll<Result<(), SendError>> {
        self.0.poll_ready_nb()
    }

    /// Returns whether this channel is closed without needing a context.
    pub fn is_closed(&self) -> bool {
        self.0.is_closed()
    }

    /// Closes this channel from the sender side, preventing any new messages.
    pub fn close_channel(&self) {
        // There's no need to park this sender, it's closing,
        // and we don't want to check for capacity, so skip
        // that stuff from `do_send`.
        let _ = self.0.do_send_nb(None);
    }

    /// Send a message on the channel.
    ///
    /// This method should only be called after `poll_ready` has been used to
    /// verify that the channel is ready to receive a message.
    pub fn start_send(&mut self, msg: T) -> Result<(), SendError> {
        self.0.do_send_nb(Some(msg))
            .map_err(|e| e.err)
    }

    /// Sends a message along this channel.
    ///
    /// This is an unbounded sender, so this function differs from `Sink::send`
    /// by ensuring the return type reflects that the channel is always ready to
    /// receive messages.
    pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError<T>> {
        self.0.do_send_nb(Some(msg))
    }
}

impl<T> Clone for UnboundedSender<T> {
    fn clone(&self) -> UnboundedSender<T> {
        UnboundedSender(self.0.clone())
    }
}

impl<T> Clone for Sender<T> {
    fn clone(&self) -> Sender<T> {
        // Since this atomic op isn't actually guarding any memory and we don't
        // care about any orderings besides the ordering on the single atomic
        // variable, a relaxed ordering is acceptable.
        let mut curr = self.inner.num_senders.load(SeqCst);

        loop {
            // If the maximum number of senders has been reached, then fail
            if curr == self.inner.max_senders() {
                panic!("cannot clone `Sender` -- too many outstanding senders");
            }

            debug_assert!(curr < self.inner.max_senders());

            let next = curr + 1;
            let actual = self.inner.num_senders.compare_and_swap(curr, next, SeqCst);

            // The ABA problem doesn't matter here. We only care that the
            // number of senders never exceeds the maximum.
            if actual == curr {
                return Sender {
                    inner: self.inner.clone(),
                    sender_task: Arc::new(Mutex::new(SenderTask::new())),
                    maybe_parked: false,
                };
            }

            curr = actual;
        }
    }
}

impl<T> Drop for Sender<T> {
    fn drop(&mut self) {
        // Ordering between variables doesn't matter here
        let prev = self.inner.num_senders.fetch_sub(1, SeqCst);

        if prev == 1 {
            // There's no need to park this sender, it's dropping,
            // and we don't want to check for capacity, so skip
            // that stuff from `do_send`.
            let _ = self.do_send_nb(None);
        }
    }
}

/*
 *
 * ===== impl Receiver =====
 *
 */

impl<T> Receiver<T> {
    /// Closes the receiving half of a channel, without dropping it.
    ///
    /// This prevents any further messages from being sent on the channel while
    /// still enabling the receiver to drain messages that are buffered.
    pub fn close(&mut self) {
        let mut curr = self.inner.state.load(SeqCst);

        loop {
            let mut state = decode_state(curr);

            if !state.is_open {
                break
            }

            state.is_open = false;

            let next = encode_state(&state);
            match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
                Ok(_) => break,
                Err(actual) => curr = actual,
            }
        }

        // Wake up any threads waiting as they'll see that we've closed the
        // channel and will continue on their merry way.
        loop {
            match unsafe { self.inner.parked_queue.pop() } {
                PopResult::Data(task) => {
                    task.lock().unwrap().notify();
                }
                PopResult::Empty => break,
                PopResult::Inconsistent => thread::yield_now(),
            }
        }
    }

    /// Tries to receive the next message without notifying a context if empty.
    ///
    /// It is not recommended to call this function from inside of a future,
    /// only when you've otherwise arranged to be notified when the channel is
    /// no longer empty.
    pub fn try_next(&mut self) -> Result<Option<T>, TryRecvError> {
        match self.next_message() {
            Poll::Ready(msg) => {
                Ok(msg)
            },
            Poll::Pending => Err(TryRecvError { _inner: () }),
        }
    }

    fn next_message(&mut self) -> Poll<Option<T>> {
        // Pop off a message
        loop {
            match unsafe { self.inner.message_queue.pop() } {
                PopResult::Data(msg) => {
                    // If there are any parked task handles in the parked queue,
                    // pop one and unpark it.
                    self.unpark_one();

                    // Decrement number of messages
                    self.dec_num_messages();

                    return Poll::Ready(msg);
                }
                PopResult::Empty => {
                    // The queue is empty, return Pending
                    return Poll::Pending;
                }
                PopResult::Inconsistent => {
                    // Inconsistent means that there will be a message to pop
                    // in a short time. 
This branch can only be reached if // values are being produced from another thread, so there // are a few ways that we can deal with this: // // 1) Spin // 2) thread::yield_now() // 3) task::current().unwrap() & return Pending // // For now, thread::yield_now() is used, but it would // probably be better to spin a few times then yield. thread::yield_now(); } } } } // Unpark a single task handle if there is one pending in the parked queue fn unpark_one(&mut self) { loop { match unsafe { self.inner.parked_queue.pop() } { PopResult::Data(task) => { task.lock().unwrap().notify(); return; } PopResult::Empty => { // Queue empty, no task to wake up. return; } PopResult::Inconsistent => { // Same as above thread::yield_now(); } } } } // Try to park the receiver task fn try_park(&self, cx: &mut task::Context) -> TryPark { let curr = self.inner.state.load(SeqCst); let state = decode_state(curr); // If the channel is closed, then there is no need to park. if !state.is_open && state.num_messages == 0 { return TryPark::Closed; } // First, track the task in the `recv_task` slot let mut recv_task = self.inner.recv_task.lock().unwrap(); if recv_task.unparked { // Consume the `unpark` signal without actually parking recv_task.unparked = false; return TryPark::NotEmpty; } recv_task.task = Some(cx.waker().clone()); TryPark::Parked } fn dec_num_messages(&self) { let mut curr = self.inner.state.load(SeqCst); loop { let mut state = decode_state(curr); state.num_messages -= 1; let next = encode_state(&state); match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) { Ok(_) => break, Err(actual) => curr = actual, } } } } // The receiver does not ever take a PinMut to the inner T impl<T> Unpin for Receiver<T> {} impl<T> Stream for Receiver<T> { type Item = T; fn poll_next( mut self: PinMut<Self>, cx: &mut task::Context, ) -> Poll<Option<T>> { loop { // Try to read a message off of the message queue. let msg = match self.next_message() { Poll::Ready(msg) => msg, Poll::Pending => { // There are no messages to read, in this case, attempt to // park. The act of parking will verify that the channel is // still empty after the park operation has completed. match self.try_park(cx) { TryPark::Parked => { // The task was parked, and the channel is still // empty, return Pending. return Poll::Pending; } TryPark::Closed => { // The channel is closed, there will be no further // messages. return Poll::Ready(None); } TryPark::NotEmpty => { // A message has been sent while attempting to // park. Loop again, the next iteration is // guaranteed to get the message. continue; } } } }; // Return the message return Poll::Ready(msg); } } } impl<T> Drop for Receiver<T> { fn drop(&mut self) { // Drain the channel of all pending messages self.close(); while self.next_message().is_ready() { // ... } } } impl<T> UnboundedReceiver<T> { /// Closes the receiving half of the channel, without dropping it. /// /// This prevents any further messages from being sent on the channel while /// still enabling the receiver to drain messages that are buffered. pub fn close(&mut self) { self.0.close(); } /// Tries to receive the next message without notifying a context if empty. /// /// It is not recommended to call this function from inside of a future, /// only when you've otherwise arranged to be notified when the channel is /// no longer empty. 
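    ///
    /// A usage sketch (not part of the original documentation; it assumes this
    /// file is the `mpsc` module of the `futures-channel` crate):
    ///
    /// ```
    /// let (tx, mut rx) = futures_channel::mpsc::unbounded::<u32>();
    /// tx.unbounded_send(3).unwrap();
    /// drop(tx);
    /// // The buffered message is still delivered...
    /// assert_eq!(rx.try_next().unwrap(), Some(3));
    /// // ...and once all senders are gone and the queue is drained,
    /// // `try_next` signals stream termination with `Ok(None)`.
    /// assert_eq!(rx.try_next().unwrap(), None);
    /// ```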
pub fn try_next(&mut self) -> Result<Option<T>, TryRecvError> { self.0.try_next() } } impl<T> Stream for UnboundedReceiver<T> { type Item = T; fn poll_next( mut self: PinMut<Self>, cx: &mut task::Context, ) -> Poll<Option<T>> { PinMut::new(&mut self.0).poll_next(cx) } } /* * * ===== impl Inner ===== * */ impl<T> Inner<T> { // The return value is such that the total number of messages that can be // enqueued into the channel will never exceed MAX_CAPACITY fn max_senders(&self) -> usize { match self.buffer { Some(buffer) => MAX_CAPACITY - buffer, None => MAX_BUFFER, } } } unsafe impl<T: Send> Send for Inner<T> {} unsafe impl<T: Send> Sync for Inner<T> {} /* * * ===== Helpers ===== * */ fn decode_state(num: usize) -> State { State { is_open: num & OPEN_MASK == OPEN_MASK, num_messages: num & MAX_CAPACITY, } } fn encode_state(state: &State) -> usize { let mut num = state.num_messages; if state.is_open { num |= OPEN_MASK; } num }
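
// A minimal round-trip sketch (not part of the original file) for the packed
// channel state handled by the helpers above. It assumes the constants defined
// earlier in this file: `OPEN_MASK` reserves a high bit of the `usize` for the
// open flag, and `MAX_CAPACITY` masks the message-count bits below it.
#[cfg(test)]
mod state_encoding_sketch {
    use super::*;

    #[test]
    fn state_round_trips_through_encoding() {
        let state = State { is_open: true, num_messages: 42 };
        let encoded = encode_state(&state);
        // The open flag occupies exactly the bits reserved by `OPEN_MASK`.
        assert_eq!(encoded & OPEN_MASK, OPEN_MASK);
        // Decoding recovers both fields unchanged.
        let decoded = decode_state(encoded);
        assert!(decoded.is_open);
        assert_eq!(decoded.num_messages, 42);
    }
}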
32.360444
87
0.580025
181c7e7d4bbdb24920c44f46c01a699bbcd9e5e9
45,113
use { solana_program_runtime::{ic_msg, invoke_context::InvokeContext}, solana_sdk::{ account::{ReadableAccount, WritableAccount}, account_utils::State as AccountUtilsState, feature_set::{self, nonce_must_be_writable}, instruction::{checked_add, InstructionError}, keyed_account::KeyedAccount, nonce::{self, state::Versions, State}, pubkey::Pubkey, system_instruction::{nonce_to_instruction_error, NonceError}, sysvar::rent::Rent, }, std::collections::HashSet, }; pub trait NonceKeyedAccount { fn advance_nonce_account( &self, signers: &HashSet<Pubkey>, invoke_context: &InvokeContext, ) -> Result<(), InstructionError>; fn withdraw_nonce_account( &self, lamports: u64, to: &KeyedAccount, rent: &Rent, signers: &HashSet<Pubkey>, invoke_context: &InvokeContext, ) -> Result<(), InstructionError>; fn initialize_nonce_account( &self, nonce_authority: &Pubkey, rent: &Rent, invoke_context: &InvokeContext, ) -> Result<(), InstructionError>; fn authorize_nonce_account( &self, nonce_authority: &Pubkey, signers: &HashSet<Pubkey>, invoke_context: &InvokeContext, ) -> Result<(), InstructionError>; } impl<'a> NonceKeyedAccount for KeyedAccount<'a> { fn advance_nonce_account( &self, signers: &HashSet<Pubkey>, invoke_context: &InvokeContext, ) -> Result<(), InstructionError> { let merge_nonce_error_into_system_error = invoke_context .feature_set .is_active(&feature_set::merge_nonce_error_into_system_error::id()); if invoke_context .feature_set .is_active(&nonce_must_be_writable::id()) && !self.is_writable() { ic_msg!( invoke_context, "Advance nonce account: Account {} must be writeable", self.unsigned_key() ); return Err(InstructionError::InvalidArgument); } let state = AccountUtilsState::<Versions>::state(self)?.convert_to_current(); match state { State::Initialized(data) => { if !signers.contains(&data.authority) { ic_msg!( invoke_context, "Advance nonce account: Account {} must be a signer", data.authority ); return Err(InstructionError::MissingRequiredSignature); } let recent_blockhash = invoke_context.blockhash; if data.blockhash == recent_blockhash { ic_msg!( invoke_context, "Advance nonce account: nonce can only advance once per slot" ); return Err(nonce_to_instruction_error( NonceError::NotExpired, merge_nonce_error_into_system_error, )); } let new_data = nonce::state::Data::new( data.authority, recent_blockhash, invoke_context.lamports_per_signature, ); self.set_state(&Versions::new_current(State::Initialized(new_data))) } _ => { ic_msg!( invoke_context, "Advance nonce account: Account {} state is invalid", self.unsigned_key() ); Err(nonce_to_instruction_error( NonceError::BadAccountState, merge_nonce_error_into_system_error, )) } } } fn withdraw_nonce_account( &self, lamports: u64, to: &KeyedAccount, rent: &Rent, signers: &HashSet<Pubkey>, invoke_context: &InvokeContext, ) -> Result<(), InstructionError> { let merge_nonce_error_into_system_error = invoke_context .feature_set .is_active(&feature_set::merge_nonce_error_into_system_error::id()); if invoke_context .feature_set .is_active(&nonce_must_be_writable::id()) && !self.is_writable() { ic_msg!( invoke_context, "Withdraw nonce account: Account {} must be writeable", self.unsigned_key() ); return Err(InstructionError::InvalidArgument); } let signer = match AccountUtilsState::<Versions>::state(self)?.convert_to_current() { State::Uninitialized => { if lamports > self.lamports()? 
{ ic_msg!( invoke_context, "Withdraw nonce account: insufficient lamports {}, need {}", self.lamports()?, lamports, ); return Err(InstructionError::InsufficientFunds); } *self.unsigned_key() } State::Initialized(ref data) => { if lamports == self.lamports()? { if data.blockhash == invoke_context.blockhash { ic_msg!( invoke_context, "Withdraw nonce account: nonce can only advance once per slot" ); return Err(nonce_to_instruction_error( NonceError::NotExpired, merge_nonce_error_into_system_error, )); } self.set_state(&Versions::new_current(State::Uninitialized))?; } else { let min_balance = rent.minimum_balance(self.data_len()?); let amount = checked_add(lamports, min_balance)?; if amount > self.lamports()? { ic_msg!( invoke_context, "Withdraw nonce account: insufficient lamports {}, need {}", self.lamports()?, amount, ); return Err(InstructionError::InsufficientFunds); } } data.authority } }; if !signers.contains(&signer) { ic_msg!( invoke_context, "Withdraw nonce account: Account {} must sign", signer ); return Err(InstructionError::MissingRequiredSignature); } let nonce_balance = self.try_account_ref_mut()?.lamports(); self.try_account_ref_mut()?.set_lamports( nonce_balance .checked_sub(lamports) .ok_or(InstructionError::ArithmeticOverflow)?, ); let to_balance = to.try_account_ref_mut()?.lamports(); to.try_account_ref_mut()?.set_lamports( to_balance .checked_add(lamports) .ok_or(InstructionError::ArithmeticOverflow)?, ); Ok(()) } fn initialize_nonce_account( &self, nonce_authority: &Pubkey, rent: &Rent, invoke_context: &InvokeContext, ) -> Result<(), InstructionError> { let merge_nonce_error_into_system_error = invoke_context .feature_set .is_active(&feature_set::merge_nonce_error_into_system_error::id()); if invoke_context .feature_set .is_active(&nonce_must_be_writable::id()) && !self.is_writable() { ic_msg!( invoke_context, "Initialize nonce account: Account {} must be writeable", self.unsigned_key() ); return Err(InstructionError::InvalidArgument); } match AccountUtilsState::<Versions>::state(self)?.convert_to_current() { State::Uninitialized => { let min_balance = rent.minimum_balance(self.data_len()?); if self.lamports()? 
< min_balance { ic_msg!( invoke_context, "Initialize nonce account: insufficient lamports {}, need {}", self.lamports()?, min_balance ); return Err(InstructionError::InsufficientFunds); } let data = nonce::state::Data::new( *nonce_authority, invoke_context.blockhash, invoke_context.lamports_per_signature, ); self.set_state(&Versions::new_current(State::Initialized(data))) } _ => { ic_msg!( invoke_context, "Initialize nonce account: Account {} state is invalid", self.unsigned_key() ); Err(nonce_to_instruction_error( NonceError::BadAccountState, merge_nonce_error_into_system_error, )) } } } fn authorize_nonce_account( &self, nonce_authority: &Pubkey, signers: &HashSet<Pubkey>, invoke_context: &InvokeContext, ) -> Result<(), InstructionError> { let merge_nonce_error_into_system_error = invoke_context .feature_set .is_active(&feature_set::merge_nonce_error_into_system_error::id()); if invoke_context .feature_set .is_active(&nonce_must_be_writable::id()) && !self.is_writable() { ic_msg!( invoke_context, "Authorize nonce account: Account {} must be writeable", self.unsigned_key() ); return Err(InstructionError::InvalidArgument); } match AccountUtilsState::<Versions>::state(self)?.convert_to_current() { State::Initialized(data) => { if !signers.contains(&data.authority) { ic_msg!( invoke_context, "Authorize nonce account: Account {} must sign", data.authority ); return Err(InstructionError::MissingRequiredSignature); } let new_data = nonce::state::Data::new( *nonce_authority, data.blockhash, data.get_lamports_per_signature(), ); self.set_state(&Versions::new_current(State::Initialized(new_data))) } _ => { ic_msg!( invoke_context, "Authorize nonce account: Account {} state is invalid", self.unsigned_key() ); Err(nonce_to_instruction_error( NonceError::BadAccountState, merge_nonce_error_into_system_error, )) } } } } #[cfg(test)] mod test { use { super::*, solana_program_runtime::invoke_context::InvokeContext, solana_sdk::{ account::ReadableAccount, account_utils::State as AccountUtilsState, hash::{hash, Hash}, keyed_account::KeyedAccount, nonce::{self, State}, nonce_account::{create_account, verify_nonce_account}, system_instruction::SystemError, transaction_context::TransactionContext, }, }; fn with_mockup<F>(lamports: u64, signer: bool, mut f: F) where F: FnMut(&mut InvokeContext, &KeyedAccount), { let mut transaction_context = TransactionContext::new(Vec::new(), 1, 1); let mut invoke_context = InvokeContext::new_mock(&mut transaction_context, &[]); let pubkey = Pubkey::new_unique(); let account = create_account(lamports); let keyed_account = KeyedAccount::new(&pubkey, signer, &account); f(&mut invoke_context, &keyed_account) } fn set_invoke_context_blockhash(invoke_context: &mut InvokeContext, seed: usize) { invoke_context.blockhash = hash(&bincode::serialize(&seed).unwrap()); invoke_context.lamports_per_signature = (seed as u64).saturating_mul(100); } #[test] fn default_is_uninitialized() { assert_eq!(State::default(), State::Uninitialized) } #[test] fn keyed_account_expected_behavior() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, keyed_account| { let data = nonce::state::Data { authority: *keyed_account.unsigned_key(), ..nonce::state::Data::default() }; let mut signers = HashSet::new(); signers.insert(*keyed_account.signer_key().unwrap()); let state = AccountUtilsState::<Versions>::state(keyed_account) .unwrap() .convert_to_current(); // New is in Uninitialzed 
state assert_eq!(state, State::Uninitialized); set_invoke_context_blockhash(invoke_context, 95); let authorized = keyed_account.unsigned_key(); keyed_account .initialize_nonce_account(authorized, &rent, invoke_context) .unwrap(); let state = AccountUtilsState::<Versions>::state(keyed_account) .unwrap() .convert_to_current(); let data = nonce::state::Data::new( data.authority, invoke_context.blockhash, invoke_context.lamports_per_signature, ); // First nonce instruction drives state from Uninitialized to Initialized assert_eq!(state, State::Initialized(data.clone())); set_invoke_context_blockhash(invoke_context, 63); keyed_account .advance_nonce_account(&signers, invoke_context) .unwrap(); let state = AccountUtilsState::<Versions>::state(keyed_account) .unwrap() .convert_to_current(); let data = nonce::state::Data::new( data.authority, invoke_context.blockhash, invoke_context.lamports_per_signature, ); // Second nonce instruction consumes and replaces stored nonce assert_eq!(state, State::Initialized(data.clone())); set_invoke_context_blockhash(invoke_context, 31); keyed_account .advance_nonce_account(&signers, invoke_context) .unwrap(); let state = AccountUtilsState::<Versions>::state(keyed_account) .unwrap() .convert_to_current(); let data = nonce::state::Data::new( data.authority, invoke_context.blockhash, invoke_context.lamports_per_signature, ); // Third nonce instruction for fun and profit assert_eq!(state, State::Initialized(data)); with_mockup(42, false, |_invoke_context, to_keyed| { set_invoke_context_blockhash(invoke_context, 0); let withdraw_lamports = keyed_account.account.borrow().lamports(); let expect_nonce_lamports = keyed_account.account.borrow().lamports() - withdraw_lamports; let expect_to_lamports = to_keyed.account.borrow().lamports() + withdraw_lamports; keyed_account .withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ) .unwrap(); // Empties Account balance assert_eq!( keyed_account.account.borrow().lamports(), expect_nonce_lamports ); // Account balance goes to `to` assert_eq!(to_keyed.account.borrow().lamports(), expect_to_lamports); let state = AccountUtilsState::<Versions>::state(keyed_account) .unwrap() .convert_to_current(); // Empty balance deinitializes data assert_eq!(state, State::Uninitialized); }) }) } #[test] fn nonce_inx_initialized_account_not_signer_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_account| { set_invoke_context_blockhash(invoke_context, 31); let authority = *nonce_account.unsigned_key(); nonce_account .initialize_nonce_account(&authority, &rent, invoke_context) .unwrap(); let pubkey = *nonce_account.account.borrow().owner(); let nonce_account = KeyedAccount::new(&pubkey, false, nonce_account.account); let state = AccountUtilsState::<Versions>::state(&nonce_account) .unwrap() .convert_to_current(); let data = nonce::state::Data::new( authority, invoke_context.blockhash, invoke_context.lamports_per_signature, ); assert_eq!(state, State::Initialized(data)); let signers = HashSet::new(); set_invoke_context_blockhash(invoke_context, 0); let result = nonce_account.advance_nonce_account(&signers, invoke_context); assert_eq!(result, Err(InstructionError::MissingRequiredSignature),); }) } #[test] fn nonce_inx_too_early_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); 
with_mockup(min_lamports + 42, true, |invoke_context, keyed_account| { let mut signers = HashSet::new(); signers.insert(*keyed_account.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 63); let authorized = *keyed_account.unsigned_key(); keyed_account .initialize_nonce_account(&authorized, &rent, invoke_context) .unwrap(); let result = keyed_account.advance_nonce_account(&signers, invoke_context); assert_eq!(result, Err(SystemError::NonceBlockhashNotExpired.into())); }) } #[test] fn nonce_inx_uninitialized_account_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, keyed_account| { let mut signers = HashSet::new(); signers.insert(*keyed_account.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 63); let result = keyed_account.advance_nonce_account(&signers, invoke_context); assert_eq!(result, Err(InstructionError::InvalidAccountData)); }) } #[test] fn nonce_inx_independent_nonce_authority_ok() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_account| { with_mockup(42, true, |_invoke_context, nonce_authority| { let mut signers = HashSet::new(); signers.insert(*nonce_account.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 63); let authorized = *nonce_authority.unsigned_key(); nonce_account .initialize_nonce_account(&authorized, &rent, invoke_context) .unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_authority.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 31); let result = nonce_account.advance_nonce_account(&signers, invoke_context); assert_eq!(result, Ok(())); }); }); } #[test] fn nonce_inx_no_nonce_authority_sig_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_account| { with_mockup(42, false, |_invoke_context, nonce_authority| { let mut signers = HashSet::new(); signers.insert(*nonce_account.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 63); let authorized = *nonce_authority.unsigned_key(); nonce_account .initialize_nonce_account(&authorized, &rent, invoke_context) .unwrap(); let result = nonce_account.advance_nonce_account(&signers, invoke_context); assert_eq!(result, Err(InstructionError::MissingRequiredSignature),); }); }); } #[test] fn withdraw_inx_unintialized_acc_ok() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_keyed| { let state = AccountUtilsState::<Versions>::state(nonce_keyed) .unwrap() .convert_to_current(); assert_eq!(state, State::Uninitialized); with_mockup(42, false, |_invoke_context, to_keyed| { let mut signers = HashSet::new(); signers.insert(*nonce_keyed.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 0); let withdraw_lamports = nonce_keyed.account.borrow().lamports(); let expect_nonce_lamports = nonce_keyed.account.borrow().lamports() - withdraw_lamports; let expect_to_lamports = to_keyed.account.borrow().lamports() + withdraw_lamports; nonce_keyed .withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ) .unwrap(); let state = 
AccountUtilsState::<Versions>::state(nonce_keyed) .unwrap() .convert_to_current(); // Withdraw instruction... // Deinitializes Account state assert_eq!(state, State::Uninitialized); // Empties Account balance assert_eq!( nonce_keyed.account.borrow().lamports(), expect_nonce_lamports ); // Account balance goes to `to` assert_eq!(to_keyed.account.borrow().lamports(), expect_to_lamports); }) }) } #[test] fn withdraw_inx_unintialized_acc_unsigned_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, false, |invoke_context, nonce_keyed| { let state = AccountUtilsState::<Versions>::state(nonce_keyed) .unwrap() .convert_to_current(); assert_eq!(state, State::Uninitialized); with_mockup(42, false, |_invoke_context, to_keyed| { let signers = HashSet::new(); set_invoke_context_blockhash(invoke_context, 0); let lamports = nonce_keyed.account.borrow().lamports(); let result = nonce_keyed.withdraw_nonce_account( lamports, to_keyed, &rent, &signers, invoke_context, ); assert_eq!(result, Err(InstructionError::MissingRequiredSignature),); }) }) } #[test] fn withdraw_inx_unintialized_acc_insuff_funds_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_keyed| { let state = AccountUtilsState::<Versions>::state(nonce_keyed) .unwrap() .convert_to_current(); assert_eq!(state, State::Uninitialized); with_mockup(42, false, |_invoke_context, to_keyed| { let mut signers = HashSet::new(); signers.insert(*nonce_keyed.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 0); let lamports = nonce_keyed.account.borrow().lamports() + 1; let result = nonce_keyed.withdraw_nonce_account( lamports, to_keyed, &rent, &signers, invoke_context, ); assert_eq!(result, Err(InstructionError::InsufficientFunds)); }) }) } #[test] fn withdraw_inx_uninitialized_acc_two_withdraws_ok() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_keyed| { with_mockup(42, false, |_invoke_context, to_keyed| { let mut signers = HashSet::new(); signers.insert(*nonce_keyed.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 0); let withdraw_lamports = nonce_keyed.account.borrow().lamports() / 2; let nonce_expect_lamports = nonce_keyed.account.borrow().lamports() - withdraw_lamports; let to_expect_lamports = to_keyed.account.borrow().lamports() + withdraw_lamports; nonce_keyed .withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ) .unwrap(); let state = AccountUtilsState::<Versions>::state(nonce_keyed) .unwrap() .convert_to_current(); assert_eq!(state, State::Uninitialized); assert_eq!( nonce_keyed.account.borrow().lamports(), nonce_expect_lamports ); assert_eq!(to_keyed.account.borrow().lamports(), to_expect_lamports); let withdraw_lamports = nonce_keyed.account.borrow().lamports(); let nonce_expect_lamports = nonce_keyed.account.borrow().lamports() - withdraw_lamports; let to_expect_lamports = to_keyed.account.borrow().lamports() + withdraw_lamports; nonce_keyed .withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ) .unwrap(); let state = AccountUtilsState::<Versions>::state(nonce_keyed) .unwrap() .convert_to_current(); assert_eq!(state, State::Uninitialized); assert_eq!( 
nonce_keyed.account.borrow().lamports(), nonce_expect_lamports ); assert_eq!(to_keyed.account.borrow().lamports(), to_expect_lamports); }) }) } #[test] fn withdraw_inx_initialized_acc_two_withdraws_ok() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_keyed| { let mut signers = HashSet::new(); signers.insert(*nonce_keyed.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 31); let authority = *nonce_keyed.unsigned_key(); nonce_keyed .initialize_nonce_account(&authority, &rent, invoke_context) .unwrap(); let state = AccountUtilsState::<Versions>::state(nonce_keyed) .unwrap() .convert_to_current(); let data = nonce::state::Data::new( authority, invoke_context.blockhash, invoke_context.lamports_per_signature, ); assert_eq!(state, State::Initialized(data.clone())); with_mockup(42, false, |_invoke_context, to_keyed| { let withdraw_lamports = nonce_keyed.account.borrow().lamports() - min_lamports; let nonce_expect_lamports = nonce_keyed.account.borrow().lamports() - withdraw_lamports; let to_expect_lamports = to_keyed.account.borrow().lamports() + withdraw_lamports; nonce_keyed .withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ) .unwrap(); let state = AccountUtilsState::<Versions>::state(nonce_keyed) .unwrap() .convert_to_current(); let data = nonce::state::Data::new( data.authority, invoke_context.blockhash, invoke_context.lamports_per_signature, ); assert_eq!(state, State::Initialized(data)); assert_eq!( nonce_keyed.account.borrow().lamports(), nonce_expect_lamports ); assert_eq!(to_keyed.account.borrow().lamports(), to_expect_lamports); set_invoke_context_blockhash(invoke_context, 0); let withdraw_lamports = nonce_keyed.account.borrow().lamports(); let nonce_expect_lamports = nonce_keyed.account.borrow().lamports() - withdraw_lamports; let to_expect_lamports = to_keyed.account.borrow().lamports() + withdraw_lamports; nonce_keyed .withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ) .unwrap(); let state = AccountUtilsState::<Versions>::state(nonce_keyed) .unwrap() .convert_to_current(); assert_eq!(state, State::Uninitialized); assert_eq!( nonce_keyed.account.borrow().lamports(), nonce_expect_lamports ); assert_eq!(to_keyed.account.borrow().lamports(), to_expect_lamports); }) }) } #[test] fn withdraw_inx_initialized_acc_nonce_too_early_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_keyed| { set_invoke_context_blockhash(invoke_context, 0); let authorized = *nonce_keyed.unsigned_key(); nonce_keyed .initialize_nonce_account(&authorized, &rent, invoke_context) .unwrap(); with_mockup(42, false, |_invoke_context, to_keyed| { let mut signers = HashSet::new(); signers.insert(*nonce_keyed.signer_key().unwrap()); let withdraw_lamports = nonce_keyed.account.borrow().lamports(); let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ); assert_eq!(result, Err(SystemError::NonceBlockhashNotExpired.into())); }) }) } #[test] fn withdraw_inx_initialized_acc_insuff_funds_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_keyed| { 
set_invoke_context_blockhash(invoke_context, 95); let authorized = *nonce_keyed.unsigned_key(); nonce_keyed .initialize_nonce_account(&authorized, &rent, invoke_context) .unwrap(); with_mockup(42, false, |_invoke_context, to_keyed| { set_invoke_context_blockhash(invoke_context, 63); let mut signers = HashSet::new(); signers.insert(*nonce_keyed.signer_key().unwrap()); let withdraw_lamports = nonce_keyed.account.borrow().lamports() + 1; let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ); assert_eq!(result, Err(InstructionError::InsufficientFunds)); }) }) } #[test] fn withdraw_inx_initialized_acc_insuff_rent_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_keyed| { set_invoke_context_blockhash(invoke_context, 95); let authorized = *nonce_keyed.unsigned_key(); nonce_keyed .initialize_nonce_account(&authorized, &rent, invoke_context) .unwrap(); with_mockup(42, false, |_invoke_context, to_keyed| { set_invoke_context_blockhash(invoke_context, 63); let mut signers = HashSet::new(); signers.insert(*nonce_keyed.signer_key().unwrap()); let withdraw_lamports = nonce_keyed.account.borrow().lamports() - min_lamports + 1; let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ); assert_eq!(result, Err(InstructionError::InsufficientFunds)); }) }) } #[test] fn withdraw_inx_overflow() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_keyed| { set_invoke_context_blockhash(invoke_context, 95); let authorized = *nonce_keyed.unsigned_key(); nonce_keyed .initialize_nonce_account(&authorized, &rent, invoke_context) .unwrap(); with_mockup(55, false, |_invoke_context, to_keyed| { set_invoke_context_blockhash(invoke_context, 63); let mut signers = HashSet::new(); signers.insert(*nonce_keyed.signer_key().unwrap()); let withdraw_lamports = u64::MAX - 54; let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, to_keyed, &rent, &signers, invoke_context, ); assert_eq!(result, Err(InstructionError::InsufficientFunds)); }) }) } #[test] fn initialize_inx_ok() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, keyed_account| { let state = AccountUtilsState::<Versions>::state(keyed_account) .unwrap() .convert_to_current(); assert_eq!(state, State::Uninitialized); let mut signers = HashSet::new(); signers.insert(*keyed_account.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 0); let authority = *keyed_account.unsigned_key(); let result = keyed_account.initialize_nonce_account(&authority, &rent, invoke_context); let data = nonce::state::Data::new( authority, invoke_context.blockhash, invoke_context.lamports_per_signature, ); assert_eq!(result, Ok(())); let state = AccountUtilsState::<Versions>::state(keyed_account) .unwrap() .convert_to_current(); assert_eq!(state, State::Initialized(data)); }) } #[test] fn initialize_inx_initialized_account_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, keyed_account| { 
set_invoke_context_blockhash(invoke_context, 31); let authorized = *keyed_account.unsigned_key(); keyed_account .initialize_nonce_account(&authorized, &rent, invoke_context) .unwrap(); set_invoke_context_blockhash(invoke_context, 0); let result = keyed_account.initialize_nonce_account(&authorized, &rent, invoke_context); assert_eq!(result, Err(InstructionError::InvalidAccountData)); }) } #[test] fn initialize_inx_uninitialized_acc_insuff_funds_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports - 42, true, |invoke_context, keyed_account| { set_invoke_context_blockhash(invoke_context, 63); let authorized = *keyed_account.unsigned_key(); let result = keyed_account.initialize_nonce_account(&authorized, &rent, invoke_context); assert_eq!(result, Err(InstructionError::InsufficientFunds)); }) } #[test] fn authorize_inx_ok() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_account| { let mut signers = HashSet::new(); signers.insert(*nonce_account.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 31); let authorized = *nonce_account.unsigned_key(); nonce_account .initialize_nonce_account(&authorized, &rent, invoke_context) .unwrap(); let authority = Pubkey::default(); let data = nonce::state::Data::new( authority, invoke_context.blockhash, invoke_context.lamports_per_signature, ); let result = nonce_account.authorize_nonce_account(&Pubkey::default(), &signers, invoke_context); assert_eq!(result, Ok(())); let state = AccountUtilsState::<Versions>::state(nonce_account) .unwrap() .convert_to_current(); assert_eq!(state, State::Initialized(data)); }) } #[test] fn authorize_inx_uninitialized_state_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_account| { let mut signers = HashSet::new(); signers.insert(*nonce_account.signer_key().unwrap()); let result = nonce_account.authorize_nonce_account(&Pubkey::default(), &signers, invoke_context); assert_eq!(result, Err(InstructionError::InvalidAccountData)); }) } #[test] fn authorize_inx_bad_authority_fail() { let rent = Rent { lamports_per_byte_year: 42, ..Rent::default() }; let min_lamports = rent.minimum_balance(State::size()); with_mockup(min_lamports + 42, true, |invoke_context, nonce_account| { let mut signers = HashSet::new(); signers.insert(*nonce_account.signer_key().unwrap()); set_invoke_context_blockhash(invoke_context, 31); let authorized = &Pubkey::default().clone(); nonce_account .initialize_nonce_account(authorized, &rent, invoke_context) .unwrap(); let result = nonce_account.authorize_nonce_account(&Pubkey::default(), &signers, invoke_context); assert_eq!(result, Err(InstructionError::MissingRequiredSignature)); }) } #[test] fn verify_nonce_ok() { with_mockup(42, true, |invoke_context, nonce_account| { let mut signers = HashSet::new(); signers.insert(nonce_account.signer_key().unwrap()); let state: State = nonce_account.state().unwrap(); // New is in Uninitialzed state assert_eq!(state, State::Uninitialized); set_invoke_context_blockhash(invoke_context, 0); let authorized = nonce_account.unsigned_key(); nonce_account .initialize_nonce_account(authorized, &Rent::free(), invoke_context) .unwrap(); assert!(verify_nonce_account( 
                &nonce_account.account.borrow(),
                &invoke_context.blockhash,
            ));
        });
    }

    #[test]
    fn verify_nonce_bad_acc_state_fail() {
        with_mockup(42, true, |_invoke_context, nonce_account| {
            assert!(!verify_nonce_account(
                &nonce_account.account.borrow(),
                &Hash::default()
            ));
        });
    }

    #[test]
    fn verify_nonce_bad_query_hash_fail() {
        with_mockup(42, true, |invoke_context, nonce_account| {
            let mut signers = HashSet::new();
            signers.insert(nonce_account.signer_key().unwrap());
            let state: State = nonce_account.state().unwrap();
            // New is in Uninitialized state
            assert_eq!(state, State::Uninitialized);
            set_invoke_context_blockhash(invoke_context, 0);
            let authorized = nonce_account.unsigned_key();
            nonce_account
                .initialize_nonce_account(authorized, &Rent::free(), invoke_context)
                .unwrap();
            set_invoke_context_blockhash(invoke_context, 1);
            assert!(!verify_nonce_account(
                &nonce_account.account.borrow(),
                &invoke_context.blockhash,
            ));
        });
    }
}
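
// A minimal sketch (not part of the original file) of the balance rule that
// `withdraw_nonce_account` enforces on an initialized account: a partial
// withdrawal must leave at least the rent-exempt minimum behind, while
// withdrawing the full balance deinitializes the account instead. The helper
// below is illustrative and not part of the crate's API.
#[allow(dead_code)]
fn partial_withdrawal_allowed(balance: u64, withdraw: u64, min_balance: u64) -> bool {
    // Mirrors the `checked_add(lamports, min_balance) <= self.lamports()?`
    // check above; overflow means the request certainly exceeds the balance.
    match withdraw.checked_add(min_balance) {
        Some(required) => required <= balance,
        None => false,
    }
}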
40.752484
100
0.538891
1d83f51ff23635ffa16ad73c91b524b08fdc9d0e
4,265
use tract_core::prelude::*; fn setup_test_logger() { let _ = env_logger::Builder::from_env("TRACT_LOG").try_init(); } #[derive(Copy, Clone, PartialEq, Debug)] pub enum Mode { Infer, Type, Opt, } pub fn compare<S: AsRef<str>>( graph: &[u8], inputs: Vec<(S, Tensor)>, output: &str, ) -> std::result::Result<(), ::proptest::test_runner::TestCaseError> { for mode in &[Mode::Infer, Mode::Type, Mode::Opt] { compare_optim(graph, &inputs, output, *mode)?; } Ok(()) } pub fn run_tract<S: AsRef<str>>( graph: &[u8], inputs: &Vec<(S, Tensor)>, output: &str, mode: Mode, ) -> TractResult<TVec<Arc<Tensor>>> { let mut model = tract_tensorflow::tensorflow().model_for_read(&mut &*graph)?; model.set_input_names(&inputs.iter().map(|pair| pair.0.as_ref()).collect::<Vec<&str>>())?; model.set_output_names(&[output])?; for (ix, (_, tf)) in inputs.iter().enumerate() { model.set_input_fact(ix, TensorFact::dt_shape(tf.datum_type(), tf.shape()))?; } info!("analysed"); let inputs = inputs.iter().map(|pair| pair.1.clone()).collect(); if mode == Mode::Infer { let plan = SimplePlan::new(&model)?; plan.run(inputs) } else { let mut model = model.into_typed()?; info!("typed"); if mode == Mode::Opt { model = model.into_optimized()?; info!("optimized"); } trace!("{:#?}", model); let plan = SimplePlan::new(&model)?; plan.run(inputs) } } pub fn compare_optim<S: AsRef<str>>( graph: &[u8], inputs: &Vec<(S, Tensor)>, output: &str, mode: Mode, ) -> std::result::Result<(), ::proptest::test_runner::TestCaseError> { setup_test_logger(); let tf_inputs: Vec<(&str, Tensor)> = inputs.iter().map(|(s, m)| (s.as_ref(), m.clone())).collect(); let expected = tract_tensorflow::conform::tf::for_slice(&graph)?.run(tf_inputs.clone(), &output)?; info!("Tensorflow says: {:?}", expected); info!("Checking {} output against tensorflow ({:?})", output, mode); let found = match run_tract(graph, inputs, output, mode) { Err(e) => { use crate::tract_core::error_chain::ChainedError; error!("{}", e.display_chain()); return Err(e.into()); } Ok(t) => t, }; expected[0].close_enough(&found[0], true).unwrap(); info!("Mode: {:?} passed", mode); Ok(()) } #[allow(dead_code)] pub fn infer<S: AsRef<str>>( graph: &[u8], inputs: Vec<(S, Tensor)>, output_str: &str, ) -> std::result::Result<(), ::proptest::test_runner::TestCaseError> { setup_test_logger(); let mut model = tract_tensorflow::tensorflow().model_for_read(&mut &*graph)?; model.set_input_names(&inputs.iter().map(|pair| pair.0.as_ref()).collect::<Vec<&str>>())?; model.set_output_names(&[output_str])?; for (ix, (_, tf)) in inputs.iter().enumerate() { model.set_input_fact(ix, TensorFact::dt_shape(tf.datum_type(), tf.shape()))?; } let plan = SimplePlan::new(&model)?; let mut state = SimpleState::new(&plan)?; for (ix, (_, t)) in inputs.iter().enumerate() { state.set_input(ix, t.clone()).unwrap(); } let output = model.node_by_name(output_str)?; info!("Checking {} behaviour against tensorflow", output.name); state.compute_recursively(output.id)?; let _found = &state.values[output.id].as_ref().unwrap(); info!("Checking inference consistency on {}", output.name); let input_vectors: TVec<TensorFact> = output .inputs .iter() .map(|outlet| { state.values[outlet.node].as_ref().unwrap()[outlet.slot] .clone() .into_tensor() .clone() .into() }) .collect(); let output_vectors: TVec<TensorFact> = tvec![state.values[output.id].as_ref().unwrap()[0].clone().into_tensor().clone().into(),]; let input_facts = input_vectors.iter().collect(); let output_facts = output_vectors.iter().collect(); let output = 
model.node_by_name_mut(output_str)?; let e = output.op.infer_facts(input_facts, output_facts, tvec!()); prop_assert!(e.is_ok(), "{:?}", e); Ok(()) }
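
// A hypothetical driver (not part of the original file) showing how `compare`
// is meant to be used: hand it the frozen-graph bytes, named input tensors and
// an output node name, and it checks tract against TensorFlow in all three
// modes (Infer, Type, Opt). The node names "input" and "output" below are
// placeholders.
#[allow(dead_code)]
pub fn check_against_tf(
    graph: &[u8],
    input: Tensor,
) -> std::result::Result<(), ::proptest::test_runner::TestCaseError> {
    compare(graph, vec![("input", input)], "output")
}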
32.807692
98
0.581243
1e9be097815080b2bcce1f38ca566453e95a55bc
40,346
use std::cell::Cell; use std::mem; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; use rustc_index::vec::IndexVec; use rustc_macros::HashStable; use rustc_middle::ich::StableHashingContext; use rustc_middle::mir; use rustc_middle::mir::interpret::{ sign_extend, truncate, FrameInfo, GlobalId, InterpResult, Pointer, Scalar, }; use rustc_middle::ty::layout::{self, TyAndLayout}; use rustc_middle::ty::{ self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, }; use rustc_span::{source_map::DUMMY_SP, Span}; use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout}; use super::{ Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy, ScalarMaybeUninit, StackPopJump, }; use crate::transform::validate::equal_up_to_regions; use crate::util::storage::AlwaysLiveLocals; pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> { /// Stores the `Machine` instance. /// /// Note: the stack is provided by the machine. pub machine: M, /// The results of the type checker, from rustc. /// The span in this is the "root" of the evaluation, i.e., the const /// we are evaluating (if this is CTFE). pub tcx: TyCtxtAt<'tcx>, /// Bounds in scope for polymorphic evaluations. pub(crate) param_env: ty::ParamEnv<'tcx>, /// The virtual memory system. pub memory: Memory<'mir, 'tcx, M>, /// A cache for deduplicating vtables pub(super) vtables: FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Pointer<M::PointerTag>>, } /// A stack frame. #[derive(Clone)] pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> { //////////////////////////////////////////////////////////////////////////////// // Function and callsite information //////////////////////////////////////////////////////////////////////////////// /// The MIR for the function called on this frame. pub body: &'mir mir::Body<'tcx>, /// The def_id and substs of the current function. pub instance: ty::Instance<'tcx>, /// Extra data for the machine. pub extra: Extra, //////////////////////////////////////////////////////////////////////////////// // Return place and locals //////////////////////////////////////////////////////////////////////////////// /// Work to perform when returning from this function. pub return_to_block: StackPopCleanup, /// The location where the result of the current stack frame should be written to, /// and its layout in the caller. pub return_place: Option<PlaceTy<'tcx, Tag>>, /// The list of locals for this stack frame, stored in order as /// `[return_ptr, arguments..., variables..., temporaries...]`. /// The locals are stored as `Option<Value>`s. /// `None` represents a local that is currently dead, while a live local /// can either directly contain `Scalar` or refer to some part of an `Allocation`. pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>, //////////////////////////////////////////////////////////////////////////////// // Current position within the function //////////////////////////////////////////////////////////////////////////////// /// If this is `None`, we are unwinding and this function doesn't need any clean-up. /// Just continue the same as with `Resume`. pub loc: Option<mir::Location>, } #[derive(Clone, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these pub enum StackPopCleanup { /// Jump to the next block in the caller, or cause UB if None (that's a function /// that may never return). 
Also store layout of return place so /// we can validate it at that layout. /// `ret` stores the block we jump to on a normal return, while `unwind` /// stores the block used for cleanup during unwinding. Goto { ret: Option<mir::BasicBlock>, unwind: Option<mir::BasicBlock> }, /// Just do nothing: Used by Main and for the `box_alloc` hook in miri. /// `cleanup` says whether locals are deallocated. Static computation /// wants them leaked to intern what they need (and just throw away /// the entire `ecx` when it is done). None { cleanup: bool }, } /// State of a local variable including a memoized layout #[derive(Clone, PartialEq, Eq, HashStable)] pub struct LocalState<'tcx, Tag = ()> { pub value: LocalValue<Tag>, /// Don't modify if `Some`, this is only used to prevent computing the layout twice #[stable_hasher(ignore)] pub layout: Cell<Option<TyAndLayout<'tcx>>>, } /// Current value of a local variable #[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these pub enum LocalValue<Tag = ()> { /// This local is not currently alive, and cannot be used at all. Dead, /// This local is alive but not yet initialized. It can be written to /// but not read from or its address taken. Locals get initialized on /// first write because for unsized locals, we do not know their size /// before that. Uninitialized, /// A normal, live local. /// Mostly for convenience, we re-use the `Operand` type here. /// This is an optimization over just always having a pointer here; /// we can thus avoid doing an allocation when the local just stores /// immediate values *and* never has its address taken. Live(Operand<Tag>), } impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> { /// Read the local's value or error if the local is not yet live or not live anymore. /// /// Note: This may only be invoked from the `Machine::access_local` hook and not from /// anywhere else. You may be invalidating machine invariants if you do! pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> { match self.value { LocalValue::Dead => throw_ub!(DeadLocal), LocalValue::Uninitialized => { bug!("The type checker should prevent reading from a never-written local") } LocalValue::Live(val) => Ok(val), } } /// Overwrite the local. If the local can be overwritten in place, return a reference /// to do so; otherwise return the `MemPlace` to consult instead. /// /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from /// anywhere else. You may be invalidating machine invariants if you do! pub fn access_mut( &mut self, ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> { match self.value { LocalValue::Dead => throw_ub!(DeadLocal), LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)), ref mut local @ (LocalValue::Live(Operand::Immediate(_)) | LocalValue::Uninitialized) => { Ok(Ok(local)) } } } } impl<'mir, 'tcx, Tag> Frame<'mir, 'tcx, Tag> { pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Tag, Extra> { Frame { body: self.body, instance: self.instance, return_to_block: self.return_to_block, return_place: self.return_place, locals: self.locals, loc: self.loc, extra, } } } impl<'mir, 'tcx, Tag, Extra> Frame<'mir, 'tcx, Tag, Extra> { /// Return the `SourceInfo` of the current instruction. 
pub fn current_source_info(&self) -> Option<&mir::SourceInfo> { self.loc.map(|loc| self.body.source_info(loc)) } } impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> { #[inline] fn data_layout(&self) -> &TargetDataLayout { &self.tcx.data_layout } } impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M> where M: Machine<'mir, 'tcx>, { #[inline] fn tcx(&self) -> TyCtxt<'tcx> { *self.tcx } } impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M> where M: Machine<'mir, 'tcx>, { fn param_env(&self) -> ty::ParamEnv<'tcx> { self.param_env } } impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> { type Ty = Ty<'tcx>; type TyAndLayout = InterpResult<'tcx, TyAndLayout<'tcx>>; #[inline] fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout { self.tcx .layout_of(self.param_env.and(ty)) .map_err(|layout| err_inval!(Layout(layout)).into()) } } /// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value. /// This test should be symmetric, as it is primarily about layout compatibility. pub(super) fn mir_assign_valid_types<'tcx>( tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, src: TyAndLayout<'tcx>, dest: TyAndLayout<'tcx>, ) -> bool { // Type-changing assignments can happen when subtyping is used. While // all normal lifetimes are erased, higher-ranked types with their // late-bound lifetimes are still around and can lead to type // differences. So we compare ignoring lifetimes. if equal_up_to_regions(tcx, param_env, src.ty, dest.ty) { // Make sure the layout is equal, too -- just to be safe. Miri really // needs layout equality. For performance reason we skip this check when // the types are equal. Equal types *can* have different layouts when // enum downcast is involved (as enum variants carry the type of the // enum), but those should never occur in assignments. if cfg!(debug_assertions) || src.ty != dest.ty { assert_eq!(src.layout, dest.layout); } true } else { false } } /// Use the already known layout if given (but sanity check in debug mode), /// or compute the layout. 
#[cfg_attr(not(debug_assertions), inline(always))] pub(super) fn from_known_layout<'tcx>( tcx: TyCtxtAt<'tcx>, param_env: ParamEnv<'tcx>, known_layout: Option<TyAndLayout<'tcx>>, compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, TyAndLayout<'tcx>> { match known_layout { None => compute(), Some(known_layout) => { if cfg!(debug_assertions) { let check_layout = compute()?; if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) { span_bug!( tcx.span, "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}", known_layout.ty, check_layout.ty, ); } } Ok(known_layout) } } } impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn new( tcx: TyCtxt<'tcx>, root_span: Span, param_env: ty::ParamEnv<'tcx>, machine: M, memory_extra: M::MemoryExtra, ) -> Self { InterpCx { machine, tcx: tcx.at(root_span), param_env, memory: Memory::new(tcx, memory_extra), vtables: FxHashMap::default(), } } #[inline(always)] pub fn cur_span(&self) -> Span { self.stack() .last() .and_then(|f| f.current_source_info()) .map(|si| si.span) .unwrap_or(self.tcx.span) } #[inline(always)] pub fn force_ptr( &self, scalar: Scalar<M::PointerTag>, ) -> InterpResult<'tcx, Pointer<M::PointerTag>> { self.memory.force_ptr(scalar) } #[inline(always)] pub fn force_bits( &self, scalar: Scalar<M::PointerTag>, size: Size, ) -> InterpResult<'tcx, u128> { self.memory.force_bits(scalar, size) } /// Call this to turn untagged "global" pointers (obtained via `tcx`) into /// the machine pointer to the allocation. Must never be used /// for any other pointers, nor for TLS statics. /// /// Using the resulting pointer represents a *direct* access to that memory /// (e.g. by directly using a `static`), /// as opposed to access through a pointer that was created by the program. /// /// This function can fail only if `ptr` points to an `extern static`. 
#[inline(always)] pub fn global_base_pointer(&self, ptr: Pointer) -> InterpResult<'tcx, Pointer<M::PointerTag>> { self.memory.global_base_pointer(ptr) } #[inline(always)] pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] { M::stack(self) } #[inline(always)] pub(crate) fn stack_mut( &mut self, ) -> &mut Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>> { M::stack_mut(self) } #[inline(always)] pub fn frame_idx(&self) -> usize { let stack = self.stack(); assert!(!stack.is_empty()); stack.len() - 1 } #[inline(always)] pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> { self.stack().last().expect("no call frames exist") } #[inline(always)] pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> { self.stack_mut().last_mut().expect("no call frames exist") } #[inline(always)] pub(super) fn body(&self) -> &'mir mir::Body<'tcx> { self.frame().body } #[inline(always)] pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 { assert!(ty.abi.is_signed()); sign_extend(value, ty.size) } #[inline(always)] pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 { truncate(value, ty.size) } #[inline] pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { ty.is_sized(self.tcx, self.param_env) } #[inline] pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { ty.is_freeze(self.tcx, self.param_env) } pub fn load_mir( &self, instance: ty::InstanceDef<'tcx>, promoted: Option<mir::Promoted>, ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> { // do not continue if typeck errors occurred (can only occur in local crate) let def = instance.with_opt_param(); if let Some(def) = def.as_local() { if self.tcx.has_typeck_results(def.did) { if let Some(error_reported) = self.tcx.typeck_opt_const_arg(def).tainted_by_errors { throw_inval!(TypeckError(error_reported)) } } } trace!("load mir(instance={:?}, promoted={:?})", instance, promoted); if let Some(promoted) = promoted { return Ok(&self.tcx.promoted_mir_of_opt_const_arg(def)[promoted]); } match instance { ty::InstanceDef::Item(def) => { if self.tcx.is_mir_available(def.did) { if let Some((did, param_did)) = def.as_const_arg() { Ok(self.tcx.optimized_mir_of_const_arg((did, param_did))) } else { Ok(self.tcx.optimized_mir(def.did)) } } else { throw_unsup!(NoMirFor(def.did)) } } _ => Ok(self.tcx.instance_mir(instance)), } } /// Call this on things you got out of the MIR (so it is as generic as the current /// stack frame), to bring it into the proper environment for this interpreter. pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>( &self, value: T, ) -> T { self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value) } /// Call this on things you got out of the MIR (so it is as generic as the provided /// stack frame), to bring it into the proper environment for this interpreter. pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>( &self, frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>, value: T, ) -> T { if let Some(substs) = frame.instance.substs_for_mir_body() { self.tcx.subst_and_normalize_erasing_regions(substs, self.param_env, &value) } else { self.tcx.normalize_erasing_regions(self.param_env, value) } } /// The `substs` are assumed to already be in our interpreter "universe" (param_env). 
pub(super) fn resolve( &self, def_id: DefId, substs: SubstsRef<'tcx>, ) -> InterpResult<'tcx, ty::Instance<'tcx>> { trace!("resolve: {:?}, {:#?}", def_id, substs); trace!("param_env: {:#?}", self.param_env); trace!("substs: {:#?}", substs); match ty::Instance::resolve(*self.tcx, self.param_env, def_id, substs) { Ok(Some(instance)) => Ok(instance), Ok(None) => throw_inval!(TooGeneric), // FIXME(eddyb) this could be a bit more specific than `TypeckError`. Err(error_reported) => throw_inval!(TypeckError(error_reported)), } } pub fn layout_of_local( &self, frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>, local: mir::Local, layout: Option<TyAndLayout<'tcx>>, ) -> InterpResult<'tcx, TyAndLayout<'tcx>> { // `const_prop` runs into this with an invalid (empty) frame, so we // have to support that case (mostly by skipping all caching). match frame.locals.get(local).and_then(|state| state.layout.get()) { None => { let layout = from_known_layout(self.tcx, self.param_env, layout, || { let local_ty = frame.body.local_decls[local].ty; let local_ty = self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty); self.layout_of(local_ty) })?; if let Some(state) = frame.locals.get(local) { // Layouts of locals are requested a lot, so we cache them. state.layout.set(Some(layout)); } Ok(layout) } Some(layout) => Ok(layout), } } /// Returns the actual dynamic size and alignment of the place at the given type. /// Only the "meta" (metadata) part of the place matters. /// This can fail to provide an answer for extern types. pub(super) fn size_and_align_of( &self, metadata: MemPlaceMeta<M::PointerTag>, layout: TyAndLayout<'tcx>, ) -> InterpResult<'tcx, Option<(Size, Align)>> { if !layout.is_unsized() { return Ok(Some((layout.size, layout.align.abi))); } match layout.ty.kind { ty::Adt(..) | ty::Tuple(..) => { // First get the size of all statically known fields. // Don't use type_of::sizing_type_of because that expects t to be sized, // and it also rounds up to alignment, which we want to avoid, // as the unsized field's alignment could be smaller. assert!(!layout.ty.is_simd()); assert!(layout.fields.count() > 0); trace!("DST layout: {:?}", layout); let sized_size = layout.fields.offset(layout.fields.count() - 1); let sized_align = layout.align.abi; trace!( "DST {} statically sized prefix size: {:?} align: {:?}", layout.ty, sized_size, sized_align ); // Recurse to get the size of the dynamically sized field (must be // the last field). Can't have foreign types here, how would we // adjust alignment and size for them? let field = layout.field(self, layout.fields.count() - 1)?; let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? { Some(size_and_align) => size_and_align, None => { // A field with extern type. If this field is at offset 0, we behave // like the underlying extern type. // FIXME: Once we have made decisions for how to handle size and alignment // of `extern type`, this should be adapted. It is just a temporary hack // to get some code to work that probably ought to work. if sized_size == Size::ZERO { return Ok(None); } else { span_bug!( self.cur_span(), "Fields cannot be extern types, unless they are at offset 0" ) } } }; // FIXME (#26403, #27023): We should be adding padding // to `sized_size` (to accommodate the `unsized_align` // required of the unsized field that follows) before // summing it with `sized_size`. (Note that since #26403 // is unfixed, we do not yet add the necessary padding // here. But this is where the add would go.) 
// Return the sum of sizes and max of aligns. let size = sized_size + unsized_size; // `Size` addition // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). let align = sized_align.max(unsized_align); // Issue #27023: must add any necessary padding to `size` // (to make it a multiple of `align`) before returning it. let size = size.align_to(align); // Check if this brought us over the size limit. if size.bytes() >= self.tcx.data_layout.obj_size_bound() { throw_ub!(InvalidMeta("total size is bigger than largest supported object")); } Ok(Some((size, align))) } ty::Dynamic(..) => { let vtable = metadata.unwrap_meta(); // Read size and align from vtable (already checks size). Ok(Some(self.read_size_and_align_from_vtable(vtable)?)) } ty::Slice(_) | ty::Str => { let len = metadata.unwrap_meta().to_machine_usize(self)?; let elem = layout.field(self, 0)?; // Make sure the slice is not too big. let size = elem.size.checked_mul(len, self).ok_or_else(|| { err_ub!(InvalidMeta("slice is bigger than largest supported object")) })?; Ok(Some((size, elem.align.abi))) } ty::Foreign(_) => Ok(None), _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty), } } #[inline] pub fn size_and_align_of_mplace( &self, mplace: MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, Option<(Size, Align)>> { self.size_and_align_of(mplace.meta, mplace.layout) } pub fn push_stack_frame( &mut self, instance: ty::Instance<'tcx>, body: &'mir mir::Body<'tcx>, return_place: Option<PlaceTy<'tcx, M::PointerTag>>, return_to_block: StackPopCleanup, ) -> InterpResult<'tcx> { if !self.stack().is_empty() { info!("PAUSING({}) {}", self.frame_idx(), self.frame().instance); } ::log_settings::settings().indentation += 1; // first push a stack frame so we have access to the local substs let pre_frame = Frame { body, loc: Some(mir::Location::START), return_to_block, return_place, // empty local array, we fill it in below, after we are inside the stack frame and // all methods actually know about the frame locals: IndexVec::new(), instance, extra: (), }; let frame = M::init_frame_extra(self, pre_frame)?; self.stack_mut().push(frame); // Locals are initially uninitialized. let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) }; let mut locals = IndexVec::from_elem(dummy, &body.local_decls); // Now mark those locals as dead that we do not want to initialize match self.tcx.def_kind(instance.def_id()) { // statics and constants don't have `Storage*` statements, no need to look for them // // FIXME: The above is likely untrue. See // <https://github.com/rust-lang/rust/pull/70004#issuecomment-602022110>. Is it // okay to ignore `StorageDead`/`StorageLive` annotations during CTFE? DefKind::Static | DefKind::Const | DefKind::AssocConst => {} _ => { // Mark locals that use `Storage*` annotations as dead on function entry. let always_live = AlwaysLiveLocals::new(self.body()); for local in locals.indices() { if !always_live.contains(local) { locals[local].value = LocalValue::Dead; } } } } // done self.frame_mut().locals = locals; M::after_stack_push(self)?; info!("ENTERING({}) {}", self.frame_idx(), self.frame().instance); if !self.tcx.sess.recursion_limit().value_within_limit(self.stack().len()) { throw_exhaust!(StackFrameLimitReached) } else { Ok(()) } } /// Jump to the given block. 
#[inline] pub fn go_to_block(&mut self, target: mir::BasicBlock) { self.frame_mut().loc = Some(mir::Location { block: target, statement_index: 0 }); } /// *Return* to the given `target` basic block. /// Do *not* use for unwinding! Use `unwind_to_block` instead. /// /// If `target` is `None`, that indicates the function cannot return, so we raise UB. pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> { if let Some(target) = target { self.go_to_block(target); Ok(()) } else { throw_ub!(Unreachable) } } /// *Unwind* to the given `target` basic block. /// Do *not* use for returning! Use `return_to_block` instead. /// /// If `target` is `None`, that indicates the function does not need cleanup during /// unwinding, and we will just keep propagating that upwards. pub fn unwind_to_block(&mut self, target: Option<mir::BasicBlock>) { self.frame_mut().loc = target.map(|block| mir::Location { block, statement_index: 0 }); } /// Pops the current frame from the stack, deallocating the /// memory for allocated locals. /// /// If `unwinding` is `false`, then we are performing a normal return /// from a function. In this case, we jump back into the frame of the caller, /// and continue execution as normal. /// /// If `unwinding` is `true`, then we are in the middle of a panic, /// and need to unwind this frame. In this case, we jump to the /// `cleanup` block for the function, which is responsible for running /// `Drop` impls for any locals that have been initialized at this point. /// The cleanup block ends with a special `Resume` terminator, which will /// cause us to continue unwinding. pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> { info!( "LEAVING({}) {} (unwinding = {})", self.frame_idx(), self.frame().instance, unwinding ); // Sanity check `unwinding`. assert_eq!( unwinding, match self.frame().loc { None => true, Some(loc) => self.body().basic_blocks()[loc.block].is_cleanup, } ); if unwinding && self.frame_idx() == 0 { throw_ub_format!("unwinding past the topmost frame of the stack"); } ::log_settings::settings().indentation -= 1; let frame = self.stack_mut().pop().expect("tried to pop a stack frame, but there were none"); if !unwinding { // Copy the return value to the caller's stack frame. if let Some(return_place) = frame.return_place { let op = self.access_local(&frame, mir::RETURN_PLACE, None)?; self.copy_op_transmute(op, return_place)?; trace!("{:?}", self.dump_place(*return_place)); } else { throw_ub!(Unreachable); } } // Now where do we jump next? // Usually we want to clean up (deallocate locals), but in a few rare cases we don't. // In that case, we return early. We also avoid validation in that case, // because this is CTFE and the final value will be thoroughly validated anyway. let (cleanup, next_block) = match frame.return_to_block { StackPopCleanup::Goto { ret, unwind } => { (true, Some(if unwinding { unwind } else { ret })) } StackPopCleanup::None { cleanup, .. } => (cleanup, None), }; if !cleanup { assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked"); assert!(next_block.is_none(), "tried to skip cleanup when we have a next block!"); assert!(!unwinding, "tried to skip cleanup during unwinding"); // Leak the locals, skip validation, skip machine hook. return Ok(()); } // Cleanup: deallocate all locals that are backed by an allocation. for local in &frame.locals { self.deallocate_local(local.value)?; } if M::after_stack_pop(self, frame, unwinding)? 
== StackPopJump::NoJump { // The hook already did everything. // We want to skip the `info!` below, hence early return. return Ok(()); } // Normal return, figure out where to jump. if unwinding { // Follow the unwind edge. let unwind = next_block.expect("Encountered StackPopCleanup::None when unwinding!"); self.unwind_to_block(unwind); } else { // Follow the normal return edge. if let Some(ret) = next_block { self.return_to_block(ret)?; } } if !self.stack().is_empty() { info!( "CONTINUING({}) {} (unwinding = {})", self.frame_idx(), self.frame().instance, unwinding ); } Ok(()) } /// Mark a storage as live, killing the previous content and returning it. /// Remember to deallocate that! pub fn storage_live( &mut self, local: mir::Local, ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> { assert!(local != mir::RETURN_PLACE, "Cannot make return place live"); trace!("{:?} is now live", local); let local_val = LocalValue::Uninitialized; // StorageLive *always* kills the value that's currently stored. // However, we do not error if the variable already is live; // see <https://github.com/rust-lang/rust/issues/42371>. Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val)) } /// Returns the old value of the local. /// Remember to deallocate that! pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> { assert!(local != mir::RETURN_PLACE, "Cannot make return place dead"); trace!("{:?} is now dead", local); mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead) } pub(super) fn deallocate_local( &mut self, local: LocalValue<M::PointerTag>, ) -> InterpResult<'tcx> { // FIXME: should we tell the user that there was a local which was never written to? if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local { // All locals have a backing allocation, even if the allocation is empty // due to the local having ZST type. let ptr = ptr.assert_ptr(); trace!("deallocating local: {:?}", self.memory.dump_alloc(ptr.alloc_id)); self.memory.deallocate_local(ptr)?; }; Ok(()) } pub(super) fn const_eval( &self, gid: GlobalId<'tcx>, ty: Ty<'tcx>, ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics // and thus don't care about the parameter environment. While we could just use // `self.param_env`, that would mean we invoke the query to evaluate the static // with different parameter environments, thus causing the static to be evaluated // multiple times. let param_env = if self.tcx.is_static(gid.instance.def_id()) { ty::ParamEnv::reveal_all() } else { self.param_env }; let val = self.tcx.const_eval_global_id(param_env, gid, Some(self.tcx.span))?; // Even though `ecx.const_eval` is called from `const_to_op` we can never have a // recursion deeper than one level, because the `tcx.const_eval` above is guaranteed to not // return `ConstValue::Unevaluated`, which is the only way that `const_to_op` will call // `ecx.const_eval`. let const_ = ty::Const { val: ty::ConstKind::Value(val), ty }; self.const_to_op(&const_, None) } pub fn const_eval_raw( &self, gid: GlobalId<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics // and thus don't care about the parameter environment. While we could just use // `self.param_env`, that would mean we invoke the query to evaluate the static // with different parameter environments, thus causing the static to be evaluated // multiple times. 
let param_env = if self.tcx.is_static(gid.instance.def_id()) { ty::ParamEnv::reveal_all() } else { self.param_env }; // We use `const_eval_raw` here, and get an unvalidated result. That is okay: // Our result will later be validated anyway, and there seems no good reason // to have to fail early here. This is also more consistent with // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles. // FIXME: We can hit delay_span_bug if this is an invalid const, interning finds // that problem, but we never run validation to show an error. Can we ensure // this does not happen? let val = self.tcx.const_eval_raw(param_env.and(gid))?; self.raw_const_to_mplace(val) } #[must_use] pub fn dump_place(&'a self, place: Place<M::PointerTag>) -> PlacePrinter<'a, 'mir, 'tcx, M> { PlacePrinter { ecx: self, place } } #[must_use] pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> { let mut frames = Vec::new(); for frame in self.stack().iter().rev() { let source_info = frame.current_source_info(); let lint_root = source_info.and_then(|source_info| { match &frame.body.source_scopes[source_info.scope].local_data { mir::ClearCrossCrate::Set(data) => Some(data.lint_root), mir::ClearCrossCrate::Clear => None, } }); let span = source_info.map_or(DUMMY_SP, |source_info| source_info.span); frames.push(FrameInfo { span, instance: frame.instance, lint_root }); } trace!("generate stacktrace: {:#?}", frames); frames } } #[doc(hidden)] /// Helper struct for the `dump_place` function. pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> { ecx: &'a InterpCx<'mir, 'tcx, M>, place: Place<M::PointerTag>, } impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug for PlacePrinter<'a, 'mir, 'tcx, M> { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.place { Place::Local { frame, local } => { let mut allocs = Vec::new(); write!(fmt, "{:?}", local)?; if frame != self.ecx.frame_idx() { write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?; } write!(fmt, ":")?; match self.ecx.stack()[frame].locals[local].value { LocalValue::Dead => write!(fmt, " is dead")?, LocalValue::Uninitialized => write!(fmt, " is uninitialized")?, LocalValue::Live(Operand::Indirect(mplace)) => match mplace.ptr { Scalar::Ptr(ptr) => { write!( fmt, " by align({}){} ref:", mplace.align.bytes(), match mplace.meta { MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta), MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(), } )?; allocs.push(ptr.alloc_id); } ptr => write!(fmt, " by integral ref: {:?}", ptr)?, }, LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => { write!(fmt, " {:?}", val)?; if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val { allocs.push(ptr.alloc_id); } } LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => { write!(fmt, " ({:?}, {:?})", val1, val2)?; if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val1 { allocs.push(ptr.alloc_id); } if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val2 { allocs.push(ptr.alloc_id); } } } write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs)) } Place::Ptr(mplace) => match mplace.ptr { Scalar::Ptr(ptr) => write!( fmt, "by align({}) ref: {:?}", mplace.align.bytes(), self.ecx.memory.dump_alloc(ptr.alloc_id) ), ptr => write!(fmt, " integral by ref: {:?}", ptr), }, } } } impl<'ctx, 'mir, 'tcx, Tag, Extra> HashStable<StableHashingContext<'ctx>> for Frame<'mir, 'tcx, Tag, Extra> where Extra: HashStable<StableHashingContext<'ctx>>, Tag: 
HashStable<StableHashingContext<'ctx>>, { fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) { // Exhaustive match on fields to make sure we forget no field. let Frame { body, instance, return_to_block, return_place, locals, loc, extra } = self; body.hash_stable(hcx, hasher); instance.hash_stable(hcx, hasher); return_to_block.hash_stable(hcx, hasher); return_place.as_ref().map(|r| &**r).hash_stable(hcx, hasher); locals.hash_stable(hcx, hasher); loc.hash_stable(hcx, hasher); extra.hash_stable(hcx, hasher); } }
40.467402
100
0.56241
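// A minimal, self-contained sketch (not rustc code) of the caching pattern used by
// `from_known_layout` above: trust a caller-supplied cached value in release builds,
// but recompute and cross-check it when `debug_assertions` are enabled. The name
// `cached_or_compute` is hypothetical.
fn cached_or_compute<T: PartialEq + std::fmt::Debug>(
    known: Option<T>,
    compute: impl FnOnce() -> T,
) -> T {
    match known {
        // No cached value: compute from scratch.
        None => compute(),
        Some(known) => {
            // Debug builds pay for a recomputation to catch stale caches early.
            if cfg!(debug_assertions) {
                let check = compute();
                assert_eq!(check, known, "cached value diverged from recomputation");
            }
            known
        }
    }
}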
79b251e34002fe38204e29534c0b3505426f5edc
4,158
use std::{cell::RefCell, rc::Rc};

use serde::Deserialize;

use crate::dataflow::{Data, Message, State, Timestamp};

use super::{
    errors::{ReadError, TryReadError},
    IngestStream, InternalReadStream, LoopStream, StatefulReadStream, StreamId, WriteStream,
};

/// Allows reading from a stream and registering callbacks.
///
/// Passed as an argument to `Operator::new` and `Operator::connect`.
/// Also used in the driver to construct a dataflow.
#[derive(Clone, Default)]
pub struct ReadStream<D: Data> {
    /// Stores metadata and internal state about the stream.
    internal_stream: Rc<RefCell<InternalReadStream<D>>>,
}

impl<D: Data> ReadStream<D> {
    /// Creates a new `ReadStream`.
    pub fn new() -> Self {
        Self {
            internal_stream: Rc::new(RefCell::new(InternalReadStream::new())),
        }
    }

    /// Add a callback to be invoked when the stream receives a message.
    pub fn add_callback<F: 'static + Fn(&Timestamp, &D)>(&self, callback: F) {
        self.internal_stream.borrow_mut().add_callback(callback);
    }

    /// Add a callback to be invoked after the stream has received, and the
    /// operator has processed, all messages with a given timestamp.
    pub fn add_watermark_callback<F: 'static + Fn(&Timestamp)>(&self, callback: F) {
        self.internal_stream
            .borrow_mut()
            .add_watermark_callback(callback);
    }

    /// Returns a new instance of the stream with an associated state.
    ///
    /// This state is accessible by callbacks added to the
    /// [`StatefulReadStream`].
    pub fn add_state<S: State>(&self, state: S) -> StatefulReadStream<D, S> {
        StatefulReadStream::from(self.internal_stream.borrow_mut().add_state(state))
    }

    pub fn get_id(&self) -> StreamId {
        self.internal_stream.borrow().get_id()
    }

    pub fn get_name(&self) -> String {
        self.internal_stream.borrow().get_name().to_string()
    }

    pub fn is_closed(&self) -> bool {
        self.internal_stream.borrow().is_closed()
    }

    /// Tries to read a message from a channel (non-blocking).
    ///
    /// Returns the message, or a [`TryReadError`] if no message is
    /// available at the moment.
    pub fn try_read(&self) -> Result<Message<D>, TryReadError> {
        self.internal_stream.borrow_mut().try_read()
    }

    /// Blocking read. Returns a [`ReadError`] if the stream doesn't have a
    /// receive endpoint.
    pub fn read(&self) -> Result<Message<D>, ReadError> {
        self.internal_stream.borrow_mut().read()
    }
}

impl<D: Data> From<&ReadStream<D>> for ReadStream<D> {
    fn from(read_stream: &ReadStream<D>) -> Self {
        read_stream.clone()
    }
}

impl<D: Data> From<&WriteStream<D>> for ReadStream<D> {
    fn from(write_stream: &WriteStream<D>) -> Self {
        Self::from(InternalReadStream::new_with_id_name(
            write_stream.get_id(),
            write_stream.get_name(),
        ))
    }
}

impl<D> From<&LoopStream<D>> for ReadStream<D>
where
    for<'a> D: Data + Deserialize<'a>,
{
    fn from(loop_stream: &LoopStream<D>) -> Self {
        Self::from(InternalReadStream::new_with_id_name(
            loop_stream.get_id(),
            loop_stream.get_name(),
        ))
    }
}

impl<D> From<&IngestStream<D>> for ReadStream<D>
where
    for<'a> D: Data + Deserialize<'a>,
{
    fn from(ingest_stream: &IngestStream<D>) -> Self {
        Self::from(InternalReadStream::new_with_id_name(
            ingest_stream.get_id(),
            ingest_stream.get_name(),
        ))
    }
}

impl<D: Data> From<InternalReadStream<D>> for ReadStream<D> {
    fn from(internal_stream: InternalReadStream<D>) -> Self {
        Self {
            internal_stream: Rc::new(RefCell::new(internal_stream)),
        }
    }
}

impl<D: Data> From<Rc<RefCell<InternalReadStream<D>>>> for ReadStream<D> {
    fn from(internal_stream: Rc<RefCell<InternalReadStream<D>>>) -> Self {
        Self { internal_stream }
    }
}

impl<D: Data> From<&ReadStream<D>> for Rc<RefCell<InternalReadStream<D>>> {
    fn from(read_stream: &ReadStream<D>) -> Self {
        Rc::clone(&read_stream.internal_stream)
    }
}
30.8
92
0.638047
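// A minimal usage sketch for the `ReadStream` above, assuming `usize` satisfies the
// `Data` bound; it only touches methods defined in this file (`new`, `add_callback`,
// `add_watermark_callback`, `try_read`). The driver function name is hypothetical.
fn read_stream_usage_sketch() {
    let stream: ReadStream<usize> = ReadStream::new();
    // Invoked for every message the stream receives.
    stream.add_callback(|_timestamp, data| println!("received payload {}", data));
    // Invoked once all messages for a timestamp have been processed.
    stream.add_watermark_callback(|_timestamp| println!("watermark reached"));
    // Non-blocking poll; an error here typically just means "nothing to read yet".
    match stream.try_read() {
        Ok(_message) => println!("polled a message off the stream"),
        Err(_err) => println!("no message available right now"),
    }
}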
c14d634632ed4282bcd08e6ada097ab6f7821161
2,620
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use common_exception::Result;

use crate::prelude::*;

mod boolean;
mod date;
mod date_time;
mod nulls;
mod number;
mod string;
mod r#struct;

pub use boolean::*;
pub use date::*;
pub use date_time::*;
pub use nulls::*;
pub use number::*;
pub use r#struct::*;
pub use string::*;

pub trait TypeSerializer: Send + Sync {
    fn serialize_value(&self, value: &DataValue) -> Result<String>;
    fn serialize_column(&self, column: &DataColumn) -> Result<Vec<String>>;
}

impl DataType {
    pub fn create_serializer(&self) -> Box<dyn TypeSerializer> {
        match self {
            DataType::Null => Box::new(NullSerializer {}),
            DataType::Boolean => Box::new(BooleanSerializer {}),
            DataType::UInt8 => Box::new(NumberSerializer::<u8>::default()),
            DataType::UInt16 => Box::new(NumberSerializer::<u16>::default()),
            DataType::UInt32 => Box::new(NumberSerializer::<u32>::default()),
            DataType::UInt64 => Box::new(NumberSerializer::<u64>::default()),
            DataType::Int8 => Box::new(NumberSerializer::<i8>::default()),
            DataType::Int16 => Box::new(NumberSerializer::<i16>::default()),
            DataType::Int32 => Box::new(NumberSerializer::<i32>::default()),
            DataType::Int64 => Box::new(NumberSerializer::<i64>::default()),
            DataType::Float32 => Box::new(NumberSerializer::<f32>::default()),
            DataType::Float64 => Box::new(NumberSerializer::<f64>::default()),
            DataType::Date16 => Box::new(DateSerializer::<u16>::default()),
            DataType::Date32 => Box::new(DateSerializer::<i32>::default()),
            DataType::DateTime32(_) => Box::new(DateTimeSerializer::<u32>::default()),
            DataType::DateTime64(_, _) => Box::new(DateTimeSerializer::<i64>::default()),
            DataType::String => Box::new(StringSerializer {}),
            DataType::Struct(fields) => Box::new(StructSerializer {
                fields: fields.to_vec(),
            }),
            _ => todo!(),
        }
    }
}
39.104478
89
0.629389
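// A brief sketch of how the factory above is meant to be used: pick a `DataType`,
// obtain the matching boxed `TypeSerializer`, and render a whole column as strings.
// The helper name is hypothetical; `DataColumn` and `Result` come from the prelude,
// as in the file above.
fn render_column_sketch(column: &DataColumn) -> Result<Vec<String>> {
    let serializer = DataType::UInt32.create_serializer();
    // One display string per value in the column.
    serializer.serialize_column(column)
}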
6a9474dbf224217726e52e92f33e5e1c471b6186
1,193
use std::{cmp::PartialOrd, marker::Copy};

/// Sets all items of a mutable slice to the given value.
pub fn set_all<T>(vector: &mut [T], value: T)
where
    T: Copy,
{
    for item in vector.iter_mut() {
        *item = value;
    }
}

/// Finds the index of the maximum item.
pub fn find_max_index<T>(array: &[T]) -> usize
where
    T: PartialOrd,
{
    let mut vote_max_idx = 0;

    for i in 1..array.len() {
        if array[i] > array[vote_max_idx] {
            vote_max_idx = i;
        }
    }

    vote_max_idx
}

/// As implemented in `libsvm`: computes `1 / (1 + exp(a * decision_value + b))`
/// in a numerically stable way.
pub fn sigmoid_predict(decision_value: f64, a: f64, b: f64) -> f64 {
    let fapb = decision_value * a + b;

    // Citing from the original libSVM implementation:
    // "1-p used later; avoid catastrophic cancellation"
    if fapb >= 0_f64 {
        (-fapb).exp() / (1_f64 + (-fapb).exp())
    } else {
        1_f64 / (1_f64 + fapb.exp())
    }
}

/// As implemented in `libsvm`: computes `base^times` by repeated squaring.
pub fn powi(base: f64, times: u32) -> f64 {
    let mut tmp = base;
    let mut ret = 1.0;
    let mut t = times;

    while t > 0 {
        if t % 2 == 1 {
            ret *= tmp;
        }
        tmp *= tmp;
        t /= 2;
    }

    ret
}
20.220339
68
0.540654
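// A small worked example for the helpers above. Both branches of `sigmoid_predict`
// compute the same value, 1 / (1 + e^(a*d + b)); the split only avoids overflow and
// catastrophic cancellation for large |a*d + b|. `powi` is plain exponentiation by
// squaring. The demo function name is hypothetical.
fn utils_worked_example() {
    // set_all overwrites every slot.
    let mut v = [1, 2, 3];
    set_all(&mut v, 7);
    assert_eq!(v, [7, 7, 7]);

    // find_max_index returns the index of the largest element.
    assert_eq!(find_max_index(&[0.1, 0.9, 0.5]), 1);

    // 2^10 by repeated squaring: t = 10 -> 5 -> 2 -> 1 -> 0.
    assert!((powi(2.0, 10) - 1024.0).abs() < 1e-12);

    // decision_value = 0, a = 1, b = 0  =>  sigmoid(0) = 0.5.
    assert!((sigmoid_predict(0.0, 1.0, 0.0) - 0.5).abs() < 1e-12);
}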
167a125d9b9a12b82dcedcf1864c4f45ba886ba5
3,485
use std::marker::PhantomData;

/// Manages allocation of arbitrarily long chunks of memory from an externally backed memory pool.
///
/// This is useful for fitting many things inside a vulkan buffer.
pub struct Allocator<T> {
    free: Vec<BufferAllocation<T>>,
    capacity: usize,
}

impl<T> Allocator<T> {
    pub fn new(capacity: usize) -> Self {
        Self {
            free: vec![BufferAllocation::new(capacity, 0)],
            capacity,
        }
    }

    /// Allocate `len` elements from the allocator.
    ///
    /// Returns `None` if no contiguous free space of `len` was found.
    pub fn allocate(&mut self, len: usize) -> Option<BufferAllocation<T>> {
        // First fit: take the first free block that is large enough.
        let (index, block) = self
            .free
            .iter_mut()
            .enumerate()
            .find(|(_, block)| block.len >= len)?;

        if block.len == len {
            // Exact fit: consume the whole free block.
            Some(self.free.remove(index))
        } else {
            // Split: shrink the free block from the front and hand out the prefix.
            let mut block = std::mem::replace(
                block,
                BufferAllocation::new(block.len - len, block.offset + len),
            );
            block.len = len;
            Some(block)
        }
    }

    /// Free an allocation, coalescing it with adjacent free blocks.
    ///
    /// Behaviour is undefined if the allocation was not originally from self.
    pub fn free(&mut self, block: BufferAllocation<T>) {
        // Index of the first free block that starts after `block`.
        let index = self
            .free
            .iter()
            .position(|val| val.offset > block.offset)
            .unwrap_or(self.free.len());

        if index != 0 && index != self.free.len() {
            // `block` lies between two free blocks `a` and `b`.
            match &mut self.free[index - 1..=index] {
                [a, b] => {
                    if a.offset + a.len + block.len == b.offset {
                        // `block` exactly bridges the gap: merge all three.
                        a.len += block.len + b.len;
                        self.free.remove(index);
                    } else if a.offset + a.len == block.offset {
                        // `block` directly follows `a`: extend `a`.
                        a.len += block.len;
                    } else if block.offset + block.len == b.offset {
                        // `block` directly precedes `b`: extend `b` downwards.
                        // (Previously assigned `b.offset = block.len`, which
                        // corrupted the free list.)
                        b.len += block.len;
                        b.offset = block.offset;
                    } else {
                        self.free.insert(index, block);
                    }
                }
                _ => unreachable!(),
            }
        } else if index == self.free.len() {
            // `block` lies after the last free block (or the free list is empty).
            // Previously such blocks were silently dropped, leaking the range.
            if let Some(last) = self.free.last_mut() {
                if last.offset + last.len == block.offset {
                    last.len += block.len;
                    return;
                }
            }
            self.free.push(block);
        } else {
            // `block` lies before the first free block.
            let first = &mut self.free[0];
            if block.offset + block.len == first.offset {
                first.offset = block.offset;
                first.len += block.len;
            } else {
                self.free.insert(0, block);
            }
        }
    }

    /// Doubles the available size
    pub fn grow_double(&mut self) {
        self.grow(self.capacity)
    }

    /// Fit `additional` more elements
    pub fn grow(&mut self, additional: usize) {
        // Extend the trailing free block only if it actually reaches the end of
        // the pool; otherwise the new space becomes its own free block.
        // (Previously the last free block was extended unconditionally, which
        // could overlap live allocations at the end of the pool.)
        match self.free.last_mut() {
            Some(val) if val.offset + val.len == self.capacity => val.len += additional,
            _ => self
                .free
                .push(BufferAllocation::new(additional, self.capacity)),
        }
        self.capacity += additional;
    }

    /// Get a reference to the allocator's capacity.
    pub fn capacity(&self) -> usize {
        self.capacity
    }
}

#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct BufferAllocation<T> {
    len: usize,
    offset: usize,
    marker: PhantomData<T>,
}

impl<T> BufferAllocation<T> {
    fn new(len: usize, offset: usize) -> Self {
        Self {
            len,
            offset,
            marker: PhantomData,
        }
    }

    /// Get a reference to the buffer allocation's len.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Get a reference to the buffer allocation's offset.
    #[inline]
    pub fn offset(&self) -> usize {
        self.offset
    }
}
27.88
95
0.497274
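// A short usage sketch for the suballocator above: carve two ranges out of a
// 16-element pool, return one, grow the pool, and watch freed space get reused.
// The `u8` marker type is arbitrary; allocations are plain (offset, len) ranges
// into some external buffer. The demo function name is hypothetical.
fn allocator_usage_sketch() {
    let mut alloc: Allocator<u8> = Allocator::new(16);

    // First fit: both blocks come from the front of the single free range.
    let a = alloc.allocate(4).expect("16 free, 4 requested");
    let b = alloc.allocate(8).expect("12 free, 8 requested");
    assert_eq!((a.offset(), a.len()), (0, 4));
    assert_eq!((b.offset(), b.len()), (4, 8));

    // Returning `a` makes its range reusable.
    alloc.free(a);

    // Double the backing capacity (the caller must grow the real buffer too).
    alloc.grow_double();
    assert_eq!(alloc.capacity(), 32);

    // The freed range at offset 0 can satisfy a new exact-fit request.
    let c = alloc.allocate(4).expect("freed range is reusable");
    assert_eq!(c.offset(), 0);
    alloc.free(b);
    alloc.free(c);
}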
906e7c8e9147e3340d471ace6c9c21f468f10b8d
12,011
// Copyright 2020 Matthias Krüger. See the COPYRIGHT // file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use crate::cache::caches::{Cache, RegistrySuperCache}; use crate::cache::*; use crate::library::*; use crate::remove::*; use chrono::{prelude::*, NaiveDateTime}; use regex::Regex; // remove cache items that are older than X or younger than Y (or between X and Y) // testing: // ./target/debug/cargo-cache --dry-run --remove-dir=git-db --remove-if-younger-than 08:08:08 // check how to query files #[derive(Debug, Clone)] enum AgeRelation<'a> { None, FileOlderThanDate(&'a str), FileYoungerThanDate(&'a str), // OlderOrYounger(&'a str, &'a str), } fn parse_date(date: &str) -> Result<NaiveDateTime, Error> { // @TODO handle yyyyy.mm.dd hh:mm:ss // @TODO handle dd.mm.yy if yy is yy and not yyyy let date_to_compare: NaiveDateTime = { // xxxx.xx.xx => yyyy.mm.dd // we only have a date but no time if Regex::new(r"^\d{4}.\d{2}.\d{2}$").unwrap().is_match(date) { // most likely a date let now = Local::now(); let split: Result<Vec<u32>, _> = date.split('.').map(str::parse).collect(); let split = match split { Ok(result) => result, Err(a) => return Err(Error::DateParseFailure(a.to_string(), "u32".into())), }; #[allow(clippy::cast_possible_wrap)] let nd = if let Some(date) = NaiveDate::from_ymd_opt(split[0] as i32, split[1], split[2]) { date } else { return Err(Error::DateParseFailure( format!("{}.{}.{}", split[0], split[1], split[2]), "date".into(), )); }; nd.and_hms(now.hour(), now.minute(), now.second()) // xx:xx:xx => hh::mm::ss } else if Regex::new(r"^\d{2}:\d{2}:\d{2}$").unwrap().is_match(date) { // probably a time let today = Local::today(); let split: Result<Vec<u32>, _> = date.split(':').map(str::parse).collect(); let split = match split { Ok(result) => result, Err(a) => return Err(Error::DateParseFailure(a.to_string(), "u32".into())), }; let nd = if let Some(date) = NaiveDate::from_ymd_opt(today.year(), today.month(), today.day()) { date } else { return Err(Error::DateParseFailure( format!("{}:{}:{}", today.year(), today.month(), today.day()), "date".into(), )); }; nd.and_hms(split[0], split[1], split[2]) } else { return Err(Error::DateParseFailure(date.into(), "a valid date".into())); } }; Ok(date_to_compare) } #[derive(Debug, Clone)] struct FileWithDate { file: std::path::PathBuf, access_date: NaiveDateTime, } fn filter_files_by_date<'a>( date: &AgeRelation<'_>, files: &'a [FileWithDate], ) -> Result<Vec<&'a FileWithDate>, Error> { match date { AgeRelation::None => { unreachable!("ERROR: no dates were supplied although -o or -y were passed!"); } AgeRelation::FileYoungerThanDate(younger_date) => { // file is younger than date if file.date > date_param let date_parameter = parse_date(younger_date)?; Ok(files .iter() .filter(|file| file.access_date > date_parameter) .collect()) } AgeRelation::FileOlderThanDate(older_date) => { // file is older than date if file.date < date_param let date_parameter = parse_date(older_date)?; Ok(files .iter() .filter(|file| file.access_date < date_parameter) .collect()) } /* DateComparison::OlderOrYounger(older_date, younger_date) => { let younger_than = parse_date(younger_date)?; let older_than = parse_date(older_date)?; Ok(files .iter() // this may be bugged 
.filter(|file| file.access_date < younger_than || file.access_date > older_than) .collect()) } */ } } /// removes files that are older than $date from the cache, dirs can be specified #[allow(clippy::too_many_arguments)] pub(crate) fn remove_files_by_dates( // we need to know which part of the cargo-cache we need to clear out! checkouts_cache: &mut git_checkouts::GitCheckoutCache, bare_repos_cache: &mut git_bare_repos::GitRepoCache, registry_pkg_caches: &mut registry_pkg_cache::RegistryPkgCaches, registry_sources_caches: &mut registry_sources::RegistrySourceCaches, arg_younger: Option<&str>, arg_older: Option<&str>, dry_run: bool, dirs: Option<&str>, mut size_changed: &mut bool, ) -> Result<(), Error> { if dirs.is_none() { return Err(Error::RemoveDirNoArg); } // get the list of components that we want to check let components_to_remove_from = components_from_groups(dirs)?; // println!("components: {:?}", components_to_remove_from); let mut files_of_components: Vec<std::path::PathBuf> = Vec::new(); components_to_remove_from.iter().for_each(|component| { match component { Component::RegistryCrateCache => { files_of_components.extend(registry_pkg_caches.files()); } Component::RegistrySources => { files_of_components.extend(registry_sources_caches.files()); } Component::RegistryIndex => { /* ignore this case */ } Component::GitRepos => { files_of_components.extend(checkouts_cache.items().iter().map(|p| p.to_path_buf())); } Component::GitDB => { files_of_components .extend(bare_repos_cache.items().iter().map(|p| p.to_path_buf())); } } }); // try to find out how to compare dates let date_comp: AgeRelation<'_> = match (arg_older, arg_younger) { (None, None) => AgeRelation::None, (None, Some(younger)) => AgeRelation::FileYoungerThanDate(younger), (Some(older), None) => AgeRelation::FileOlderThanDate(older), (Some(_older), Some(_younger)) => { unreachable!("passing both, --remove-if-{older,younger}-than was temporarily disabled!") } // (Some(older), Some(younger)) => DateComparison::OlderOrYounger(older, younger), }; // for each file, get the access time let mut dates: Vec<FileWithDate> = files_of_components .into_iter() .map(|path| { let access_time = path.metadata().unwrap().accessed().unwrap(); let naive_datetime = chrono::DateTime::<Local>::from(access_time).naive_local(); FileWithDate { file: path, access_date: naive_datetime, } }) .collect(); dates.sort_by_key(|f| f.file.clone()); // filter the files by comparing the given date and the files access time let filtered_files: Vec<&FileWithDate> = filter_files_by_date(&date_comp, &dates)?; if dry_run { // if we dry run, we won't have to invalidate caches println!( "dry-run: would delete {} items that are {}...", filtered_files.len(), match date_comp { AgeRelation::FileYoungerThanDate(date) => format!("younger than {}", date), AgeRelation::FileOlderThanDate(date) => format!("older than {}", date), AgeRelation::None => unreachable!( "DateComparisonOlder and Younger or None not supported right now (dry run)" ), }, ); } else { // no dry run / actual run println!( "Deleting {} items that are {}...", filtered_files.len(), match date_comp { AgeRelation::FileYoungerThanDate(date) => format!("younger than {}", date), AgeRelation::FileOlderThanDate(date) => format!("older than {}", date), AgeRelation::None => unreachable!( "DateComparisonOlder and Younger or None not supported right now (no dry run)" ), }, ); filtered_files .into_iter() .map(|fwd| &fwd.file) //.inspect(|p| println!("{}", p.display())) .for_each(|path| { remove_file( path, false, &mut 
size_changed, None, &DryRunMessage::Default, None, ) }); // invalidate caches that we removed from components_to_remove_from.iter().for_each(|component| { match component { Component::RegistryCrateCache => { registry_pkg_caches.invalidate(); } Component::RegistrySources => { registry_sources_caches.invalidate(); } Component::RegistryIndex => { /* ignore this case */ } Component::GitRepos => { checkouts_cache.invalidate(); } Component::GitDB => { bare_repos_cache.invalidate(); } } }) } // summary is printed from inside main() Ok(()) } #[cfg(test)] mod libtests { use super::*; use pretty_assertions::assert_eq; #[test] fn parse_dates() { assert!(parse_date(&String::new()).is_err()); assert!(parse_date(&String::from("a")).is_err()); assert!(parse_date(&String::from("01.01:2002")).is_err()); assert!(parse_date(&String::from("01.01.2002")).is_err()); // need yyyy.mm.dd assert!(parse_date(&String::from("2002.30.30")).is_err()); assert_eq!( parse_date(&String::from("2002.01.01")) .unwrap() .format("%Y.%m.%d") .to_string(), String::from("2002.01.01") ); assert_eq!( parse_date(&String::from("1234.12.08")) .unwrap() .format("%Y.%m.%d") .to_string(), String::from("1234.12.08") ); assert_eq!( parse_date(&String::from("1990.12.08")) .unwrap() .format("%Y.%m.%d") .to_string(), String::from("1990.12.08") ); assert_eq!( parse_date(&String::from("12:00:00")) .unwrap() .format("%H:%M:%S") .to_string(), String::from("12:00:00") ); assert_eq!( parse_date(&String::from("00:00:00")) .unwrap() .format("%H:%M:%S") .to_string(), String::from("00:00:00") ); } #[test] #[should_panic(expected = "invalid time")] fn parse_dates_panic1() { assert!(parse_date(&String::from("24:00:00")).is_err()); } #[test] #[should_panic(expected = "invalid time")] fn parse_dates_panic2() { assert!(parse_date(&String::from("24:30:24")).is_err()); } #[test] #[should_panic(expected = "invalid time")] fn parse_dates_panic3() { assert!(parse_date(&String::from("30:30:24")).is_err()); } }
35.326471
100
0.531679
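// A minimal sketch of the age comparison that `filter_files_by_date` performs above,
// using chrono directly: an access time is "younger than" a cutoff when it compares
// greater, and "older than" when it compares less. The dates are hard-coded for
// illustration and the function name is hypothetical.
fn age_relation_sketch() {
    use chrono::NaiveDate;

    let cutoff = NaiveDate::from_ymd_opt(2002, 1, 1).unwrap().and_hms(12, 0, 0);
    let accessed = NaiveDate::from_ymd_opt(2003, 6, 15).unwrap().and_hms(8, 30, 0);

    // `AgeRelation::FileYoungerThanDate` keeps files accessed after the cutoff...
    assert!(accessed > cutoff);
    // ...while `AgeRelation::FileOlderThanDate` keeps files accessed before it.
    assert!(!(accessed < cutoff));
}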
feb5bab2433021631c218ae699200e2011f29deb
32,131
use std::cell::RefCell; use std::collections::HashMap; use std::rc::Rc; use std::string::String; use std::vec::Vec; use serde_json; use errors::prelude::*; use services::crypto::CryptoService; use services::payments::{PaymentsMethodCBs, PaymentsService}; use services::wallet::{RecordOptions, WalletService}; use api::WalletHandle; pub enum PaymentsCommand { RegisterMethod( String, //type PaymentsMethodCBs, //method callbacks Box<Fn(IndyResult<()>) + Send>), CreateAddress( WalletHandle, String, //type String, //config Box<Fn(IndyResult<String>) + Send>), CreateAddressAck( i32, //handle WalletHandle, IndyResult<String /* address */>), ListAddresses( WalletHandle, Box<Fn(IndyResult<String>) + Send>), AddRequestFees( WalletHandle, Option<String>, //submitter did String, //req String, //inputs String, //outputs Option<String>, //extra Box<Fn(IndyResult<(String, String)>) + Send>), AddRequestFeesAck( i32, //handle IndyResult<String>), ParseResponseWithFees( String, //type String, //response Box<Fn(IndyResult<String>) + Send>), ParseResponseWithFeesAck( i32, //handle IndyResult<String>), BuildGetPaymentSourcesRequest( WalletHandle, Option<String>, //submitter did String, //payment address Box<Fn(IndyResult<(String, String)>) + Send>), BuildGetPaymentSourcesRequestAck( i32, //handle IndyResult<String>), ParseGetPaymentSourcesResponse( String, //type String, //response Box<Fn(IndyResult<String>) + Send>), ParseGetPaymentSourcesResponseAck( i32, //cmd_handle IndyResult<String>), BuildPaymentReq( WalletHandle, Option<String>, //submitter did String, //inputs String, //outputs Option<String>, //extra Box<Fn(IndyResult<(String, String)>) + Send>), BuildPaymentReqAck( i32, IndyResult<String>), ParsePaymentResponse( String, //payment_method String, //response Box<Fn(IndyResult<String>) + Send>), ParsePaymentResponseAck( i32, IndyResult<String>), BuildMintReq( WalletHandle, Option<String>, //submitter did String, //outputs Option<String>, //extra Box<Fn(IndyResult<(String, String)>) + Send>), BuildMintReqAck( i32, IndyResult<String>), BuildSetTxnFeesReq( WalletHandle, Option<String>, //submitter did String, //method String, //fees Box<Fn(IndyResult<String>) + Send>), BuildSetTxnFeesReqAck( i32, IndyResult<String>), BuildGetTxnFeesReq( WalletHandle, Option<String>, //submitter did String, //method Box<Fn(IndyResult<String>) + Send>), BuildGetTxnFeesReqAck( i32, IndyResult<String>), ParseGetTxnFeesResponse( String, //method String, //response Box<Fn(IndyResult<String>) + Send>), ParseGetTxnFeesResponseAck( i32, IndyResult<String>), BuildVerifyPaymentReq( WalletHandle, Option<String>, //submitter_did String, //receipt Box<Fn(IndyResult<(String, String)>) + Send>), BuildVerifyPaymentReqAck( i32, IndyResult<String>), ParseVerifyPaymentResponse( String, //payment_method String, //resp_json Box<Fn(IndyResult<String>) + Send>), ParseVerifyPaymentResponseAck( i32, IndyResult<String>), } pub struct PaymentsCommandExecutor { payments_service: Rc<PaymentsService>, wallet_service: Rc<WalletService>, crypto_service: Rc<CryptoService>, pending_callbacks: RefCell<HashMap<i32, Box<Fn(IndyResult<String>) + Send>>>, } impl PaymentsCommandExecutor { pub fn new(payments_service: Rc<PaymentsService>, wallet_service: Rc<WalletService>, crypto_service: Rc<CryptoService>) -> PaymentsCommandExecutor { PaymentsCommandExecutor { payments_service, wallet_service, crypto_service, pending_callbacks: RefCell::new(HashMap::new()), } } pub fn execute(&self, command: PaymentsCommand) { match command { PaymentsCommand::RegisterMethod(type_, 
method_cbs, cb) => { info!(target: "payments_command_executor", "RegisterMethod command received"); cb(self.register_method(&type_, method_cbs)); } PaymentsCommand::CreateAddress(wallet_handle, type_, config, cb) => { info!(target: "payments_command_executor", "CreateAddress command received"); self.create_address(wallet_handle, &type_, &config, cb); } PaymentsCommand::CreateAddressAck(handle, wallet_handle, result) => { info!(target: "payments_command_executor", "CreateAddressAck command received"); self.create_address_ack(handle, wallet_handle, result); } PaymentsCommand::ListAddresses(wallet_handle, cb) => { info!(target: "payments_command_executor", "ListAddresses command received"); self.list_addresses(wallet_handle, cb); } PaymentsCommand::AddRequestFees(wallet_handle, submitter_did, req, inputs, outputs, extra, cb) => { info!(target: "payments_command_executor", "AddRequestFees command received"); self.add_request_fees(wallet_handle, submitter_did.as_ref().map(String::as_str), &req, &inputs, &outputs, extra.as_ref().map(String::as_str), cb); } PaymentsCommand::AddRequestFeesAck(cmd_handle, result) => { info!(target: "payments_command_executor", "AddRequestFeesAck command received"); self.add_request_fees_ack(cmd_handle, result); } PaymentsCommand::ParseResponseWithFees(type_, response, cb) => { info!(target: "payments_command_executor", "ParseResponseWithFees command received"); self.parse_response_with_fees(&type_, &response, cb); } PaymentsCommand::ParseResponseWithFeesAck(cmd_handle, result) => { info!(target: "payments_command_executor", "ParseResponseWithFeesAck command received"); self.parse_response_with_fees_ack(cmd_handle, result); } PaymentsCommand::BuildGetPaymentSourcesRequest(wallet_handle, submitter_did, payment_address, cb) => { info!(target: "payments_command_executor", "BuildGetPaymentSourcesRequest command received"); self.build_get_payment_sources_request(wallet_handle, submitter_did.as_ref().map(String::as_str), &payment_address, cb); } PaymentsCommand::BuildGetPaymentSourcesRequestAck(cmd_handle, result) => { info!(target: "payments_command_executor", "BuildGetPaymentSourcesRequestAck command received"); self.build_get_payment_sources_request_ack(cmd_handle, result); } PaymentsCommand::ParseGetPaymentSourcesResponse(type_, response, cb) => { info!(target: "payments_command_executor", "ParseGetPaymentSourcesResponse command received"); self.parse_get_payment_sources_response(&type_, &response, cb); } PaymentsCommand::ParseGetPaymentSourcesResponseAck(cmd_handle, result) => { info!(target: "payments_command_executor", "ParseGetPaymentSourcesResponseAck command received"); self.parse_get_payment_sources_response_ack(cmd_handle, result); } PaymentsCommand::BuildPaymentReq(wallet_handle, submitter_did, inputs, outputs, extra, cb) => { info!(target: "payments_command_executor", "BuildPaymentReq command received"); self.build_payment_req(wallet_handle, submitter_did.as_ref().map(String::as_str), &inputs, &outputs, extra.as_ref().map(String::as_str), cb); } PaymentsCommand::BuildPaymentReqAck(cmd_handle, result) => { info!(target: "payments_command_executor", "BuildPaymentReqAck command received"); self.build_payment_req_ack(cmd_handle, result); } PaymentsCommand::ParsePaymentResponse(payment_method, response, cb) => { info!(target: "payments_command_executor", "ParsePaymentResponse command received"); self.parse_payment_response(&payment_method, &response, cb); } PaymentsCommand::ParsePaymentResponseAck(cmd_handle, result) => { info!(target: 
"payments_command_executor", "ParsePaymentResponseAck command received"); self.parse_payment_response_ack(cmd_handle, result); } PaymentsCommand::BuildMintReq(wallet_handle, submitter_did, outputs, extra, cb) => { info!(target: "payments_command_executor", "BuildMintReq command received"); self.build_mint_req(wallet_handle, submitter_did.as_ref().map(String::as_str), &outputs, extra.as_ref().map(String::as_str), cb); } PaymentsCommand::BuildMintReqAck(cmd_handle, result) => { info!(target: "payments_command_executor", "BuildMintReqAck command received"); self.build_mint_req_ack(cmd_handle, result); } PaymentsCommand::BuildSetTxnFeesReq(wallet_handle, submitter_did, type_, fees, cb) => { info!(target: "payments_command_executor", "BuildSetTxnFeesReq command received"); self.build_set_txn_fees_req(wallet_handle, submitter_did.as_ref().map(String::as_str), &type_, &fees, cb); } PaymentsCommand::BuildSetTxnFeesReqAck(cmd_handle, result) => { info!(target: "payments_command_executor", "BuildSetTxnFeesReqAck command received"); self.build_set_txn_fees_req_ack(cmd_handle, result); } PaymentsCommand::BuildGetTxnFeesReq(wallet_handle, submitter_did, type_, cb) => { info!(target: "payments_command_executor", "BuildGetTxnFeesReq command received"); self.build_get_txn_fees_req(wallet_handle, submitter_did.as_ref().map(String::as_str), &type_, cb); } PaymentsCommand::BuildGetTxnFeesReqAck(cmd_handle, result) => { info!(target: "payments_command_executor", "BuildGetTxnFeesReqAck command received"); self.build_get_txn_fees_req_ack(cmd_handle, result); } PaymentsCommand::ParseGetTxnFeesResponse(type_, response, cb) => { info!(target: "payments_command_executor", "ParseGetTxnFeesResponse command received"); self.parse_get_txn_fees_response(&type_, &response, cb); } PaymentsCommand::ParseGetTxnFeesResponseAck(cmd_handle, result) => { info!(target: "payments_command_executor", "ParseGetTxnFeesResponseAck command received"); self.parse_get_txn_fees_response_ack(cmd_handle, result); } PaymentsCommand::BuildVerifyPaymentReq(wallet_handle, submitter_did, receipt, cb) => { info!(target: "payments_command_executor", "BuildVerifyPaymentReq command received"); self.build_verify_payment_request(wallet_handle, submitter_did.as_ref().map(String::as_str), &receipt, cb); } PaymentsCommand::BuildVerifyPaymentReqAck(command_handle, result) => { info!(target: "payments_command_executor", "BuildVerifyReqAck command received"); self.build_verify_payment_request_ack(command_handle, result); } PaymentsCommand::ParseVerifyPaymentResponse(payment_method, resp_json, cb) => { info!(target: "payments_command_executor", "ParseVerifyPaymentResponse command received"); self.parse_verify_payment_response(&payment_method, &resp_json, cb); } PaymentsCommand::ParseVerifyPaymentResponseAck(command_handle, result) => { info!(target: "payments_command_executor", "ParseVerifyResponseAck command received"); self.parse_verify_payment_response_ack(command_handle, result); } } } fn register_method(&self, type_: &str, methods: PaymentsMethodCBs) -> IndyResult<()> { trace!("register_method >>> type_: {:?}, methods: {:?}", type_, methods); self.payments_service.register_payment_method(type_, methods); let res = Ok(()); trace!("register_method << res: {:?}", res); res } fn create_address(&self, wallet_handle: WalletHandle, type_: &str, config: &str, cb: Box<Fn(IndyResult<String>) + Send>) { trace!("create_address >>> wallet_handle: {:?}, type_: {:?}, config: {:?}", wallet_handle, type_, config); match 
self.wallet_service.check(wallet_handle).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => () }; self._process_method(cb, &|i| self.payments_service.create_address(i, wallet_handle, type_, config)); trace!("create_address <<<"); } fn create_address_ack(&self, handle: i32, wallet_handle: WalletHandle, result: IndyResult<String>) { trace!("create_address_ack >>> wallet_handle: {:?}, result: {:?}", wallet_handle, result); let total_result: IndyResult<String> = match result { Ok(res) => { //TODO: think about deleting payment_address on wallet save failure self.wallet_service.check(wallet_handle).and( self.wallet_service.add_record(wallet_handle, &self.wallet_service.add_prefix("PaymentAddress"), &res, &res, &HashMap::new()).map(|_| res) ).map_err(IndyError::from) } Err(err) => Err(IndyError::from(err)) }; self._common_ack(handle, total_result, "CreateAddressAck"); trace!("create_address_ack <<<"); } fn list_addresses(&self, wallet_handle: WalletHandle, cb: Box<Fn(IndyResult<String>) + Send>) { trace!("list_addresses >>> wallet_handle: {:?}", wallet_handle); match self.wallet_service.check(wallet_handle).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => (), }; match self.wallet_service.search_records(wallet_handle, &self.wallet_service.add_prefix("PaymentAddress"), "{}", &RecordOptions::id_value()) { Ok(mut search) => { let mut list_addresses: Vec<String> = Vec::new(); while let Ok(Some(payment_address)) = search.fetch_next_record() { match payment_address.get_value() { Some(value) => list_addresses.push(value.to_string()), None => cb(Err(err_msg(IndyErrorKind::InvalidState, "Record value not found"))) } } let json_string = serde_json::to_string(&list_addresses) .to_indy(IndyErrorKind::InvalidState, "Cannot deserialize List of Payment Addresses"); cb(json_string); } Err(err) => cb(Err(err)) } trace!("list_addresses <<<"); } fn add_request_fees(&self, wallet_handle: WalletHandle, submitter_did: Option<&str>, req: &str, inputs: &str, outputs: &str, extra: Option<&str>, cb: Box<Fn(IndyResult<(String, String)>) + Send>) { trace!("add_request_fees >>> wallet_handle: {:?}, submitter_did: {:?}, req: {:?}, inputs: {:?}, outputs: {:?}, extra: {:?}", wallet_handle, submitter_did, req, inputs, outputs, extra); if let Some(did) = submitter_did { match self.crypto_service.validate_did(did).map_err(map_err_err!()) { Err(err) => return cb(Err(err)), _ => () } } match self.wallet_service.check(wallet_handle).map_err(map_err_err!()) { Err(err) => return cb(Err(err)), _ => (), }; let method_from_inputs = self.payments_service.parse_method_from_inputs(inputs); let method = if outputs == "[]" { method_from_inputs } else { let method_from_outputs = self.payments_service.parse_method_from_outputs(outputs); PaymentsCommandExecutor::_merge_parse_result(method_from_inputs, method_from_outputs) }; match method { Ok(type_) => { let type_copy = type_.to_string(); self._process_method( Box::new(move |result| cb(result.map(|e| (e, type_.to_string())))), &|i| self.payments_service.add_request_fees(i, &type_copy, wallet_handle, submitter_did, req, inputs, outputs, extra), ); } Err(error) => { cb(Err(error)) } }; trace!("add_request_fees <<<"); } fn add_request_fees_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("add_request_fees_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "AddRequestFeesAck"); trace!("add_request_fees_ack <<<"); } fn parse_response_with_fees(&self, type_: &str, response: &str, cb: 
Box<Fn(IndyResult<String>) + Send>) { trace!("parse_response_with_fees >>> type_: {:?}, response: {:?}", type_, response); self._process_method(cb, &|i| self.payments_service.parse_response_with_fees(i, type_, response)); trace!("parse_response_with_fees <<<"); } fn parse_response_with_fees_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("parse_response_with_fees_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "ParseResponseWithFeesFeesAck"); trace!("parse_response_with_fees_ack <<<"); } fn build_get_payment_sources_request(&self, wallet_handle: WalletHandle, submitter_did: Option<&str>, payment_address: &str, cb: Box<Fn(IndyResult<(String, String)>) + Send>) { trace!("build_get_payment_sources_request >>> wallet_handle: {:?}, submitter_did: {:?}, payment_address: {:?}", wallet_handle, submitter_did, payment_address); if let Some(did) = submitter_did { match self.crypto_service.validate_did(did).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => () } } match self.wallet_service.check(wallet_handle).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => (), }; let method = match self.payments_service.parse_method_from_payment_address(payment_address) { Ok(method) => method, Err(err) => { cb(Err(IndyError::from(err))); return; } }; let method_copy = method.to_string(); self._process_method( Box::new(move |get_sources_txn_json| cb(get_sources_txn_json.map(|s| (s, method.to_string())))), &|i| self.payments_service.build_get_payment_sources_request(i, &method_copy, wallet_handle, submitter_did, payment_address), ); trace!("build_get_payment_sources_request <<<"); } fn build_get_payment_sources_request_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("build_get_payment_sources_request_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "BuildGetSourcesRequestAck"); trace!("build_get_payment_sources_request_ack <<<"); } fn parse_get_payment_sources_response(&self, type_: &str, response: &str, cb: Box<Fn(IndyResult<String>) + Send>) { trace!("parse_get_payment_sources_response >>> response: {:?}", response); self._process_method(cb, &|i| self.payments_service.parse_get_payment_sources_response(i, type_, response)); trace!("parse_get_payment_sources_response <<<"); } fn parse_get_payment_sources_response_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("parse_get_payment_sources_response_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "ParseGetSourcesResponseAck"); trace!("parse_get_payment_sources_response_ack <<<"); } fn build_payment_req(&self, wallet_handle: WalletHandle, submitter_did: Option<&str>, inputs: &str, outputs: &str, extra: Option<&str>, cb: Box<Fn(IndyResult<(String, String)>) + Send>) { trace!("build_payment_req >>> wallet_handle: {:?}, submitter_did: {:?}, inputs: {:?}, outputs: {:?}, extra: {:?}", wallet_handle, submitter_did, inputs, outputs, extra); if let Some(did) = submitter_did { match self.crypto_service.validate_did(did).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => () } } match self.wallet_service.check(wallet_handle).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => () }; let method_from_inputs = self.payments_service.parse_method_from_inputs(inputs); let method_from_outputs = self.payments_service.parse_method_from_outputs(outputs); let method = PaymentsCommandExecutor::_merge_parse_result(method_from_inputs, 
method_from_outputs); match method { Ok(type_) => { let type_copy = type_.to_string(); self._process_method( Box::new(move |result| cb(result.map(|s| (s, type_.to_string())))), &|i| self.payments_service.build_payment_req(i, &type_copy, wallet_handle, submitter_did, inputs, outputs, extra), ); } Err(error) => { cb(Err(IndyError::from(error))) } } trace!("build_payment_req <<<"); } fn build_payment_req_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("build_payment_req_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "BuildPaymentReqAck"); trace!("build_payment_req_ack <<<"); } fn parse_payment_response(&self, payment_method: &str, response: &str, cb: Box<Fn(IndyResult<String>) + Send>) { trace!("parse_payment_response >>> response: {:?}", response); self._process_method(cb, &|i| self.payments_service.parse_payment_response(i, payment_method, response)); trace!("parse_payment_response <<<"); } fn parse_payment_response_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("parse_payment_response_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "ParsePaymentResponseAck"); trace!("parse_payment_response_ack <<<"); } fn build_mint_req(&self, wallet_handle: WalletHandle, submitter_did: Option<&str>, outputs: &str, extra: Option<&str>, cb: Box<Fn(IndyResult<(String, String)>) + Send>) { trace!("build_mint_req >>> wallet_handle: {:?}, submitter_did: {:?}, outputs: {:?}, extra: {:?}", wallet_handle, submitter_did, outputs, extra); if let Some(did) = submitter_did { match self.crypto_service.validate_did(did).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => () } } match self.wallet_service.check(wallet_handle).map_err(map_err_err!()) { //TODO: move to helper Err(err) => return cb(Err(IndyError::from(err))), _ => (), }; match self.payments_service.parse_method_from_outputs(outputs) { Ok(type_) => { let type_copy = type_.to_string(); self._process_method( Box::new(move |result| cb(result.map(|s| (s, type_.to_string())))), &|i| self.payments_service.build_mint_req(i, &type_copy, wallet_handle, submitter_did, outputs, extra), ); } Err(error) => cb(Err(IndyError::from(error))) } trace!("build_mint_req <<<"); } fn build_mint_req_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("build_mint_req_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "BuildMintReqAck"); trace!("build_mint_req_ack <<<"); } fn build_set_txn_fees_req(&self, wallet_handle: WalletHandle, submitter_did: Option<&str>, type_: &str, fees: &str, cb: Box<Fn(IndyResult<String>) + Send>) { trace!("build_set_txn_fees_req >>> wallet_handle: {:?}, submitter_did: {:?}, type_: {:?}, fees: {:?}", wallet_handle, submitter_did, type_, fees); if let Some(did) = submitter_did { match self.crypto_service.validate_did(did).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => () } } match self.wallet_service.check(wallet_handle).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => (), }; match serde_json::from_str::<HashMap<String, i64>>(fees) { Err(err) => { error!("Cannot deserialize Fees: {:?}", err); cb(Err(err.to_indy(IndyErrorKind::InvalidStructure, "Cannot deserialize Fees"))) } _ => self._process_method(cb, &|i| self.payments_service.build_set_txn_fees_req(i, type_, wallet_handle, submitter_did, fees)), }; trace!("build_set_txn_fees_req <<<"); } fn build_set_txn_fees_req_ack(&self, cmd_handle: i32, result: IndyResult<String>) { 
trace!("build_set_txn_fees_req_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "BuildSetTxnFeesReq"); trace!("build_set_txn_fees_req_ack <<<"); } fn build_get_txn_fees_req(&self, wallet_handle: WalletHandle, submitter_did: Option<&str>, type_: &str, cb: Box<Fn(IndyResult<String>) + Send>) { trace!("build_get_txn_fees_req >>> wallet_handle: {:?}, submitter_did: {:?}, type_: {:?}", wallet_handle, submitter_did, type_); if let Some(did) = submitter_did { match self.crypto_service.validate_did(did).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => () } } match self.wallet_service.check(wallet_handle).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => (), }; self._process_method(cb, &|i| self.payments_service.build_get_txn_fees_req(i, type_, wallet_handle, submitter_did)); trace!("build_get_txn_fees_req <<<"); } fn build_get_txn_fees_req_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("build_get_txn_fees_req_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "BuildGetTxnFeesReqAck"); trace!("build_get_txn_fees_req_ack <<<"); } fn parse_get_txn_fees_response(&self, type_: &str, response: &str, cb: Box<Fn(IndyResult<String>) + Send>) { trace!("parse_get_txn_fees_response >>> response: {:?}", response); self._process_method(cb, &|i| self.payments_service.parse_get_txn_fees_response(i, type_, response)); trace!("parse_get_txn_fees_response <<<"); } fn parse_get_txn_fees_response_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("parse_get_txn_fees_response_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "ParseGetTxnFeesResponseAck"); trace!("parse_get_txn_fees_response_ack <<<"); } fn build_verify_payment_request(&self, wallet_handle: WalletHandle, submitter_did: Option<&str>, receipt: &str, cb: Box<Fn(IndyResult<(String, String)>) + Send>) { trace!("build_verify_payment_request >>> wallet_handle: {:?}, submitter_did: {:?}, receipt: {:?}", wallet_handle, submitter_did, receipt); if let Some(did) = submitter_did { match self.crypto_service.validate_did(did).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => () } } match self.wallet_service.check(wallet_handle).map_err(map_err_err!()) { Err(err) => return cb(Err(IndyError::from(err))), _ => (), }; let method = match self.payments_service.parse_method_from_payment_address(receipt) { Ok(method) => method, Err(err) => { cb(Err(IndyError::from(err))); return; } }; let method_copy = method.to_string(); self._process_method( Box::new(move |result| cb(result.map(|s| (s, method.to_string())))), &|i| self.payments_service.build_verify_payment_req(i, &method_copy, wallet_handle, submitter_did, receipt), ); trace!("build_verify_payment_request <<<"); } fn build_verify_payment_request_ack(&self, cmd_handle: i32, result: IndyResult<String>) { trace!("build_verify_payment_request_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "BuildVerifyPaymentReqAck"); trace!("build_verify_payment_request_ack <<<"); } fn parse_verify_payment_response(&self, type_: &str, resp_json: &str, cb: Box<Fn(IndyResult<String>) + Send>) { trace!("parse_verify_payment_response >>> response: {:?}", resp_json); self._process_method(cb, &|i| self.payments_service.parse_verify_payment_response(i, type_, resp_json)); trace!("parse_verify_payment_response <<<"); } fn parse_verify_payment_response_ack(&self, cmd_handle: i32, result: IndyResult<String>) { 
trace!("parse_verify_payment_response_ack >>> result: {:?}", result); self._common_ack_payments(cmd_handle, result, "ParseVerifyPaymentResponseAck"); trace!("parse_verify_payment_response_ack <<<"); } // HELPERS fn _process_method(&self, cb: Box<Fn(IndyResult<String>) + Send>, method: &Fn(i32) -> IndyResult<()>) { let cmd_handle = ::utils::sequence::get_next_id(); match method(cmd_handle) { Ok(()) => { self.pending_callbacks.borrow_mut().insert(cmd_handle, cb); } Err(err) => cb(Err(IndyError::from(err))) } } fn _common_ack_payments(&self, cmd_handle: i32, result: IndyResult<String>, name: &str) { self._common_ack(cmd_handle, result.map_err(IndyError::from), name) } fn _common_ack(&self, cmd_handle: i32, result: IndyResult<String>, name: &str) { match self.pending_callbacks.borrow_mut().remove(&cmd_handle) { Some(cb) => { cb(result) } None => error!("Can't process PaymentsCommand::{} for handle {} with result {:?} - appropriate callback not found!", name, cmd_handle, result), } } fn _merge_parse_result(method_from_inputs: IndyResult<String>, method_from_outputs: IndyResult<String>) -> IndyResult<String> { match (method_from_inputs, method_from_outputs) { (Err(err), _) | (_, Err(err)) => Err(err), (Ok(ref mth1), Ok(ref mth2)) if mth1 != mth2 => { error!("Different payment method in inputs and outputs"); Err(err_msg(IndyErrorKind::IncompatiblePaymentMethods, "Different payment method in inputs and outputs")) } (Ok(mth1), Ok(_)) => Ok(mth1) } } }
48.172414
201
0.619744
e4cc4ecd6a06bc38605acc833c2f70c392ff7ec0
29,022
// Copyright 2018-2021 Parity Technologies (UK) Ltd. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use super::Stash as StorageStash; use crate::{ traits::{ KeyPtr, SpreadLayout, }, Lazy, }; use ink_primitives::Key; #[test] fn regression_stash_unreachable_minified() { // This regression has been discovered in the ERC-721 example implementation // `approved_for_all_works` unit test. The fix was to adjust // `Stash::remove_vacant_entry` to update `header.last_vacant` if the // removed index was the last remaining vacant index in the stash. ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| { let mut stash: StorageStash<u32> = StorageStash::new(); stash.put(1); stash.put(2); stash.take(0); stash.put(99); stash.take(1); stash.put(99); Ok(()) }) .unwrap() } #[test] fn new_works() { // `StorageVec::new` let stash = <StorageStash<i32>>::new(); assert!(stash.is_empty()); assert_eq!(stash.len(), 0); assert_eq!(stash.get(0), None); assert!(stash.iter().next().is_none()); // `StorageVec::default` let default = <StorageStash<i32> as Default>::default(); assert!(default.is_empty()); assert_eq!(default.len(), 0); assert_eq!(stash.get(0), None); assert!(default.iter().next().is_none()); // `StorageVec::new` and `StorageVec::default` should be equal. assert_eq!(stash, default); } #[test] fn from_iterator_works() { let test_values = [b'A', b'B', b'C', b'D', b'E', b'F']; let stash = test_values.iter().copied().collect::<StorageStash<_>>(); assert_eq!(stash, { let mut stash = StorageStash::new(); for (index, value) in test_values.iter().enumerate() { assert_eq!(index as u32, stash.put(*value)); } stash }); assert_eq!(stash.len(), test_values.len() as u32); assert!(!stash.is_empty()); } #[test] fn from_empty_iterator_works() { assert_eq!( [].iter().copied().collect::<StorageStash<i32>>(), StorageStash::new(), ); } #[test] fn take_from_filled_works() { let test_values = [b'A', b'B', b'C', b'D', b'E', b'F']; let mut stash = test_values.iter().copied().collect::<StorageStash<_>>(); for (index, expected_value) in test_values.iter().enumerate() { assert_eq!(stash.take(index as u32), Some(*expected_value)); } } #[test] fn take_from_empty_works() { let mut stash = <StorageStash<u8>>::new(); assert_eq!(stash.take(0), None); } #[test] fn take_out_of_bounds_works() { let mut stash = [b'A', b'B', b'C'] .iter() .copied() .collect::<StorageStash<_>>(); assert_eq!(stash.take(3), None); } #[test] fn remove_from_filled_works() { let test_values = [b'A', b'B', b'C', b'D', b'E', b'F']; let mut stash = test_values.iter().copied().collect::<StorageStash<_>>(); let mut count = stash.len(); for (index, val) in test_values.iter().enumerate() { let index = index as u32; assert_eq!(stash.get(index), Some(val)); assert_eq!(unsafe { stash.remove_occupied(index) }, Some(())); assert_eq!(stash.get(index), None); count -= 1; assert_eq!(stash.len(), count); } assert_eq!(stash.len(), 0); } #[test] fn remove_from_empty_works() { let mut stash = <StorageStash<u8>>::new(); assert_eq!(unsafe { stash.remove_occupied(0) }, None); } 
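// A simplified sketch of the invariant behind `regression_stash_unreachable_minified`
// above. These are hypothetical types, not ink!'s generated storage structs: vacant
// slots form a doubly linked free list (a sole vacant entry links to itself, as the
// `Entry::Vacant(0, 0)` assertions later in this file show), and `remove_vacant_entry`
// must keep `last_vacant` pointing at a slot that is still vacant.
struct Header {
    // Index of some vacant entry, or `None` when no slot is vacant.
    last_vacant: Option<u32>,
}

// A vacant slot participates in a doubly linked free list.
struct VacantEntry {
    prev: u32,
    next: u32,
}

fn remove_vacant_entry(header: &mut Header, removed: u32, entry: &VacantEntry) {
    if entry.next == removed {
        // A sole vacant entry links to itself; consuming it empties the free list.
        header.last_vacant = None;
    } else if header.last_vacant == Some(removed) {
        // The fix described above: re-point `last_vacant` at a surviving vacant
        // entry instead of leaving it aimed at a now-occupied slot.
        header.last_vacant = Some(entry.next);
    }
    // Unlinking `removed` from its `prev`/`next` neighbours is elided here.
}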
#[test] fn remove_out_of_bounds_works() { let mut stash = [b'A', b'B', b'C'] .iter() .copied() .collect::<StorageStash<_>>(); assert_eq!(unsafe { stash.remove_occupied(3) }, None); } #[test] fn remove_works_with_spread_layout_push_pull() -> ink_env::Result<()> { ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| { // First populate some storage Stash and writes that to the contract storage using pull_spread // and some known Key. let stash = [b'A', b'B', b'C'] .iter() .copied() .collect::<StorageStash<_>>(); let root_key = Key::from([0x00; 32]); SpreadLayout::push_spread(&stash, &mut KeyPtr::from(root_key)); // Then load another instance from the same key lazily and remove some of // the known-to-be-populated entries from it. Afterwards push_spread this second instance and // load yet another using pull_spread again. let mut stash2 = <StorageStash<u8> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); assert_eq!(unsafe { stash2.remove_occupied(0) }, Some(())); SpreadLayout::push_spread(&stash2, &mut KeyPtr::from(root_key)); // This time we check from the third instance using // get if the expected cells are still there or have been successfully removed. let stash3 = <StorageStash<u8> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); assert_eq!(stash3.get(0), None); assert_eq!(stash3.get(1), Some(&b'B')); assert_eq!(stash3.get(2), Some(&b'C')); assert_eq!(stash3.len(), 2); Ok(()) }) } #[test] fn get_works() { let test_values = [b'A', b'B', b'C', b'D', b'E', b'F']; let mut stash = test_values.iter().copied().collect::<StorageStash<_>>(); for (index, &expected_value) in test_values.iter().enumerate() { let mut expected_value = expected_value; let index = index as u32; assert_eq!(stash.get(index), Some(&expected_value)); assert_eq!(stash.get_mut(index), Some(&mut expected_value)); assert_eq!(&stash[index], &expected_value); assert_eq!(&mut stash[index], &mut expected_value); } // Get out of bounds works: let len = stash.len(); assert_eq!(stash.get(len), None); assert_eq!(stash.get_mut(len), None); // Get vacant entry works: assert_eq!(stash.get(1), Some(&b'B')); assert_eq!(stash.get_mut(1), Some(&mut b'B')); assert_eq!(stash.take(1), Some(b'B')); assert_eq!(stash.get(1), None); assert_eq!(stash.get_mut(1), None); } #[cfg(debug_assertions)] #[test] #[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] fn index_out_of_bounds_works() { let test_values = [b'a', b'b', b'c']; let stash = test_values.iter().copied().collect::<StorageStash<_>>(); let _ = &stash[test_values.len() as u32]; } #[cfg(debug_assertions)] #[test] #[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] fn index_mut_out_of_bounds_works() { let test_values = [b'a', b'b', b'c']; let mut stash = test_values.iter().copied().collect::<StorageStash<_>>(); let _ = &mut stash[test_values.len() as u32]; } #[test] #[should_panic(expected = "indexed vacant entry: at index 1")] fn index_vacant_works() { let test_values = [b'a', b'b', b'c']; let mut stash = test_values.iter().copied().collect::<StorageStash<_>>(); assert_eq!(stash.take(1), Some(b'b')); let _ = &stash[1]; } #[test] #[should_panic(expected = "indexed vacant entry: at index 1")] fn index_mut_vacant_works() { let test_values = [b'a', b'b', b'c']; let mut stash = test_values.iter().copied().collect::<StorageStash<_>>(); assert_eq!(stash.take(1), Some(b'b')); let _ = &mut stash[1]; } #[test] fn len_is_empty_works() { let mut stash = StorageStash::new(); assert_eq!(stash.len(), 0); 
assert!(stash.is_empty()); stash.put(b'A'); assert_eq!(stash.len(), 1); assert!(!stash.is_empty()); stash.take(0); assert_eq!(stash.len(), 0); assert!(stash.is_empty()); } #[test] fn iter_works() { let stash = [b'A', b'B', b'C'] .iter() .copied() .collect::<StorageStash<_>>(); // Test iterator over shared references. let mut iter = stash.iter(); assert_eq!(iter.count(), 3); assert_eq!(iter.next(), Some(&b'A')); assert_eq!(iter.count(), 2); assert_eq!(iter.next(), Some(&b'B')); assert_eq!(iter.count(), 1); assert_eq!(iter.next(), Some(&b'C')); assert_eq!(iter.count(), 0); assert_eq!(iter.next(), None); // Test iterator over exclusive references. let mut stash = stash; let mut iter = stash.iter_mut(); assert_eq!(iter.next(), Some(&mut b'A')); assert_eq!(iter.next(), Some(&mut b'B')); assert_eq!(iter.next(), Some(&mut b'C')); assert_eq!(iter.next(), None); assert_eq!(iter.count(), 0); } /// Create a stash that only has vacant entries. fn create_vacant_stash() -> StorageStash<u8> { let mut stash = [b'A', b'B', b'C'] .iter() .copied() .collect::<StorageStash<_>>(); for i in 0..stash.len() { stash.take(i); } assert_eq!(stash.len(), 0); assert!(stash.is_empty()); assert_eq!(stash.len_entries(), 3); stash } /// Create a stash where every second entry is vacant. fn create_holey_stash() -> StorageStash<u8> { let elements = [b'A', b'B', b'C', b'D', b'E', b'F']; let mut stash = elements.iter().copied().collect::<StorageStash<_>>(); for i in 0..stash.len() { stash.take(i * 2); } assert_eq!(stash.len() as usize, elements.len() / 2); assert!(!stash.is_empty()); assert_eq!(stash.len_entries() as usize, elements.len()); stash } #[test] fn iter_over_vacant_works() { let stash = create_vacant_stash(); // Test iterator over shared references. let mut iter = stash.iter(); assert_eq!(iter.count(), 0); assert_eq!(iter.next(), None); // Test iterator over exclusive references. let mut stash = stash; let mut iter = stash.iter_mut(); assert_eq!(iter.next(), None); // Test reverse iterator over shared references. let mut iter = stash.iter().rev(); assert_eq!(iter.clone().count(), 0); assert_eq!(iter.next(), None); // Test reverse iterator over exclusive references. let mut stash = stash; let mut iter = stash.iter_mut().rev(); assert_eq!(iter.next(), None); } #[test] fn iter_over_holey_works() { let stash = create_holey_stash(); // Test iterator over shared references. let mut iter = stash.iter(); assert_eq!(iter.count(), 3); assert_eq!(iter.next(), Some(&b'B')); assert_eq!(iter.count(), 2); assert_eq!(iter.next(), Some(&b'D')); assert_eq!(iter.count(), 1); assert_eq!(iter.next(), Some(&b'F')); assert_eq!(iter.count(), 0); assert_eq!(iter.next(), None); // Test iterator over exclusive references. let mut stash = stash; let mut iter = stash.iter_mut(); assert_eq!(iter.next(), Some(&mut b'B')); assert_eq!(iter.next(), Some(&mut b'D')); assert_eq!(iter.next(), Some(&mut b'F')); assert_eq!(iter.next(), None); assert_eq!(iter.count(), 0); } #[test] fn iter_rev_over_holey_works() { let stash = create_holey_stash(); // Test iterator over shared references. let mut iter = stash.iter().rev(); assert_eq!(iter.clone().count(), 3); assert_eq!(iter.next(), Some(&b'F')); assert_eq!(iter.clone().count(), 2); assert_eq!(iter.next(), Some(&b'D')); assert_eq!(iter.clone().count(), 1); assert_eq!(iter.next(), Some(&b'B')); assert_eq!(iter.clone().count(), 0); assert_eq!(iter.next(), None); // Test iterator over exclusive references. 
let mut stash = stash; let mut iter = stash.iter_mut().rev(); assert_eq!(iter.next(), Some(&mut b'F')); assert_eq!(iter.next(), Some(&mut b'D')); assert_eq!(iter.next(), Some(&mut b'B')); assert_eq!(iter.next(), None); assert_eq!(iter.count(), 0); } #[test] fn iter_rev_works() { let stash = [b'A', b'B', b'C'] .iter() .copied() .collect::<StorageStash<_>>(); // Test iterator over shared references. let mut iter = stash.iter().rev(); assert_eq!(iter.next(), Some(&b'C')); assert_eq!(iter.next(), Some(&b'B')); assert_eq!(iter.next(), Some(&b'A')); assert_eq!(iter.next(), None); // Test iterator over exclusive references. let mut stash = stash; let mut iter = stash.iter_mut().rev(); assert_eq!(iter.next(), Some(&mut b'C')); assert_eq!(iter.next(), Some(&mut b'B')); assert_eq!(iter.next(), Some(&mut b'A')); assert_eq!(iter.next(), None); } #[derive(Debug, Copy, Clone, PartialEq, Eq)] struct EntryMove { from: u32, to: u32, value: u8, } #[test] fn simple_defrag_works() { let mut stash = [b'A', b'B', b'C', b'D', b'E', b'F'] .iter() .copied() .collect::<StorageStash<_>>(); assert_eq!(stash.len(), 6); assert_eq!(stash.len_entries(), 6); assert_eq!(stash.take(3), Some(b'D')); assert_eq!(stash.take(1), Some(b'B')); assert_eq!(stash.take(5), Some(b'F')); assert_eq!(stash.take(4), Some(b'E')); assert_eq!(stash.len(), 2); assert_eq!(stash.len_entries(), 6); // Now stash looks like this: // // i | 0 | 1 | 2 | 3 | 4 | 5 | // next | | | | | | | // prev | | | | | | | // val | A | | C | | | | // // After defrag the stash should look like this: // // i | 0 | 1 | // next | | | // prev | | | // val | A | C | let mut entry_moves = Vec::new(); let callback = |from, to, value: &u8| { entry_moves.push(EntryMove { from, to, value: *value, }); }; assert_eq!(stash.defrag(None, callback), 4); assert_eq!(stash.len(), 2); assert_eq!(stash.len_entries(), 2); assert_eq!(stash.get(0), Some(&b'A')); assert_eq!(stash.get(1), Some(&b'C')); assert_eq!( &entry_moves, &[EntryMove { from: 2, to: 1, value: 67 }] ); } /// Returns a storage stash that looks internally like this: /// /// i | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 /// ----------|---|---|---|---|---|---|---|--- /// next | | | | | | | | /// previous | | | | | | | | /// val | | | | | E | | | H fn complex_defrag_setup() -> StorageStash<u8> { let mut stash = [b'A', b'B', b'C', b'D', b'E', b'F', b'G', b'H'] .iter() .copied() .collect::<StorageStash<_>>(); assert_eq!(stash.len(), 8); assert_eq!(stash.len_entries(), 8); // Remove some of the entries in specific order. assert_eq!(stash.take(0), Some(b'A')); assert_eq!(stash.take(6), Some(b'G')); assert_eq!(stash.take(1), Some(b'B')); assert_eq!(stash.take(5), Some(b'F')); assert_eq!(stash.take(2), Some(b'C')); assert_eq!(stash.take(3), Some(b'D')); assert_eq!(stash.len(), 2); assert_eq!(stash.len_entries(), 8); stash } /// Returns the expected entry move set for the complex defragmentation test. 
fn complex_defrag_expected_moves() -> &'static [EntryMove] { &[ EntryMove { from: 7, to: 0, value: 72, }, EntryMove { from: 4, to: 1, value: 69, }, ] } #[test] fn complex_defrag_works() { let mut stash = complex_defrag_setup(); let mut entry_moves = Vec::new(); let callback = |from, to, value: &u8| { entry_moves.push(EntryMove { from, to, value: *value, }); }; assert_eq!(stash.defrag(None, callback), 6); // After defrag the stash should look like this: // // i | 0 | 1 | // next | | | // prev | | | // val | H | E | assert_eq!(stash.len(), 2); assert_eq!(stash.len_entries(), 2); assert_eq!(stash.get(0), Some(&b'H')); assert_eq!(stash.get(1), Some(&b'E')); assert_eq!(entry_moves.as_slice(), complex_defrag_expected_moves()); } #[test] fn incremental_defrag_works() { // This tests asserts that incremental defragmentation of storage stashes // yields the same result as immediate defragmentation of the same stash. let mut stash = complex_defrag_setup(); let mut entry_moves = Vec::new(); let mut callback = |from, to, value: &u8| { entry_moves.push(EntryMove { from, to, value: *value, }); }; let len_entries_before = stash.len_entries(); for i in 0..stash.len_entries() { stash.defrag(Some(1), &mut callback); assert_eq!( stash.len_entries(), core::cmp::max(2, len_entries_before - i - 1) ); } // After defrag the stash should look like this: // // i | 0 | 1 | // next | | | // prev | | | // val | H | E | assert_eq!(stash.len(), 2); assert_eq!(stash.len_entries(), 2); assert_eq!(stash.get(0), Some(&b'H')); assert_eq!(stash.get(1), Some(&b'E')); assert_eq!(entry_moves.as_slice(), complex_defrag_expected_moves()); } #[derive(Debug, PartialEq, Eq)] enum Entry { /// Vacant entry with `prev` and `next` links. Vacant(u32, u32), /// Occupied entry with value. Occupied(u8), } fn entries_of_stash(stash: &StorageStash<u8>) -> Vec<Entry> { stash .entries() .map(|entry| { use super::Entry as StashEntry; match entry { StashEntry::Vacant(entry) => Entry::Vacant(entry.prev, entry.next), StashEntry::Occupied(value) => Entry::Occupied(*value), } }) .collect::<Vec<_>>() } #[test] fn take_in_order_works() { let mut stash = [b'A', b'B', b'C', b'D'] .iter() .copied() .collect::<StorageStash<_>>(); assert_eq!(stash.len(), 4); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), None); assert_eq!( entries_of_stash(&stash), vec![ Entry::Occupied(b'A'), Entry::Occupied(b'B'), Entry::Occupied(b'C'), Entry::Occupied(b'D') ] ); // Take first. assert_eq!(stash.take(0), Some(b'A')); assert_eq!(stash.len(), 3); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), Some(0)); assert_eq!( entries_of_stash(&stash), vec![ Entry::Vacant(0, 0), Entry::Occupied(b'B'), Entry::Occupied(b'C'), Entry::Occupied(b'D') ] ); // Take second. assert_eq!(stash.take(1), Some(b'B')); assert_eq!(stash.len(), 2); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), Some(0)); assert_eq!( entries_of_stash(&stash), vec![ Entry::Vacant(1, 1), Entry::Vacant(0, 0), Entry::Occupied(b'C'), Entry::Occupied(b'D') ] ); // Take third. assert_eq!(stash.take(2), Some(b'C')); assert_eq!(stash.len(), 1); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), Some(0)); assert_eq!( entries_of_stash(&stash), vec![ Entry::Vacant(2, 1), Entry::Vacant(0, 2), Entry::Vacant(1, 0), Entry::Occupied(b'D') ] ); // Take last. 
assert_eq!(stash.take(3), Some(b'D')); assert_eq!(stash.len(), 0); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), Some(0)); assert_eq!( entries_of_stash(&stash), vec![ Entry::Vacant(3, 1), Entry::Vacant(0, 2), Entry::Vacant(1, 3), Entry::Vacant(2, 0), ] ); } #[test] fn take_rev_order_works() { let mut stash = [b'A', b'B', b'C', b'D'] .iter() .copied() .collect::<StorageStash<_>>(); assert_eq!(stash.len(), 4); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), None); assert_eq!( entries_of_stash(&stash), vec![ Entry::Occupied(b'A'), Entry::Occupied(b'B'), Entry::Occupied(b'C'), Entry::Occupied(b'D') ] ); // Take last. assert_eq!(stash.take(3), Some(b'D')); assert_eq!(stash.len(), 3); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), Some(3)); assert_eq!( entries_of_stash(&stash), vec![ Entry::Occupied(b'A'), Entry::Occupied(b'B'), Entry::Occupied(b'C'), Entry::Vacant(3, 3) ] ); // Take third. assert_eq!(stash.take(2), Some(b'C')); assert_eq!(stash.len(), 2); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), Some(2)); assert_eq!( entries_of_stash(&stash), vec![ Entry::Occupied(b'A'), Entry::Occupied(b'B'), Entry::Vacant(3, 3), Entry::Vacant(2, 2) ] ); // Take second. assert_eq!(stash.take(1), Some(b'B')); assert_eq!(stash.len(), 1); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), Some(1)); assert_eq!( entries_of_stash(&stash), vec![ Entry::Occupied(b'A'), Entry::Vacant(3, 2), Entry::Vacant(1, 3), Entry::Vacant(2, 1) ] ); // Take first. assert_eq!(stash.take(0), Some(b'A')); assert_eq!(stash.len(), 0); assert_eq!(stash.len_entries(), 4); assert_eq!(stash.last_vacant_index(), Some(0)); assert_eq!( entries_of_stash(&stash), vec![ Entry::Vacant(3, 1), Entry::Vacant(0, 2), Entry::Vacant(1, 3), Entry::Vacant(2, 0) ] ); } #[test] fn spread_layout_push_pull_works() -> ink_env::Result<()> { ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| { let stash1 = create_holey_stash(); let root_key = Key::from([0x42; 32]); SpreadLayout::push_spread(&stash1, &mut KeyPtr::from(root_key)); // Load the pushed storage vector into another instance and check that // both instances are equal: let stash2 = <StorageStash<u8> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); assert_eq!(stash1, stash2); Ok(()) }) } #[test] #[should_panic(expected = "storage entry was empty")] fn spread_layout_clear_works() { ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| { let stash1 = create_holey_stash(); let root_key = Key::from([0x42; 32]); SpreadLayout::push_spread(&stash1, &mut KeyPtr::from(root_key)); // It has already been asserted that a valid instance can be pulled // from contract storage after a push to the same storage region. 
// // Now clear the associated storage from `stash1` and check whether // loading another instance from this storage will panic since the // vector's length property cannot read a value: SpreadLayout::clear_spread(&stash1, &mut KeyPtr::from(root_key)); let _ = <StorageStash<u8> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); Ok(()) }) .unwrap() } #[test] #[cfg(not(feature = "ink-experimental-engine"))] fn storage_is_cleared_completely_after_pull_lazy() { ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| { // given let root_key = Key::from([0x42; 32]); let lazy_stash = Lazy::new(create_holey_stash()); SpreadLayout::push_spread(&lazy_stash, &mut KeyPtr::from(root_key)); let pulled_stash = <Lazy<StorageStash<u8>> as SpreadLayout>::pull_spread( &mut KeyPtr::from(root_key), ); // when SpreadLayout::clear_spread(&pulled_stash, &mut KeyPtr::from(root_key)); // then let contract_id = ink_env::test::get_current_contract_account_id::< ink_env::DefaultEnvironment, >() .expect("Cannot get contract id"); let storage_used = ink_env::test::count_used_storage_cells::< ink_env::DefaultEnvironment, >(&contract_id) .expect("used cells must be returned"); assert_eq!(storage_used, 0); Ok(()) }) .unwrap() } #[test] #[should_panic(expected = "storage entry was empty")] #[cfg(not(feature = "ink-experimental-engine"))] fn drop_works() { ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| { let root_key = Key::from([0x42; 32]); // if the setup panics it should not cause the test to pass let setup_result = std::panic::catch_unwind(|| { let stash = create_holey_stash(); SpreadLayout::push_spread(&stash, &mut KeyPtr::from(root_key)); let _ = <StorageStash<u8> as SpreadLayout>::pull_spread(&mut KeyPtr::from( root_key, )); // stash is dropped which should clear the cells }); assert!(setup_result.is_ok(), "setup should not panic"); let contract_id = ink_env::test::get_current_contract_account_id::< ink_env::DefaultEnvironment, >() .expect("Cannot get contract id"); let used_cells = ink_env::test::count_used_storage_cells::< ink_env::DefaultEnvironment, >(&contract_id) .expect("used cells must be returned"); assert_eq!(used_cells, 0); let _ = <StorageStash<u8> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); Ok(()) }) .unwrap() } #[test] #[cfg(feature = "ink-experimental-engine")] fn storage_is_cleared_completely_after_pull_lazy() { ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| { // given let root_key = Key::from([0x42; 32]); let lazy_stash = Lazy::new(create_holey_stash()); SpreadLayout::push_spread(&lazy_stash, &mut KeyPtr::from(root_key)); let pulled_stash = <Lazy<StorageStash<u8>> as SpreadLayout>::pull_spread( &mut KeyPtr::from(root_key), ); // when SpreadLayout::clear_spread(&pulled_stash, &mut KeyPtr::from(root_key)); // then let contract_id = ink_env::test::callee::<ink_env::DefaultEnvironment>(); let storage_used = ink_env::test::count_used_storage_cells::< ink_env::DefaultEnvironment, >(&contract_id) .expect("used cells must be returned"); assert_eq!(storage_used, 0); Ok(()) }) .unwrap() } #[test] #[should_panic(expected = "storage entry was empty")] #[cfg(feature = "ink-experimental-engine")] fn drop_works() { ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| { let root_key = Key::from([0x42; 32]); // if the setup panics it should not cause the test to pass let setup_result = std::panic::catch_unwind(|| { let stash = create_holey_stash(); SpreadLayout::push_spread(&stash, &mut KeyPtr::from(root_key)); let _ = <StorageStash<u8> as 
SpreadLayout>::pull_spread(&mut KeyPtr::from( root_key, )); // stash is dropped which should clear the cells }); assert!(setup_result.is_ok(), "setup should not panic"); let contract_id = ink_env::test::callee::<ink_env::DefaultEnvironment>(); let used_cells = ink_env::test::count_used_storage_cells::< ink_env::DefaultEnvironment, >(&contract_id) .expect("used cells must be returned"); assert_eq!(used_cells, 0); let _ = <StorageStash<u8> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); Ok(()) }) .unwrap() }
32.942111
103
0.557198
22506da4804c15d355d915187168f29fa281fd70
6,347
//
// Copyright 2020 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

use super::*;
use crate::{RuntimeProxy, SecureServerConfiguration};
use maplit::hashmap;
use oak_abi::{
    label::Label,
    proto::oak::application::{
        node_configuration::ConfigType, ApplicationConfiguration, WebAssemblyConfiguration,
    },
};
use std::fs::read;
use wat::parse_str;

fn start_node(
    wasm_module: Vec<u8>,
    entrypoint_name: &str,
    signatures: &[Signature],
) -> Result<(), OakStatus> {
    crate::tests::init_logging();
    let module_name = "oak_module";
    let module_hash = sha_256_hex(wasm_module.as_ref());
    let application_configuration = ApplicationConfiguration {
        wasm_modules: hashmap! { module_name.to_string() => wasm_module },
        initial_node_configuration: None,
    };
    let signature_table = SignatureTable {
        values: hashmap! { module_hash => signatures.to_vec() },
    };
    let proxy = RuntimeProxy::create_runtime(
        &application_configuration,
        &SecureServerConfiguration::default(),
        &signature_table,
    );
    let (_write_handle, read_handle) = proxy.channel_create(&Label::public_untrusted())?;

    // Check Wasm module signatures.
    proxy.runtime.verify_module_signatures()?;

    let result = proxy.node_create(
        &NodeConfiguration {
            name: "test".to_string(),
            config_type: Some(ConfigType::WasmConfig(WebAssemblyConfiguration {
                wasm_module_name: module_name.to_string(),
                wasm_entrypoint_name: entrypoint_name.to_string(),
            })),
        },
        &Label::public_untrusted(),
        read_handle,
    );
    proxy
        .channel_close(read_handle)
        .expect("could not close channel");

    // Ensure that the runtime can terminate correctly, regardless of what the node does.
    proxy.runtime.stop();
    result
}

fn load_signature(signature_path: &str) -> Signature {
    let signature_file = read(&signature_path).expect("Couldn't read signature file");
    let (_, signature) =
        crate::parse_pem_signature(&signature_file).expect("Couldn't parse signature");
    signature
}

#[test]
fn wasm_starting_module_without_content_fails() {
    // Loads an empty module that does not have the necessary entry point, so it should fail
    // immediately.
    let binary = read("testdata/empty.wasm").expect("Couldn't read Wasm file");
    // An empty module is equivalent to: [0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00]
    // From https://docs.rs/wasmi/0.6.2/wasmi/struct.Module.html#method.from_buffer:
    // Minimal module:
    //   \0asm - magic
    //   0x01 - version (in little-endian)
    assert_eq!(binary, vec![0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00]);
    let result = start_node(binary, "oak_main", vec![].as_ref());
    assert_eq!(Some(OakStatus::ErrInvalidArgs), result.err());
}

#[test]
fn wasm_starting_minimal_module_succeeds() {
    let binary = read("testdata/minimal.wasm").expect("Couldn't read Wasm file");
    let result = start_node(binary, "oak_main", vec![].as_ref());
    assert!(result.is_ok());
}

#[test]
fn wasm_starting_module_missing_an_export_fails() {
    let binary = read("testdata/missing.wasm").expect("Couldn't read Wasm file");
    let result = start_node(binary, "oak_main", vec![].as_ref());
    assert_eq!(Some(OakStatus::ErrInvalidArgs), result.err());
}

#[test]
fn wasm_starting_module_with_wrong_export_fails() {
    let binary = read("testdata/minimal.wasm").expect("Couldn't read Wasm file");
    let result = start_node(binary, "oak_other_main", vec![].as_ref());
    assert_eq!(Some(OakStatus::ErrInvalidArgs), result.err());
}

#[test]
fn wasm_starting_module_with_wrong_signature_fails() {
    let binary = read("testdata/wrong.wasm").expect("Couldn't read Wasm file");
    let result = start_node(binary, "oak_main", vec![].as_ref());
    assert_eq!(Some(OakStatus::ErrInvalidArgs), result.err());
}

#[test]
fn wasm_starting_module_with_wrong_signature_2_fails() {
    // As a source of inspiration for writing tests in future, this test intentionally parses
    // the module from a string literal as opposed to loading from file.
    // Wrong signature: oak_main does not take any parameters
    let wat = r#"
    (module
        (func $oak_main)
        (memory (;0;) 18)
        (export "memory" (memory 0))
        (export "oak_main" (func $oak_main)))
    "#;
    let binary = parse_str(wat).unwrap();
    let result = start_node(binary, "oak_main", vec![].as_ref());
    assert_eq!(Some(OakStatus::ErrInvalidArgs), result.err());
}

#[test]
fn wasm_starting_module_with_wrong_signature_3_fails() {
    // Wrong signature: oak_main has the correct input parameter, but returns i32
    let wat = r#"
    (module
        (type (;0;) (func (param i64) (result i32)))
        (func $oak_main (type 0)
            i32.const 42)
        (memory (;0;) 18)
        (export "memory" (memory 0))
        (export "oak_main" (func $oak_main)))
    "#;
    let binary = parse_str(wat).unwrap();
    let result = start_node(binary, "oak_main", vec![].as_ref());
    assert_eq!(Some(OakStatus::ErrInvalidArgs), result.err());
}

#[test]
fn wasm_verify_module_signature_succeeds() {
    let binary = read("testdata/minimal.wasm").expect("Couldn't read Wasm file");
    let signature = load_signature("testdata/minimal.sign");
    let result = start_node(binary, "oak_main", vec![signature].as_ref());
    assert!(result.is_ok());
}

#[test]
fn wasm_verify_module_signature_fails() {
    let binary = read("testdata/minimal.wasm").expect("Couldn't read Wasm file");
    let signature = load_signature("testdata/wrong.sign");
    let result = start_node(binary, "oak_main", vec![signature].as_ref());
    assert_eq!(Some(OakStatus::ErrInvalidArgs), result.err());
}
35.858757
93
0.677958
fb10b8a9b6f8ac7a0583576c6ae47d3324a7fda6
63
mod cache_inner;
mod cache_op_executors;
mod cache_operations;
15.75
23
0.857143
f55b02afe83b334560723e710755478f6811e62a
8,632
#[doc = "Reader of register DORMANT_WAKE_INTS0"] pub type R = crate::R<u32, super::DORMANT_WAKE_INTS0>; #[doc = "Reader of field `GPIO7_EDGE_HIGH`"] pub type GPIO7_EDGE_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO7_EDGE_LOW`"] pub type GPIO7_EDGE_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO7_LEVEL_HIGH`"] pub type GPIO7_LEVEL_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO7_LEVEL_LOW`"] pub type GPIO7_LEVEL_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO6_EDGE_HIGH`"] pub type GPIO6_EDGE_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO6_EDGE_LOW`"] pub type GPIO6_EDGE_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO6_LEVEL_HIGH`"] pub type GPIO6_LEVEL_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO6_LEVEL_LOW`"] pub type GPIO6_LEVEL_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO5_EDGE_HIGH`"] pub type GPIO5_EDGE_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO5_EDGE_LOW`"] pub type GPIO5_EDGE_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO5_LEVEL_HIGH`"] pub type GPIO5_LEVEL_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO5_LEVEL_LOW`"] pub type GPIO5_LEVEL_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO4_EDGE_HIGH`"] pub type GPIO4_EDGE_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO4_EDGE_LOW`"] pub type GPIO4_EDGE_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO4_LEVEL_HIGH`"] pub type GPIO4_LEVEL_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO4_LEVEL_LOW`"] pub type GPIO4_LEVEL_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO3_EDGE_HIGH`"] pub type GPIO3_EDGE_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO3_EDGE_LOW`"] pub type GPIO3_EDGE_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO3_LEVEL_HIGH`"] pub type GPIO3_LEVEL_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO3_LEVEL_LOW`"] pub type GPIO3_LEVEL_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO2_EDGE_HIGH`"] pub type GPIO2_EDGE_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO2_EDGE_LOW`"] pub type GPIO2_EDGE_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO2_LEVEL_HIGH`"] pub type GPIO2_LEVEL_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO2_LEVEL_LOW`"] pub type GPIO2_LEVEL_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO1_EDGE_HIGH`"] pub type GPIO1_EDGE_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO1_EDGE_LOW`"] pub type GPIO1_EDGE_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO1_LEVEL_HIGH`"] pub type GPIO1_LEVEL_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO1_LEVEL_LOW`"] pub type GPIO1_LEVEL_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO0_EDGE_HIGH`"] pub type GPIO0_EDGE_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO0_EDGE_LOW`"] pub type GPIO0_EDGE_LOW_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO0_LEVEL_HIGH`"] pub type GPIO0_LEVEL_HIGH_R = crate::R<bool, bool>; #[doc = "Reader of field `GPIO0_LEVEL_LOW`"] pub type GPIO0_LEVEL_LOW_R = crate::R<bool, bool>; impl R { #[doc = "Bit 31"] #[inline(always)] pub fn gpio7_edge_high(&self) -> GPIO7_EDGE_HIGH_R { GPIO7_EDGE_HIGH_R::new(((self.bits >> 31) & 0x01) != 0) } #[doc = "Bit 30"] #[inline(always)] pub fn gpio7_edge_low(&self) -> GPIO7_EDGE_LOW_R { GPIO7_EDGE_LOW_R::new(((self.bits >> 30) & 0x01) != 0) } #[doc = "Bit 29"] #[inline(always)] pub fn gpio7_level_high(&self) -> 
GPIO7_LEVEL_HIGH_R { GPIO7_LEVEL_HIGH_R::new(((self.bits >> 29) & 0x01) != 0) } #[doc = "Bit 28"] #[inline(always)] pub fn gpio7_level_low(&self) -> GPIO7_LEVEL_LOW_R { GPIO7_LEVEL_LOW_R::new(((self.bits >> 28) & 0x01) != 0) } #[doc = "Bit 27"] #[inline(always)] pub fn gpio6_edge_high(&self) -> GPIO6_EDGE_HIGH_R { GPIO6_EDGE_HIGH_R::new(((self.bits >> 27) & 0x01) != 0) } #[doc = "Bit 26"] #[inline(always)] pub fn gpio6_edge_low(&self) -> GPIO6_EDGE_LOW_R { GPIO6_EDGE_LOW_R::new(((self.bits >> 26) & 0x01) != 0) } #[doc = "Bit 25"] #[inline(always)] pub fn gpio6_level_high(&self) -> GPIO6_LEVEL_HIGH_R { GPIO6_LEVEL_HIGH_R::new(((self.bits >> 25) & 0x01) != 0) } #[doc = "Bit 24"] #[inline(always)] pub fn gpio6_level_low(&self) -> GPIO6_LEVEL_LOW_R { GPIO6_LEVEL_LOW_R::new(((self.bits >> 24) & 0x01) != 0) } #[doc = "Bit 23"] #[inline(always)] pub fn gpio5_edge_high(&self) -> GPIO5_EDGE_HIGH_R { GPIO5_EDGE_HIGH_R::new(((self.bits >> 23) & 0x01) != 0) } #[doc = "Bit 22"] #[inline(always)] pub fn gpio5_edge_low(&self) -> GPIO5_EDGE_LOW_R { GPIO5_EDGE_LOW_R::new(((self.bits >> 22) & 0x01) != 0) } #[doc = "Bit 21"] #[inline(always)] pub fn gpio5_level_high(&self) -> GPIO5_LEVEL_HIGH_R { GPIO5_LEVEL_HIGH_R::new(((self.bits >> 21) & 0x01) != 0) } #[doc = "Bit 20"] #[inline(always)] pub fn gpio5_level_low(&self) -> GPIO5_LEVEL_LOW_R { GPIO5_LEVEL_LOW_R::new(((self.bits >> 20) & 0x01) != 0) } #[doc = "Bit 19"] #[inline(always)] pub fn gpio4_edge_high(&self) -> GPIO4_EDGE_HIGH_R { GPIO4_EDGE_HIGH_R::new(((self.bits >> 19) & 0x01) != 0) } #[doc = "Bit 18"] #[inline(always)] pub fn gpio4_edge_low(&self) -> GPIO4_EDGE_LOW_R { GPIO4_EDGE_LOW_R::new(((self.bits >> 18) & 0x01) != 0) } #[doc = "Bit 17"] #[inline(always)] pub fn gpio4_level_high(&self) -> GPIO4_LEVEL_HIGH_R { GPIO4_LEVEL_HIGH_R::new(((self.bits >> 17) & 0x01) != 0) } #[doc = "Bit 16"] #[inline(always)] pub fn gpio4_level_low(&self) -> GPIO4_LEVEL_LOW_R { GPIO4_LEVEL_LOW_R::new(((self.bits >> 16) & 0x01) != 0) } #[doc = "Bit 15"] #[inline(always)] pub fn gpio3_edge_high(&self) -> GPIO3_EDGE_HIGH_R { GPIO3_EDGE_HIGH_R::new(((self.bits >> 15) & 0x01) != 0) } #[doc = "Bit 14"] #[inline(always)] pub fn gpio3_edge_low(&self) -> GPIO3_EDGE_LOW_R { GPIO3_EDGE_LOW_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 13"] #[inline(always)] pub fn gpio3_level_high(&self) -> GPIO3_LEVEL_HIGH_R { GPIO3_LEVEL_HIGH_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 12"] #[inline(always)] pub fn gpio3_level_low(&self) -> GPIO3_LEVEL_LOW_R { GPIO3_LEVEL_LOW_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 11"] #[inline(always)] pub fn gpio2_edge_high(&self) -> GPIO2_EDGE_HIGH_R { GPIO2_EDGE_HIGH_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 10"] #[inline(always)] pub fn gpio2_edge_low(&self) -> GPIO2_EDGE_LOW_R { GPIO2_EDGE_LOW_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 9"] #[inline(always)] pub fn gpio2_level_high(&self) -> GPIO2_LEVEL_HIGH_R { GPIO2_LEVEL_HIGH_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 8"] #[inline(always)] pub fn gpio2_level_low(&self) -> GPIO2_LEVEL_LOW_R { GPIO2_LEVEL_LOW_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 7"] #[inline(always)] pub fn gpio1_edge_high(&self) -> GPIO1_EDGE_HIGH_R { GPIO1_EDGE_HIGH_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 6"] #[inline(always)] pub fn gpio1_edge_low(&self) -> GPIO1_EDGE_LOW_R { GPIO1_EDGE_LOW_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 5"] #[inline(always)] pub fn gpio1_level_high(&self) -> GPIO1_LEVEL_HIGH_R { 
GPIO1_LEVEL_HIGH_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 4"] #[inline(always)] pub fn gpio1_level_low(&self) -> GPIO1_LEVEL_LOW_R { GPIO1_LEVEL_LOW_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 3"] #[inline(always)] pub fn gpio0_edge_high(&self) -> GPIO0_EDGE_HIGH_R { GPIO0_EDGE_HIGH_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 2"] #[inline(always)] pub fn gpio0_edge_low(&self) -> GPIO0_EDGE_LOW_R { GPIO0_EDGE_LOW_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 1"] #[inline(always)] pub fn gpio0_level_high(&self) -> GPIO0_LEVEL_HIGH_R { GPIO0_LEVEL_HIGH_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 0"] #[inline(always)] pub fn gpio0_level_low(&self) -> GPIO0_LEVEL_LOW_R { GPIO0_LEVEL_LOW_R::new((self.bits & 0x01) != 0) } }
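// Every accessor above follows the same svd2rust reader pattern: shift the cached
// register value right by the field's bit offset and mask the low bit. A generic
// sketch of that pattern (hypothetical helper, not generated code):
fn field_bit(bits: u32, offset: u8) -> bool {
    ((bits >> offset) & 0x01) != 0
}

fn demo() {
    // Bit 31 set and bit 0 clear, mirroring `gpio7_edge_high` / `gpio0_level_low`.
    let bits: u32 = 0x8000_0000;
    assert!(field_bit(bits, 31));
    assert!(!field_bit(bits, 0));
}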
37.694323
64
0.618744
e6297dc4ed482a68e842743ffb9321aab7ff3284
12,170
extern crate clap; extern crate rust_htslib; extern crate bio; use clap::{Arg, App}; use rust_htslib::bam; use rust_htslib::prelude::*; use bio::io::fasta; #[derive(Clone)] pub struct GenomicInterval { pub tid: u32, pub chrom: String, // chromosome name pub start_pos: u32, // start of interval pub end_pos: u32, // end of interval (inclusive) } pub fn parse_target_names(bam_file: &String) -> Vec<String> { let bam = bam::Reader::from_path(bam_file).unwrap(); let header_view = bam.header(); let target_names_dec: Vec<&[u8]> = header_view.target_names(); let mut target_names: Vec<String> = vec![]; for t_name_dec in target_names_dec { let mut name_vec: Vec<char> = vec![]; for decr in t_name_dec { let dec: u8 = *decr; name_vec.push(dec as char); } let name_string: String = name_vec.into_iter().collect(); target_names.push(name_string); } target_names } pub fn u8_to_string(u: &[u8]) -> String { String::from_utf8(u.to_vec()).unwrap() } pub fn dna_vec(u: &[u8]) -> (Vec<char>) { let mut v: Vec<char> = Vec::with_capacity(u.len()); for cu in u.to_ascii_uppercase() { let c = cu as char; //assert!(c == 'A' || c == 'C' || c == 'G' || c == 'T' || c == 'N'); if c == 'A' || c == 'C' || c == 'G' || c == 'T' || c == 'N' { v.push(c); } else { eprintln!("Warning: Unexpected base \"{}\" encountered. Replaced with \"N\".", c); v.push('N'); } } v } pub fn get_whole_genome_intervals(bam_file: &String) -> Vec<GenomicInterval> { let bam = bam::Reader::from_path(bam_file).unwrap(); let header_view = bam.header(); let target_names_dec: Vec<&[u8]> = header_view.target_names(); let mut intervals: Vec<GenomicInterval> = vec![]; for (tid, t_name_dec) in target_names_dec.iter().enumerate() { let mut name_vec: Vec<char> = vec![]; for decr in t_name_dec.iter() { let dec: u8 = *decr; name_vec.push(dec as char); } let name_string: String = name_vec.into_iter().collect(); intervals.push(GenomicInterval{ tid: tid as u32, chrom: name_string, start_pos: 0, end_pos: header_view.target_len(tid as u32).unwrap()-1 }); } intervals } // given a bam file name and a possible genomic interval, // if the interval exists then just return a vector holding that lone interval // otherwise, if the interval is None, // return a vector holding GenomicIntervals representing the whole genome. pub fn get_interval_lst(bam_file: &String, interval: &Option<GenomicInterval>) -> Vec<GenomicInterval> { match interval { &Some(ref iv) => { vec![iv.clone()] } &None => { get_whole_genome_intervals(bam_file) } } } // this is really ugly. TODO a less verbose implementation pub fn parse_region_string(region_string: Option<&str>, bamfile_name: &String) -> Option<GenomicInterval> { let bam = bam::Reader::from_path(bamfile_name).unwrap(); match region_string { Some(r) if r.contains(":") && r.contains("-") => { let split1: Vec<&str> = r.split(":").collect(); if split1.len() != 2 { panic!("Invalid format for region. Please use <chrom> or <chrom:start-stop>"); } let split2: Vec<&str> = split1[1].split("-").collect(); if split2.len() != 2 { panic!("Invalid format for region. 
Please use <chrom> or <chrom:start-stop>"); } let iv_chrom = split1[0].to_string(); let iv_start = split2[0].parse::<u32>().expect("Invalid position value specified in region string."); let iv_end = split2[1].parse::<u32>().expect("Invalid position value specified in region string."); let mut tid: u32 = 0; for name in bam.header().target_names() { if u8_to_string(name) == iv_chrom { break; } tid += 1; } if tid as usize == bam.header().target_names().len() { panic!("Chromosome name for region is not in BAM file."); } Some(GenomicInterval { tid: tid, chrom: iv_chrom, start_pos: iv_start - 1, end_pos: iv_end - 1, }) } Some(r) => { let r_str = r.to_string(); let mut tid: u32 = 0; for name in bam.header().target_names() { if u8_to_string(name) == r_str { break; } tid += 1; } if tid as usize == bam.header().target_names().len() { panic!("Chromosome name for region is not in BAM file."); } let tlen = bam.header().target_len(tid).unwrap(); Some(GenomicInterval { tid: tid, chrom: r_str, start_pos: 0, end_pos: tlen - 1, }) } None => None, } } pub fn count_mapped_reads(bam_file: &String, fasta_file: &String, interval: &Option<GenomicInterval>, min_coverage: u32, min_mapq: u8, min_map_frac: f64, mapped_count_mode: bool) { let target_names = parse_target_names(&bam_file); let mut fasta = fasta::IndexedReader::from_file(&fasta_file).unwrap(); // pileup over all covered sites let mut ref_seq: Vec<char> = vec![]; let mut prev_tid = 4294967295; let a_str = "A".to_string(); let c_str = "C".to_string(); let g_str = "G".to_string(); let t_str = "T".to_string(); let interval_lst: Vec<GenomicInterval> = get_interval_lst(bam_file, interval); let mut bam_ix = bam::IndexedReader::from_path(bam_file).unwrap(); let mut count = 0; for iv in interval_lst { bam_ix.fetch(iv.tid as u32, iv.start_pos as u32, iv.end_pos as u32 + 1).ok().expect("Error seeking BAM file while extracting fragments."); let bam_pileup = bam_ix.pileup(); for p in bam_pileup { let pileup = p.unwrap(); let tid: usize = pileup.tid() as usize; let chrom: String = target_names[tid].clone(); let pos0: usize = pileup.pos() as usize; if chrom != iv.chrom || pos0 < iv.start_pos as usize || pos0 > iv.end_pos as usize { continue; } if tid != prev_tid { let mut ref_seq_u8: Vec<u8> = vec![]; fasta.read_all(&chrom, &mut ref_seq_u8).expect("Failed to read fasta sequence record."); ref_seq = dna_vec(&ref_seq_u8); } let ref_base_str = (ref_seq[pileup.pos() as usize]).to_string(); if ref_base_str.contains("N") { continue; } assert!(ref_base_str == a_str || ref_base_str == c_str || ref_base_str == g_str || ref_base_str == t_str); let mut depth: usize = 0; let mut well_mapped: usize = 0; // pileup the bases for a single position and count number of each base for alignment in pileup.alignments() { let record = alignment.record(); // may be faster to implement this as bitwise operation on raw flag in the future? 
if record.is_secondary() || record.is_quality_check_failed() || record.is_duplicate() || record.is_supplementary() { continue; } depth += 1; if record.is_unmapped() || record.mapq() < min_mapq { continue; } well_mapped += 1; } let well_mapped_frac = well_mapped as f64 / depth as f64; if mapped_count_mode { if well_mapped >= min_coverage as usize { count += 1; } } else { if depth >= min_coverage as usize && well_mapped_frac >= min_map_frac { count += 1; } } prev_tid = tid; } } println!("{}",count); } fn main() { let input_args = App::new("Map Counter") .version("0.1") .author("Peter Edge <[email protected]>") .about("Given a bam, count the number of positions exceeding a given min coverage and \"well-mapped\" fraction.") .arg(Arg::with_name("Input BAM") .short("b") .long("bam") .value_name("BAM") .help("sorted, indexed BAM file.") .display_order(10) .required(true) .takes_value(true)) .arg(Arg::with_name("Input FASTA") .short("r") .long("ref") .value_name("FASTA") .help("indexed fasta reference that BAM file is aligned to") .display_order(20) .required(true) .takes_value(true)) .arg(Arg::with_name("Chrom") .short("C") .long("chrom") .value_name("string") .help("Chromosome to limit analysis to.") .display_order(30) .takes_value(true)) .arg(Arg::with_name("Min coverage") .short("c") .long("min_cov") .value_name("int") .help("Minimum coverage (of reads passing filters) to consider position as a potential SNV.") .display_order(40) .required(true) .default_value("0")) .arg(Arg::with_name("Well-mapped fraction") .short("f") .long("map_frac") .value_name("float") .help("Minimum fraction of mapped reads with mapq >= MAPQ_CUTOFF.") .display_order(50) .required(true) .default_value("0")) .arg(Arg::with_name("Min mapq") .short("q") .long("min_mapq") .value_name("int") .help("Map quality cutoff (for calculating well-mapped fraction).") .display_order(60) .default_value("60")) .arg(Arg::with_name("Mapped read count mode") .short("m") .long("mapped_count_mode") .help("Ignore map fraction and use total mapped read count. \ Return the total number of positions with at least min_cov reads having mapq>=min_mapq. \ Default behavior is to return the number of positions with at least min_cov reads, where \ at least map_frac of them have mapq>=min_mapq") .display_order(161)) .get_matches(); let bamfile_name = input_args.value_of("Input BAM").unwrap().to_string(); let fastafile_name = input_args.value_of("Input FASTA").unwrap().to_string(); let interval: Option<GenomicInterval> = parse_region_string(input_args.value_of("Chrom"), &bamfile_name); let min_mapq = input_args.value_of("Min mapq") .unwrap() .parse::<u8>() .expect("Argument min_mapq must be an int!"); let min_cov = input_args.value_of("Min coverage") .unwrap() .parse::<u32>() .expect("Argument min_cov must be an int!"); let min_map_frac = input_args.value_of("Well-mapped fraction") .unwrap() .parse::<f64>() .expect("Argument map_frac must be a positive float!"); let mapped_count_mode: bool = match input_args.occurrences_of("Mapped read count mode") { 0 => {false}, 1 => {true}, _ => { panic!("mapped_count_mode specified multiple times"); } }; count_mapped_reads(&bamfile_name, &fastafile_name, &interval, min_cov, min_mapq, min_map_frac, mapped_count_mode); }
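// A sketch of the per-position rule `count_mapped_reads` applies, expressed over
// plain MAPQ values instead of `rust_htslib` pileups (hypothetical helpers; the
// secondary/duplicate/supplementary flag filtering is assumed to have happened
// already, and unmapped reads contribute a MAPQ below the cutoff):
fn position_counts(mapqs: &[u8], min_mapq: u8) -> (usize, usize, f64) {
    let depth = mapqs.len();
    let well_mapped = mapqs.iter().filter(|&&q| q >= min_mapq).count();
    // NaN for an uncovered position; every threshold comparison below then fails.
    let frac = well_mapped as f64 / depth as f64;
    (depth, well_mapped, frac)
}

fn position_passes(
    mapqs: &[u8],
    min_cov: usize,
    min_mapq: u8,
    min_map_frac: f64,
    mapped_count_mode: bool,
) -> bool {
    let (depth, well_mapped, frac) = position_counts(mapqs, min_mapq);
    if mapped_count_mode {
        // Count positions with at least `min_cov` well-mapped reads.
        well_mapped >= min_cov
    } else {
        // Count positions with enough total depth *and* a high enough
        // well-mapped fraction.
        depth >= min_cov && frac >= min_map_frac
    }
}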
33.805556
146
0.534758
e5e46a17388ba4e16ad32b8516eccab82cdc2342
45,611
//! An implementation of dynamic automatic differentiation //! with forward and reverse modes #![allow(non_snake_case)] // use std::borrow::{Borrow, BorrowMut}; use std::cell::RefCell; use std::cmp::{Eq, PartialEq}; use std::collections::{HashMap, HashSet}; use std::hash::{Hash, Hasher}; use std::ops::Deref; use std::rc::{Rc, Weak}; #[cfg(test)] use std::sync::{atomic, Arc}; #[derive(Clone, Debug)] pub struct PtrVWrap(pub Rc<RefCell<VWrap>>); impl Hash for PtrVWrap { fn hash<H: Hasher>(&self, state: &mut H) { let p = Rc::downgrade(&self.0); (Weak::as_raw(&p) as usize).hash(state); } } impl PartialEq for PtrVWrap { fn eq(&self, other: &Self) -> bool { //decay both to Weak and compare Weak::ptr_eq(&Rc::downgrade(&self.0), &Rc::downgrade(&other.0)) } } impl Eq for PtrVWrap {} use crate::valtype::ValType; #[cfg(test)] lazy_static! { static ref ID: Arc<atomic::AtomicUsize> = Arc::new(atomic::AtomicUsize::new(0)); } #[cfg(test)] fn get_id() -> i32 { ID.fetch_add(1, atomic::Ordering::SeqCst) as _ } /// wrapper for variable with recording of dependencies // #[derive(Debug)] pub struct VWrap { /// input dependencies pub inp: Vec<PtrVWrap>, /// source function raw: Box<dyn FWrap>, /// evaluated value pub val: Option<ValType>, #[cfg(test)] pub id: i32, pub eval_g: bool, /// adjoint accumulation expression pub adj_accum: Option<PtrVWrap>, } use std::fmt; impl fmt::Debug for VWrap { #[cfg(test)] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!( f, "VWrap {{ inp: {:#?}, raw:: {:?}, val: {:?}, id: {:?}, eval_g: {:?} }}", self.inp, self.raw, self.val, self.id, self.eval_g ) } #[cfg(not(test))] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!( f, "VWrap {{ inp: {:#?}, raw:: {:?}, val: {:?}, eval_g: {:?} }}", self.inp, self.raw, self.val, self.eval_g ) } } /// initializer functions #[allow(dead_code)] impl VWrap { fn new(v: Box<dyn FWrap>) -> PtrVWrap { PtrVWrap(Rc::new(RefCell::new(VWrap { inp: vec![], raw: v, val: None, #[cfg(test)] id: get_id(), eval_g: false, adj_accum: None, }))) } fn new_with_input(f: Box<dyn FWrap>, v: Vec<PtrVWrap>) -> PtrVWrap { PtrVWrap(Rc::new(RefCell::new(VWrap { inp: v, raw: f, val: None, #[cfg(test)] id: get_id(), eval_g: false, adj_accum: None, }))) } fn new_with_val(v: Box<dyn FWrap>, val: ValType) -> PtrVWrap { PtrVWrap(Rc::new(RefCell::new(VWrap { inp: vec![], raw: v, val: Some(val), #[cfg(test)] id: get_id(), eval_g: false, adj_accum: None, }))) } } impl PtrVWrap { fn set_inp(&mut self, v: Vec<PtrVWrap>) { self.0.deref().borrow_mut().inp = v; } pub fn set_val(&mut self, v: ValType) { self.0.deref().borrow_mut().val = Some(v); } /// forward mode (tanget-linear) pub fn apply_fwd(&mut self) -> ValType { let mut args: Vec<(ValType, bool)> = vec![]; //recursive apply for i in self.0.deref().borrow_mut().inp.iter_mut() { let val = i.apply_fwd(); args.push((val, i.0.deref().borrow().eval_g)); } let v = self.0.deref().borrow().raw.f()(args, self.0.deref().borrow().val); self.0.deref().borrow_mut().val = Some(v); v } /// reverse mode (adjoint) fn apply_rev_recurse(&mut self) -> ValType { let mut args: Vec<(ValType, bool)> = vec![]; //recursive apply for i in self.0.deref().borrow_mut().inp.iter_mut() { let val = i.apply_rev_recurse(); let temp = i.0.deref().borrow().eval_g; args.push((val, temp)); } let v = self.0.deref().borrow().raw.f()(args, self.0.deref().borrow().val); self.0.deref().borrow_mut().val = Some(v); v } /// reverse mode (adjoint) pub fn apply_rev(&mut self) -> ValType { let v = self.apply_rev_recurse(); v } /// create adjoint 
graph starting from current variable and go through input dependencies /// /// resulting sensitivity graphs are propagated to leaf nodes' adjoint accumulation /// where it can be collected pub fn rev(&self) -> HashMap<PtrVWrap, PtrVWrap> { use std::collections::VecDeque; let mut q = VecDeque::new(); let mut adjoints_collected = HashMap::new(); //initialization of sensitity=1 for starting node self.0.deref().borrow_mut().adj_accum = Some(VWrap::new(OpOne::new())); q.push_back(self.clone()); let mut visited: HashSet<PtrVWrap> = HashSet::new(); //breadth-first while !q.is_empty() { let n = q.pop_front().unwrap(); if visited.contains(&n) { //skip already traversed nodes continue; } if n.0.deref().borrow_mut().adj_accum.is_none() { n.0.deref().borrow_mut().adj_accum = Some(VWrap::new(OpZero::new())); } //delegate adjoint calc to operation let adjoints = { let mut f = n.0.deref().borrow().raw.adjoint(); f( n.0.deref().borrow().inp.clone(), n.0.deref() .borrow() .adj_accum .as_ref() .expect("adj_accum empty") .clone(), &n, ) }; assert_eq!(adjoints.len(), n.0.deref().borrow().inp.len()); //propagate adjoints to inputs let l = adjoints.len(); for idx in 0..l { if n.0.deref().borrow_mut().inp[idx] .0 .deref() .borrow_mut() .adj_accum .is_none() { n.0.deref().borrow_mut().inp[idx] .0 .deref() .borrow_mut() .adj_accum = Some(VWrap::new(OpZero::new())); } let temp = n.0.deref().borrow().inp[idx] .0 .deref() .borrow() .adj_accum .as_ref() .unwrap() .clone(); n.0.deref().borrow_mut().inp[idx] .0 .deref() .borrow_mut() .adj_accum = Some(Add(temp, adjoints[idx].clone())); } //reset adjoint accumulation for current node to zero if !n.0.deref().borrow().inp.is_empty() { //reset adjoints for internal nodes n.0.deref().borrow_mut().adj_accum = None; } else { //collect adjoints for leaf nodes let adj = n.0.deref().borrow_mut().adj_accum.take(); adjoints_collected.insert(n.clone(), adj.expect("leaf adjoint missing")); } //do adjoints for inputs for i in n.0.deref().borrow().inp.iter() { q.push_back(i.clone()); } visited.insert(n.clone()); } adjoints_collected } /// create tangent-linear starting from current variable pub fn fwd(&self) -> PtrVWrap { let mut g = self.0.deref().borrow().raw.tangent(); let ret = g(self.0.deref().borrow().inp.clone(), self); ret } /// indicator in fwd propagation pub fn active(&mut self) -> Self { self.0.deref().borrow_mut().eval_g = true; self.clone() } pub fn inactive(&mut self) -> Self { self.0.deref().borrow_mut().eval_g = false; self.clone() } pub fn adjoint(&self) -> Option<PtrVWrap> { self.0.deref().borrow().adj_accum.clone() } pub fn reset_adjoint(&mut self) { self.0.deref().borrow_mut().adj_accum = None; } } /// wrapper for function trait FWrap: std::fmt::Debug { fn new() -> Box<dyn FWrap> where Self: Sized; /// creates a function to evaluate given values fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType>; /// creates a function to evaluate given values for reverse pass fn f_rev(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { self.f() } /// creates linear tangent function with given input dependencies and returns wrapped variable /// used in forward mode fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap>; /// creates function to compute the adjoint for the input dependencies /// used in reverse mode fn adjoint( &self, ) -> Box< dyn FnMut( Vec<PtrVWrap>, /*inputs*/ PtrVWrap, /*accumulated adjoint*/ &PtrVWrap, /*self*/ ) -> Vec<PtrVWrap>, >; } #[derive(Debug, Clone, Copy)] struct OpMul {} 
#[derive(Debug, Clone, Copy)] struct OpAdd {} #[derive(Debug, Clone, Copy)] struct OpLeaf {} #[derive(Debug, Clone, Copy)] struct OpOne {} /// special link to variable of interest for gradient calc #[derive(Debug, Clone, Copy)] struct OpLink {} #[derive(Debug, Clone, Copy)] struct OpZero {} #[derive(Debug, Clone, Copy)] struct OpConst {} #[derive(Debug, Clone, Copy)] struct OpSin {} #[derive(Debug, Clone, Copy)] struct OpCos {} #[derive(Debug, Clone, Copy)] struct OpTan {} #[derive(Debug, Clone, Copy)] struct OpPow {} #[derive(Debug, Clone, Copy)] struct OpExp {} #[derive(Debug, Clone, Copy)] struct OpLn {} #[derive(Debug, Clone, Copy)] struct OpDiv {} impl FWrap for OpMul { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpMul {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _: Option<ValType>| { assert!(x.len() == 2); match (x[0].0, x[1].0) { (ValType::F(v0), ValType::F(v1)) => ValType::F(v0 * v1), (ValType::I(v0), ValType::I(v1)) => ValType::I(v0 * v1), (ValType::F(v0), ValType::I(v1)) => ValType::F(v0 * v1 as f32), (ValType::I(v0), ValType::F(v1)) => ValType::F(v0 as f32 * v1), _ => { panic!("type not supported"); } } }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |args: Vec<PtrVWrap>, _: &PtrVWrap| { assert!(args.len() == 2); //apply chain rule: (xy)' = x'y + xy' let a_prime = args[0].fwd(); let m1 = VWrap::new_with_input(OpMul::new(), vec![a_prime, args[1].clone()]); let b_prime = args[1].fwd(); let m2 = VWrap::new_with_input(OpMul::new(), vec![args[0].clone(), b_prime]); VWrap::new_with_input(OpAdd::new(), vec![m1, m2]) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 2); vec![ Mul(inputs[1].clone(), out_adj.clone()), Mul(inputs[0].clone(), out_adj), ] }, ) } } impl FWrap for OpAdd { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpAdd {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _: Option<ValType>| { assert_eq!(x.len(), 2); match (x[0].0, x[1].0) { (ValType::F(v0), ValType::F(v1)) => ValType::F(v0 + v1), (ValType::I(v0), ValType::I(v1)) => ValType::I(v0 + v1), _ => { panic!("type not supported"); } } }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |args: Vec<PtrVWrap>, _: &PtrVWrap| { //apply rule: (a+b+c+...)' = a'+b'+c'+... 
let mut inp_grad = vec![]; for i in args.iter() { let d = i.fwd(); inp_grad.push(d); } assert!(inp_grad.len() > 0); let count = inp_grad.len(); if count > 1 { for i in 1..count { let temp = VWrap::new_with_input( OpAdd::new(), vec![inp_grad[i - 1].clone(), inp_grad[i].clone()], ); inp_grad[i] = temp; } } inp_grad[count - 1].clone() }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 2); vec![out_adj.clone(), out_adj] }, ) } } impl FWrap for OpLeaf { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpLeaf {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |_x: Vec<(ValType, bool)>, v: Option<ValType>| v.expect("leaf value missing")) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |_args: Vec<PtrVWrap>, self_ptr: &PtrVWrap| { VWrap::new_with_input(OpLink::new(), vec![self_ptr.clone()]) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, _out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 0); vec![] }, ) } } /// special construct for representing derivative of a variable created in tangent-linear pass impl FWrap for OpLink { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpLink {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _v: Option<ValType>| { assert!(x.len() == 1); if x[0].1 { //indicator for calculating gradient of the linked variable ValType::F(1.) } else { ValType::F(0.) } }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |_args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { VWrap::new_with_val(OpZero::new(), ValType::F(0.)) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, _out_adj: PtrVWrap, _cur: &PtrVWrap| { vec![VWrap::new_with_val(OpZero::new(), ValType::F(0.)); inputs.len()] }, ) } } impl FWrap for OpConst { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpConst {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |_x: Vec<(ValType, bool)>, v: Option<ValType>| v.expect("leaf value missing")) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |_args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { VWrap::new_with_val(OpZero::new(), ValType::F(0.)) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, _out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 0); vec![] }, ) } } impl FWrap for OpOne { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpOne {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |_x: Vec<(ValType, bool)>, _v: Option<ValType>| ValType::F(1.)) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |_args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { VWrap::new_with_val(OpZero::new(), ValType::F(0.)) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, _out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 0); vec![] }, ) } } impl FWrap for OpZero { fn new() 
-> Box<dyn FWrap> where Self: Sized, { Box::new(OpZero {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |_x: Vec<(ValType, bool)>, _v: Option<ValType>| { //todo ValType::F(0.) }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |_args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { VWrap::new_with_val(OpZero::new(), ValType::F(0.)) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, _out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 0); vec![] }, ) } } impl FWrap for OpSin { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpSin {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _v: Option<ValType>| { assert!(x.len() == 1); match x[0].0 { ValType::F(v0) => ValType::F(v0.sin()), ValType::D(v0) => ValType::D(v0.sin()), ValType::I(v0) => ValType::F((v0 as f32).sin()), ValType::L(v0) => ValType::F((v0 as f32).sin()), } }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { assert_eq!(args.len(), 1); VWrap::new_with_input(OpCos::new(), vec![args[0].clone()]) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 1); let a = VWrap::new_with_input(OpCos::new(), vec![inputs[0].clone()]); vec![Mul(a, out_adj.clone())] }, ) } } impl FWrap for OpCos { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpCos {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _v: Option<ValType>| { assert!(x.len() == 1); match x[0].0 { ValType::F(v0) => ValType::F(v0.cos()), ValType::D(v0) => ValType::D(v0.cos()), ValType::I(v0) => ValType::F((v0 as f32).cos()), ValType::L(v0) => ValType::F((v0 as f32).cos()), } }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { assert_eq!(args.len(), 1); Mul( VWrap::new_with_val(OpConst::new(), ValType::F(-1.)), VWrap::new_with_input(OpSin::new(), vec![args[0].clone()]), ) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 1); let a = Mul( VWrap::new_with_val(OpConst::new(), ValType::F(-1.)), VWrap::new_with_input(OpSin::new(), vec![inputs[0].clone()]), ); vec![Mul(a, out_adj.clone())] }, ) } } impl FWrap for OpTan { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpTan {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _v: Option<ValType>| { assert!(x.len() == 1); match x[0].0 { ValType::F(v0) => ValType::F(v0.tan()), ValType::D(v0) => ValType::D(v0.tan()), ValType::I(v0) => ValType::F((v0 as f32).tan()), ValType::L(v0) => ValType::F((v0 as f32).tan()), } }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { //y'=1/(cos(x))^2 assert_eq!(args.len(), 1); let one = VWrap::new_with_val(OpConst::new(), ValType::F(1.)); Mul( Div(one, Mul(Cos(args[0].clone()), Cos(args[0].clone()))), args[0].fwd(), ) }) } fn 
adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 1); let one = VWrap::new_with_val(OpConst::new(), ValType::F(1.)); let a = Div(one, Mul(Cos(inputs[0].clone()), Cos(inputs[0].clone()))); vec![Mul(a, out_adj.clone())] }, ) } } impl FWrap for OpPow { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpPow {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _v: Option<ValType>| { assert!(x.len() == 2); let base: f32 = x[0].0.into(); let expo: f32 = x[1].0.into(); if expo < 1e-15 && expo > -1e-15 { ValType::F(1.) } else { ValType::F(base.powf(expo)) } }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { //y = x^a = exp(ln(x^a)) = exp(a ln(x)) //y' = exp(a ln(x))( a'*ln(x) + a/x*x') = x^a *(a'*ln(x)+a/x*x') assert_eq!(args.len(), 2); Mul( Pow(args[0].clone(), args[1].clone()), Add( Mul(args[1].fwd(), Ln(args[0].clone())), Mul(Div(args[1].clone(), args[0].clone()), args[0].fwd()), ), ) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, out_adj: PtrVWrap, _cur: &PtrVWrap| { //y = x^a = exp(ln(x^a)) = exp(a ln(x)) //y' = exp(a ln(x))( a'*ln(x) + a/x*x') // = x^(a-1)*a*x' + x^a*ln(x) a' assert_eq!(inputs.len(), 2); let one = VWrap::new_with_val(OpConst::new(), ValType::F(1.)); vec![ Mul( Mul( Pow(inputs[0].clone(), Minus(inputs[1].clone(), one)), inputs[1].clone(), ), out_adj.clone(), ), Mul( Mul( Pow(inputs[0].clone(), inputs[1].clone()), Ln(inputs[0].clone()), ), out_adj.clone(), ), ] }, ) } } impl FWrap for OpExp { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpExp {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _v: Option<ValType>| { assert!(x.len() == 1); let expo: f32 = x[0].0.into(); ValType::F(expo.exp()) }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { //y=exp(x) //y'=exp(x)*x' assert_eq!(args.len(), 1); Mul(Exp(args[0].clone()), args[0].fwd()) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 1); vec![Mul(Exp(inputs[0].clone()), out_adj.clone())] }, ) } } impl FWrap for OpLn { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpLn {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _v: Option<ValType>| { assert!(x.len() == 1); let expo: f32 = x[0].0.into(); ValType::F(expo.ln()) }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { //y=ln(x) //y'= 1/x *x' assert_eq!(args.len(), 1); let one = VWrap::new_with_val(OpConst::new(), ValType::F(1.)); Mul(Div(one, args[0].clone()), args[0].fwd()) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( move |inputs: Vec<PtrVWrap>, out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 1); let one = VWrap::new_with_val(OpConst::new(), ValType::F(1.)); vec![Mul(Div(one, inputs[0].clone()), out_adj.clone())] }, ) 
} } impl FWrap for OpDiv { fn new() -> Box<dyn FWrap> where Self: Sized, { Box::new(OpDiv {}) } fn f(&self) -> Box<dyn FnMut(Vec<(ValType, bool)>, Option<ValType>) -> ValType> { Box::new(move |x: Vec<(ValType, bool)>, _v: Option<ValType>| { assert!(x.len() == 2); let a: f32 = x[0].0.into(); let b: f32 = x[1].0.into(); ValType::F(a / b) }) } fn tangent(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, &PtrVWrap) -> PtrVWrap> { Box::new(move |args: Vec<PtrVWrap>, _self_ptr: &PtrVWrap| { //y=a/b //y'= (a'b-ab')/(b*b) assert_eq!(args.len(), 2); Div( Minus( Mul(args[0].fwd(), args[1].clone()), Mul(args[0].clone(), args[1].fwd()), ), Mul(args[1].clone(), args[1].clone()), ) }) } fn adjoint(&self) -> Box<dyn FnMut(Vec<PtrVWrap>, PtrVWrap, &PtrVWrap) -> Vec<PtrVWrap>> { Box::new( //y=a/b //y'= (a'b-ab')/(b*b) = a'/b - ab'/(b*b) move |inputs: Vec<PtrVWrap>, out_adj: PtrVWrap, _cur: &PtrVWrap| { assert_eq!(inputs.len(), 2); let one = VWrap::new_with_val(OpConst::new(), ValType::F(1.)); let minus_one = VWrap::new_with_val(OpConst::new(), ValType::F(-1.)); vec![ Mul(Div(one, inputs[1].clone()), out_adj.clone()), Mul( Div( Mul(minus_one, inputs[0].clone()), Mul(inputs[1].clone(), inputs[1].clone()), ), out_adj.clone(), ), ] }, ) } } #[allow(dead_code)] pub fn Mul(arg0: PtrVWrap, arg1: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpMul::new()); a.set_inp(vec![arg0, arg1]); a } #[allow(dead_code)] pub fn Add(arg0: PtrVWrap, arg1: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpAdd::new()); a.set_inp(vec![arg0, arg1]); a } #[allow(dead_code)] pub fn Minus(arg0: PtrVWrap, arg1: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpAdd::new()); let temp = VWrap::new_with_val(OpConst::new(), ValType::F(-1.)); a.set_inp(vec![arg0, Mul(arg1, temp)]); a } #[allow(dead_code)] pub fn Leaf(arg0: ValType) -> PtrVWrap { let a = VWrap::new_with_val(OpLeaf::new(), arg0); a } #[allow(dead_code)] pub fn Sin(arg0: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpSin::new()); a.set_inp(vec![arg0]); a } #[allow(dead_code)] pub fn Cos(arg0: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpCos::new()); a.set_inp(vec![arg0]); a } #[allow(dead_code)] pub fn Tan(arg0: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpTan::new()); a.set_inp(vec![arg0]); a } #[allow(dead_code)] pub fn Exp(arg0: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpExp::new()); a.set_inp(vec![arg0]); a } #[allow(dead_code)] pub fn Ln(arg0: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpLn::new()); a.set_inp(vec![arg0]); a } #[allow(dead_code)] pub fn Div(arg0: PtrVWrap, arg1: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpDiv::new()); a.set_inp(vec![arg0, arg1]); a } #[allow(dead_code)] pub fn Pow(arg0: PtrVWrap, arg1: PtrVWrap) -> PtrVWrap { let mut a = VWrap::new(OpPow::new()); a.set_inp(vec![arg0, arg1]); a } #[cfg(test)] fn eq_f32(a: f32, b: f32) -> bool { (a - b).abs() < 0.01 } #[test] fn test_loop_fwd() { let l0 = Leaf(ValType::F(2.)).active(); let mut l = l0.clone(); for _ in 0..10 { l = Mul(l, Leaf(ValType::F(2.))); } let vl = l.apply_fwd(); dbg!(vl); assert!(eq_f32(vl.into(), 2048.)); let mut g = l.fwd(); let h = g.apply_fwd(); dbg!(h); assert!(eq_f32(h.into(), 1024.)); } #[test] fn test_simple_fwd() { //(3x)' = 3 let l0 = Leaf(ValType::F(4.)).active(); let l1 = Leaf(ValType::F(3.)); let a = Mul(l0.clone(), l1.clone()); let mut b = a.fwd(); let c = b.apply_fwd(); dbg!(c); assert!(eq_f32(c.into(), 3.)); } #[test] fn test_square_fwd() { //(3x^2)' = 6x{x=4} = 24 let l0 = Leaf(ValType::F(4.)).active(); let l1 = Leaf(ValType::F(3.)); let a = 
Mul(Mul(l0.clone(), l0.clone()), l1); let mut b = a.fwd(); let c = b.apply_fwd(); dbg!(&c); // dbg!( &l0 ); assert!(eq_f32(c.into(), 24.)); } #[test] fn test_square_fwd_2() { //(x(4)^2)' = 16 let l0 = Leaf(ValType::F(4.)); let l1 = Leaf(ValType::F(3.)).active(); let a = Mul(Mul(l0.clone(), l0.clone()), l1); let mut b = a.fwd(); let c = b.apply_fwd(); dbg!(&c); assert!(eq_f32(c.into(), 16.)); } #[test] fn test_simple_rev() { //(3x)' = 3 let l0 = Leaf(ValType::F(4.)); let l1 = Leaf(ValType::F(3.)); let a = Mul(l0.clone(), l1.clone()); let ret = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); dbg!(ret); assert!(eq_f32(ret.into(), 3.)); } #[test] fn test_simple_rev_2() { //(3x^2)' = 6x{x=4} = 24 let l0 = Leaf(ValType::F(4.)); let l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), l1.clone()); let ret = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); dbg!(&ret); assert!(eq_f32(ret.into(), 24.)); } #[test] fn test_composite_fwd_over_fwd() { //y=3*x^2 where x=4 //compute y'' = (6x)' = 6 let l0 = Leaf(ValType::F(4.)).active(); let l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), l1.clone()); let mut gg = a.fwd().fwd(); let ret = gg.apply_fwd(); dbg!(&ret); assert!(eq_f32(ret.into(), 6.)); } #[test] fn test_composite_fwd_over_rev() { //y=x*3 where x=4 //compute y'' = (3)' = 0 let l0 = Leaf(ValType::F(4.)).active(); let l1 = Leaf(ValType::F(3.)); let a = Mul(l0.clone(), l1.clone()); let mut adjoints = a.rev(); let adj = adjoints.get_mut(&l0).expect("l0 adjoint missing"); let mut g = adj.fwd(); let ret = g.apply_fwd(); dbg!(&ret); assert!(eq_f32(ret.into(), 0.)); } #[test] fn test_composite_fwd_over_rev_2() { //y=3*x^2 where x=4 //compute y'' = (6x)' = 6 let l0 = Leaf(ValType::F(4.)).active(); let l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), l1.clone()); let ret = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .fwd() .apply_fwd(); dbg!(&ret); assert!(eq_f32(ret.into(), 6.)); } #[test] fn test_composite_rev_over_rev() { //(3x^2)'' = 6 let l0 = Leaf(ValType::F(4.)); let l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), l1.clone()); let ret = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); dbg!(&ret); assert!(eq_f32(ret.into(), 6.)); } #[test] fn test_composite_rev_over_fwd() { //(3x^2)'' = 6 let l0 = Leaf(ValType::F(4.)).active(); let l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), l1.clone()); let ret = a .fwd() .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); dbg!(&ret); assert!(eq_f32(ret.into(), 6.)); } #[test] fn test_composite_rev_over_fwd_change_input() { //(3x^2)'' = 6 let l0 = Leaf(ValType::F(4.)).active(); let mut l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), l1.clone()); let mut gg = a .fwd() .rev() .get_mut(&l0) .expect("l0 adjoint missing") .clone(); let ret = gg.apply_rev(); dbg!(&ret); assert!(eq_f32(ret.into(), 6.)); //change to (7x^2)''=(14x)'=14 l1.set_val(ValType::F(7.)); let ret2 = gg.apply_rev(); dbg!(&ret2); assert!(eq_f32(ret2.into(), 14.)); } #[test] fn test_composite_rev_over_rev_change_input() { //(3x^2)'' = 6 let l0 = Leaf(ValType::F(4.)); let mut l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), l1.clone()); let mut gg = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .rev() .get_mut(&l0) .expect("l0 adjoint missing") .clone(); let ret = gg.apply_rev(); dbg!(&ret); assert!(eq_f32(ret.into(), 6.)); //change 
to (7x^2)''=(14x)'=14 l1.set_val(ValType::F(7.)); let ret2 = gg.apply_rev(); dbg!(&ret2); assert!(eq_f32(ret2.into(), 14.)); } #[test] fn test_composite_fwd_over_rev_change_input() { //(3x^2)'' = 6 let l0 = Leaf(ValType::F(4.)).active(); let mut l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), l1.clone()); let mut gg = a.rev().get_mut(&l0).expect("l0 adjoint missing").fwd(); let ret = gg.apply_fwd(); dbg!(&ret); assert!(eq_f32(ret.into(), 6.)); //change to (7x^2)''=(14x)'=14 l1.set_val(ValType::F(7.)); let ret2 = gg.apply_fwd(); dbg!(&ret2); assert!(eq_f32(ret2.into(), 14.)); } #[test] fn test_composite_fwd_over_fwd_change_input() { //y=3*x^2 where x=4 //compute y'' = (6x)' = 6 let l0 = Leaf(ValType::F(4.)).active(); let mut l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), l1.clone()); let mut gg = a.fwd().fwd(); let ret = gg.apply_fwd(); dbg!(&ret); assert!(eq_f32(ret.into(), 6.)); //change to (7x^2)''=(14x)'=14 l1.set_val(ValType::F(7.)); let ret2 = gg.apply_fwd(); dbg!(&ret2); assert!(eq_f32(ret2.into(), 14.)); } #[test] fn test_2nd_partial_derivative() { //x=4 //y=3 //f=x^2 * y^2 //d^2(f)/(dx dy) = d(d(x^2 * y^2)/dx)/dy = d(2x*y^2)/dy = 2x*2y = 4*4*3 = 48 let l0 = Leaf(ValType::F(4.)); let l1 = Leaf(ValType::F(3.)); let a = Mul(Mul(l0.clone(), l0.clone()), Mul(l1.clone(), l1.clone())); let mut gg = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .rev() .get_mut(&l1) .expect("l1 adjoint missing") .clone(); let ret = gg.apply_rev(); dbg!(&ret); assert!(eq_f32(ret.into(), 48.)); } #[test] fn test_trig_sin_fwd() { //y=3*sin(x) where x=2 //y'=3*cos(x) where x=2 //y''=-3*sin(x) where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let mut a = Mul(Sin(l0.clone()), l1.clone()); assert!(eq_f32(a.apply_fwd().into(), 3.0 * 2f32.sin())); let mut g = a.fwd(); assert!(eq_f32(g.apply_fwd().into(), 3.0 * 2f32.cos())); let mut gg = g.fwd(); assert!(eq_f32(gg.apply_fwd().into(), -3.0 * 2f32.sin())); } #[test] fn test_trig_sin_rev() { //y=3*sin(x) where x=2 //y'=3*cos(x) where x=2 //y''=-3*sin(x) where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let a = Mul(Sin(l0.clone()), l1.clone()); { let g = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(g.into(), 3.0 * 2f32.cos())); } { let gg = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(gg.into(), -3.0 * 2f32.sin())); } } #[test] fn test_trig_cos_fwd() { //y=3*cos(x) where x=2 //y'=-3*sin(x) where x=2 //y''=-3*cos(x) where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let mut a = Mul(Cos(l0.clone()), l1.clone()); assert!(eq_f32(a.apply_fwd().into(), 3.0 * 2f32.cos())); let mut g = a.fwd(); assert!(eq_f32(g.apply_fwd().into(), -3.0 * 2f32.sin())); let mut gg = g.fwd(); assert!(eq_f32(gg.apply_fwd().into(), -3.0 * 2f32.cos())); } #[test] fn test_trig_cos_rev() { //y=3*cos(x) where x=2 //y'=-3*sin(x) where x=2 //y''=-3*cos(x) where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let a = Mul(Cos(l0.clone()), l1.clone()); { let g = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(g.into(), -3.0 * 2f32.sin())); } { let gg = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(gg.into(), -3.0 * 2f32.cos())); } } #[test] fn test_exp_fwd() { //y=3*exp(4x) where x=2 //y'=12*exp(4x) 
where x=2 //y''=48*exp(4x) where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let l2 = Leaf(ValType::F(4.)); let a = Mul(Exp(Mul(l2.clone(), l0.clone())), l1.clone()); { assert!(eq_f32(a.fwd().apply_fwd().into(), 12. * 8f32.exp())); } { assert!(eq_f32(a.fwd().fwd().apply_fwd().into(), 48. * 8f32.exp())); } } #[test] fn test_exp_rev() { //y=3*exp(4x) where x=2 //y'=12*exp(4x) where x=2 //y''=48*exp(4x) where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let l2 = Leaf(ValType::F(4.)); let a = Mul(Exp(Mul(l2.clone(), l0.clone())), l1.clone()); { let g = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(g.into(), 12. * 8f32.exp())); } { let gg = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(gg.into(), 48. * 8f32.exp())); } } #[test] fn test_div_fwd() { //y=3/(4x) where x=2 //y'=-3/4*x^-2 //y''=6/4*x^-3 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let l2 = Leaf(ValType::F(4.)); let a = Div(l1.clone(), Mul(l2.clone(), l0.clone())); { assert!(eq_f32(a.fwd().apply_fwd().into(), -3. / 4. * 2f32.powi(-2))); } { assert!(eq_f32( a.fwd().fwd().apply_fwd().into(), 6. / 4. * 2f32.powi(-3) )); } } #[test] fn test_div_rev() { //y=3/(4x) where x=2 //y'=-3/4*x^-2 //y''=6/4*x^-3 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let l2 = Leaf(ValType::F(4.)); let a = Div(l1.clone(), Mul(l2.clone(), l0.clone())); { let g = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(g.into(), -3. / 4. * 2f32.powi(-2))); } { let gg = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(gg.into(), 6. / 4. * 2f32.powi(-3))); } } #[test] fn test_tan_fwd() { //y=3tan(4x) where x=2 //y'=12/(cos(4x))^2 where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let l2 = Leaf(ValType::F(4.)); let a = Mul(l1.clone(), Tan(Mul(l2.clone(), l0.clone()))); assert!(eq_f32( a.fwd().apply_fwd().into(), 12. / (8f32.cos().powi(2)) )); } #[test] fn test_tan_rev() { //y=3tan(4x) where x=2 //y'=12/(cos(4x))^2 where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(3.)); let l2 = Leaf(ValType::F(4.)); let a = Mul(l1.clone(), Tan(Mul(l2.clone(), l0.clone()))); let g = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(g.into(), 12. / (8f32.cos().powi(2)))); } #[test] fn test_ln_fwd() { //y=ln(4x) where x=2 //y'=4/(4x) where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(4.)); let a = Ln(Mul(l0.clone(), l1.clone())); let g = a.fwd().apply_fwd(); assert!(eq_f32(g.into(), 4. / 8.)); } #[test] fn test_ln_rev() { //y=ln(4x) where x=2 //y'=4/(4x) where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(4.)); let a = Ln(Mul(l0.clone(), l1.clone())); let g = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(g.into(), 4. / 8.)); } #[test] fn test_pow_fwd() { //y=4x^3 where x=2 //y'=12*x^2 where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(4.)); let l2 = Leaf(ValType::F(3.)); let a = Mul(l1.clone(), Pow(l0.clone(), l2.clone())); assert!(eq_f32(a.fwd().apply_fwd().into(), 12. 
* 4.)); } #[test] fn test_pow_fwd_2() { //y=4^(3x) where x=2 //y'=ln(4)*4^(3x)*3 where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(4.)); let l2 = Leaf(ValType::F(3.)); let a = Pow(l1.clone(), Mul(l2.clone(), l0.clone())); assert!(eq_f32( a.fwd().apply_fwd().into(), 4f32.ln() * 4f32.powf(3. * 2.) * 3. )); } #[test] fn test_pow_rev() { //y=4x^3 where x=2 //y'=12*x^2 where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(4.)); let l2 = Leaf(ValType::F(3.)); let a = Mul(l1.clone(), Pow(l0.clone(), l2.clone())); let g = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(g.into(), 12. * 4.)); } #[test] fn test_pow_rev_2() { //y=4^(3x) where x=2 //y'=ln(4)*4^(3x)*3 where x=2 let l0 = Leaf(ValType::F(2.)).active(); let l1 = Leaf(ValType::F(4.)); let l2 = Leaf(ValType::F(3.)); let a = Pow(l1.clone(), Mul(l2.clone(), l0.clone())); let g = a .rev() .get_mut(&l0) .expect("l0 adjoint missing") .apply_rev(); assert!(eq_f32(g.into(), 4f32.ln() * 4f32.powf(3. * 2.) * 3.)); }
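// Added example (ours, not part of the original suite): a mixed-mode second
// partial of f = x^2 * y^2 at x=4, y=3, taking the adjoint w.r.t. x and then
// a tangent pass w.r.t. the active variable y. Expected value:
// d/dy (df/dx) = d(2x*y^2)/dy = 4xy = 48, matching `test_2nd_partial_derivative`.
#[test]
fn test_2nd_partial_derivative_fwd_over_rev() {
    let l0 = Leaf(ValType::F(4.));
    let l1 = Leaf(ValType::F(3.)).active();
    let a = Mul(Mul(l0.clone(), l0.clone()), Mul(l1.clone(), l1.clone()));
    let ret = a
        .rev()
        .get_mut(&l0)
        .expect("l0 adjoint missing")
        .fwd()
        .apply_fwd();
    dbg!(&ret);
    assert!(eq_f32(ret.into(), 48.));
}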
26.579837
100
0.483853
9194976f029e921613ee9b05977d70a119cd8c4e
565
dirmod::all!(default pub use; default dir pub); use derive_more::{Display, From}; use diesel::pg::PgConnection; use diesel::r2d2::ConnectionManager; use crate::Secrets; type ConnMan = ConnectionManager<PgConnection>; type Pool = r2d2::Pool<ConnMan>; #[derive(Clone)] pub struct Index(Pool); impl Index { pub fn try_new(secrets: &Secrets) -> Result<Index, r2d2::Error> { Ok(Index(Pool::new(ConnMan::new(secrets.database().url()))?)) } } #[derive(From, Display)] pub enum QueryError { R2d2(r2d2::Error), Diesel(diesel::result::Error), }
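// A minimal usage sketch (ours): `Index` wraps an r2d2 connection pool, so it
// is cheap to clone and hand one copy to each request handler. Assumes a
// `Secrets` value is constructed elsewhere and a Postgres database is
// reachable at runtime.
#[allow(dead_code)]
fn example_usage(secrets: &Secrets) -> Result<Index, r2d2::Error> {
    let index = Index::try_new(secrets)?;
    // Cloning shares the underlying pool rather than opening new connections.
    let handler_copy = index.clone();
    drop(handler_copy);
    Ok(index)
}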
21.730769
69
0.684956
715941ed056a2042c6ba564ac790ef04edac0f09
10,593
//! Request extractors use std::{future::Future, pin::Pin, task::Context, task::Poll}; use super::error::ErrorRenderer; use super::httprequest::HttpRequest; use crate::{http::Payload, util::Ready}; /// Trait implemented by types that can be extracted from request. /// /// Types that implement this trait can be used with `Route` handlers. pub trait FromRequest<Err>: Sized { /// The associated error which can be returned. type Error; /// Future that resolves to a Self type Future: Future<Output = Result<Self, Self::Error>>; /// Convert request to a Self fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future; /// Convert request to a Self /// /// This method uses `Payload::None` as payload stream. fn extract(req: &HttpRequest) -> Self::Future { Self::from_request(req, &mut Payload::None) } } /// Optionally extract a field from the request /// /// If the FromRequest for T fails, return None rather than returning an error response /// /// ## Example /// /// ```rust /// use ntex::{http, util::Ready}; /// use ntex::web::{self, error, App, HttpRequest, FromRequest, DefaultError}; /// use rand; /// /// #[derive(Debug, serde::Deserialize)] /// struct Thing { /// name: String /// } /// /// impl<Err> FromRequest<Err> for Thing { /// type Error = error::Error; /// type Future = Ready<Self, Self::Error>; /// /// fn from_request(req: &HttpRequest, payload: &mut http::Payload) -> Self::Future { /// if rand::random() { /// Ready::Ok(Thing { name: "thingy".into() }) /// } else { /// Ready::Err(error::ErrorBadRequest("no luck").into()) /// } /// } /// } /// /// /// extract `Thing` from request /// async fn index(supplied_thing: Option<Thing>) -> String { /// match supplied_thing { /// // Puns not intended /// Some(thing) => format!("Got something: {:?}", thing), /// None => format!("No thing!") /// } /// } /// /// fn main() { /// let app = App::new().service( /// web::resource("/users/:first").route( /// web::post().to(index)) /// ); /// } /// ``` impl<T, Err> FromRequest<Err> for Option<T> where T: FromRequest<Err> + 'static, T::Future: 'static, Err: ErrorRenderer, <T as FromRequest<Err>>::Error: Into<Err::Container>, { type Error = Err::Container; type Future = Pin<Box<dyn Future<Output = Result<Option<T>, Self::Error>>>>; #[inline] fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { let fut = T::from_request(req, payload); Box::pin(async move { match fut.await { Ok(v) => Ok(Some(v)), Err(e) => { log::debug!("Error for Option<T> extractor: {}", e.into()); Ok(None) } } }) } } /// Optionally extract a field from the request or extract the Error if unsuccessful /// /// If the `FromRequest` for T fails, inject Err into handler rather than returning an error response /// /// ## Example /// /// ```rust /// use ntex::{http, util::Ready}; /// use ntex::web::{self, error, App, HttpRequest, FromRequest}; /// use rand; /// /// #[derive(Debug, serde::Deserialize)] /// struct Thing { /// name: String /// } /// /// impl<Err> FromRequest<Err> for Thing { /// type Error = error::Error; /// type Future = Ready<Thing, Self::Error>; /// /// fn from_request(req: &HttpRequest, payload: &mut http::Payload) -> Self::Future { /// if rand::random() { /// Ready::Ok(Thing { name: "thingy".into() }) /// } else { /// Ready::Err(error::ErrorBadRequest("no luck").into()) /// } /// } /// } /// /// /// extract `Thing` from request /// async fn index(supplied_thing: Result<Thing, error::Error>) -> String { /// match supplied_thing { /// Ok(thing) => format!("Got thing: {:?}", thing), /// Err(e) => 
format!("Error extracting thing: {}", e) /// } /// } /// /// fn main() { /// let app = App::new().service( /// web::resource("/users/:first").route(web::post().to(index)) /// ); /// } /// ``` impl<T, E> FromRequest<E> for Result<T, T::Error> where T: FromRequest<E> + 'static, T::Error: 'static, T::Future: 'static, E: ErrorRenderer, { type Error = T::Error; type Future = Pin<Box<dyn Future<Output = Result<Result<T, T::Error>, Self::Error>>>>; #[inline] fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { let fut = T::from_request(req, payload); Box::pin(async move { match fut.await { Ok(v) => Ok(Ok(v)), Err(e) => Ok(Err(e)), } }) } } #[doc(hidden)] impl<E: ErrorRenderer> FromRequest<E> for () { type Error = E::Container; type Future = Ready<(), E::Container>; fn from_request(_: &HttpRequest, _: &mut Payload) -> Self::Future { Ok(()).into() } } macro_rules! tuple_from_req ({$fut_type:ident, $(($n:tt, $T:ident)),+} => { /// FromRequest implementation for a tuple #[allow(unused_parens)] impl<Err: ErrorRenderer, $($T: FromRequest<Err> + 'static),+> FromRequest<Err> for ($($T,)+) where $(<$T as $crate::web::FromRequest<Err>>::Error: Into<Err::Container>),+ { type Error = Err::Container; type Future = $fut_type<Err, $($T),+>; fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { $fut_type { items: <($(Option<$T>,)+)>::default(), $($T: $T::from_request(req, payload),)+ } } } pin_project_lite::pin_project! { #[doc(hidden)] pub struct $fut_type<Err: ErrorRenderer, $($T: FromRequest<Err>),+> { items: ($(Option<$T>,)+), $(#[pin] $T: $T::Future),+ } } impl<Err: ErrorRenderer, $($T: FromRequest<Err>),+> Future for $fut_type<Err, $($T),+> where $(<$T as $crate::web::FromRequest<Err>>::Error: Into<Err::Container>),+ { type Output = Result<($($T,)+), Err::Container>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = self.project(); let mut ready = true; $( if this.items.$n.is_none() { match this.$T.poll(cx) { Poll::Ready(Ok(item)) => { this.items.$n = Some(item); } Poll::Pending => ready = false, Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), } } )+ if ready { Poll::Ready(Ok( ($(this.items.$n.take().unwrap(),)+) )) } else { Poll::Pending } } } }); #[allow(non_snake_case)] #[rustfmt::skip] mod m { use super::*; tuple_from_req!(TupleFromRequest1, (0, A)); tuple_from_req!(TupleFromRequest2, (0, A), (1, B)); tuple_from_req!(TupleFromRequest3, (0, A), (1, B), (2, C)); tuple_from_req!(TupleFromRequest4, (0, A), (1, B), (2, C), (3, D)); tuple_from_req!(TupleFromRequest5, (0, A), (1, B), (2, C), (3, D), (4, E)); tuple_from_req!(TupleFromRequest6, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F)); tuple_from_req!(TupleFromRequest7, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F), (6, G)); tuple_from_req!(TupleFromRequest8, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F), (6, G), (7, H)); tuple_from_req!(TupleFromRequest9, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F), (6, G), (7, H), (8, I)); tuple_from_req!(TupleFromRequest10, (0, A), (1, B), (2, C), (3, D), (4, E), (5, F), (6, G), (7, H), (8, I), (9, J)); } #[cfg(test)] mod tests { use crate::http::header; use crate::util::Bytes; use crate::web::error::UrlencodedError; use crate::web::test::{from_request, TestRequest}; use crate::web::types::{Form, FormConfig}; #[derive(serde::Deserialize, Debug, PartialEq)] struct Info { hello: String, } #[crate::rt_test] async fn test_option() { let (req, mut pl) = TestRequest::with_header( header::CONTENT_TYPE, 
"application/x-www-form-urlencoded", ) .data(FormConfig::default().limit(4096)) .to_http_parts(); let r = from_request::<Option<Form<Info>>>(&req, &mut pl) .await .unwrap(); assert_eq!(r, None); let (req, mut pl) = TestRequest::with_header( header::CONTENT_TYPE, "application/x-www-form-urlencoded", ) .header(header::CONTENT_LENGTH, "9") .set_payload(Bytes::from_static(b"hello=world")) .to_http_parts(); let r = from_request::<Option<Form<Info>>>(&req, &mut pl) .await .unwrap(); assert_eq!( r, Some(Form(Info { hello: "world".into() })) ); let (req, mut pl) = TestRequest::with_header( header::CONTENT_TYPE, "application/x-www-form-urlencoded", ) .header(header::CONTENT_LENGTH, "9") .set_payload(Bytes::from_static(b"bye=world")) .to_http_parts(); let r = from_request::<Option<Form<Info>>>(&req, &mut pl) .await .unwrap(); assert_eq!(r, None); } #[crate::rt_test] async fn test_result() { let (req, mut pl) = TestRequest::with_header( header::CONTENT_TYPE, "application/x-www-form-urlencoded", ) .header(header::CONTENT_LENGTH, "11") .set_payload(Bytes::from_static(b"hello=world")) .to_http_parts(); let r = from_request::<Result<Form<Info>, UrlencodedError>>(&req, &mut pl) .await .unwrap(); assert_eq!( r.unwrap(), Form(Info { hello: "world".into() }) ); let (req, mut pl) = TestRequest::with_header( header::CONTENT_TYPE, "application/x-www-form-urlencoded", ) .header(header::CONTENT_LENGTH, "9") .set_payload(Bytes::from_static(b"bye=world")) .to_http_parts(); let r = from_request::<Result<Form<Info>, UrlencodedError>>(&req, &mut pl) .await .unwrap(); assert!(r.is_err()); } }
30.704348
116
0.52176
e2272837efb7a5c41eeb3f2cc3f2fba0a590616a
640
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:issue_9188.rs extern crate issue_9188; pub fn main() { let a = issue_9188::bar(); let b = issue_9188::foo::<isize>(); assert_eq!(*a, *b); }
30.47619
69
0.70625
e52dea1ef243883db9e74e0635458b2969dab211
4,139
use crate::{ error::Result, hash, sync::{self, diff::DiffOptions, CommitId, RepoPath}, AsyncGitNotification, FileDiff, }; use crossbeam_channel::Sender; use std::{ hash::Hash, sync::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, }, }; /// #[derive(Debug, Hash, Clone, PartialEq)] pub enum DiffType { /// diff two commits Commits((CommitId, CommitId)), /// diff in a given commit Commit(CommitId), /// diff against staged file Stage, /// diff against file in workdir WorkDir, } /// #[derive(Debug, Hash, Clone, PartialEq)] pub struct DiffParams { /// path to the file to diff pub path: String, /// what kind of diff pub diff_type: DiffType, /// diff options pub options: DiffOptions, } struct Request<R, A>(R, Option<A>); #[derive(Default, Clone)] struct LastResult<P, R> { params: P, result: R, } /// pub struct AsyncDiff { current: Arc<Mutex<Request<u64, FileDiff>>>, last: Arc<Mutex<Option<LastResult<DiffParams, FileDiff>>>>, sender: Sender<AsyncGitNotification>, pending: Arc<AtomicUsize>, repo: RepoPath, } impl AsyncDiff { /// pub fn new( repo: RepoPath, sender: &Sender<AsyncGitNotification>, ) -> Self { Self { repo, current: Arc::new(Mutex::new(Request(0, None))), last: Arc::new(Mutex::new(None)), sender: sender.clone(), pending: Arc::new(AtomicUsize::new(0)), } } /// pub fn last(&mut self) -> Result<Option<(DiffParams, FileDiff)>> { let last = self.last.lock()?; Ok(last.clone().map(|res| (res.params, res.result))) } /// pub fn refresh(&mut self) -> Result<()> { if let Ok(Some(param)) = self.get_last_param() { self.clear_current()?; self.request(param)?; } Ok(()) } /// pub fn is_pending(&self) -> bool { self.pending.load(Ordering::Relaxed) > 0 } /// pub fn request( &mut self, params: DiffParams, ) -> Result<Option<FileDiff>> { log::trace!("request {:?}", params); let hash = hash(&params); { let mut current = self.current.lock()?; if current.0 == hash { return Ok(current.1.clone()); } current.0 = hash; current.1 = None; } let arc_current = Arc::clone(&self.current); let arc_last = Arc::clone(&self.last); let sender = self.sender.clone(); let arc_pending = Arc::clone(&self.pending); let repo = self.repo.clone(); self.pending.fetch_add(1, Ordering::Relaxed); rayon_core::spawn(move || { let notify = Self::get_diff_helper( &repo, params, &arc_last, &arc_current, hash, ); let notify = match notify { Err(err) => { log::error!("get_diff_helper error: {}", err); true } Ok(notify) => notify, }; arc_pending.fetch_sub(1, Ordering::Relaxed); sender .send(if notify { AsyncGitNotification::Diff } else { AsyncGitNotification::FinishUnchanged }) .expect("error sending diff"); }); Ok(None) } fn get_diff_helper( repo_path: &RepoPath, params: DiffParams, arc_last: &Arc< Mutex<Option<LastResult<DiffParams, FileDiff>>>, >, arc_current: &Arc<Mutex<Request<u64, FileDiff>>>, hash: u64, ) -> Result<bool> { let res = match params.diff_type { DiffType::Stage => sync::diff::get_diff( repo_path, &params.path, true, Some(params.options), )?, DiffType::WorkDir => sync::diff::get_diff( repo_path, &params.path, false, Some(params.options), )?, DiffType::Commit(id) => sync::diff::get_diff_commit( repo_path, id, params.path.clone(), )?, DiffType::Commits(ids) => sync::diff::get_diff_commits( repo_path, ids, params.path.clone(), )?, }; let mut notify = false; { let mut current = arc_current.lock()?; if current.0 == hash { current.1 = Some(res.clone()); notify = true; } } { let mut last = arc_last.lock()?; *last = Some(LastResult { result: res, params, }); } Ok(notify) } fn get_last_param(&self) -> Result<Option<DiffParams>> { 
Ok(self.last.lock()?.clone().map(|e| e.params)) } fn clear_current(&mut self) -> Result<()> { let mut current = self.current.lock()?; current.0 = 0; current.1 = None; Ok(()) } }
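// A minimal usage sketch (ours): a caller submits params, gets `None` back
// while the diff is computed on the rayon pool, and reads the cached result
// after an `AsyncGitNotification::Diff` arrives on the channel. Assumes
// `DiffOptions` implements `Default`; the file path is illustrative.
#[allow(dead_code)]
fn example_usage(
    repo: RepoPath,
    sender: &Sender<AsyncGitNotification>,
) -> Result<()> {
    let mut diff = AsyncDiff::new(repo, sender);
    let params = DiffParams {
        path: "src/main.rs".into(),
        diff_type: DiffType::WorkDir,
        options: DiffOptions::default(),
    };
    // The first request spawns the work and returns `None` immediately.
    assert!(diff.request(params)?.is_none());
    // Once notified, the result is available without recomputation.
    let _cached = diff.last()?;
    Ok(())
}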
19.073733
67
0.617057
22227302c42602cf94ff54f19e815f75d9227852
7,533
//! use errors::*; use fold::{self, Fold, IdentityExistentialFolder, IdentityUniversalFolder, TypeFolder}; use ir::*; use solve::infer::InferenceTable; pub fn truncate<T>( infer: &mut InferenceTable, universe: UniverseIndex, max_size: usize, value: &T, ) -> Truncated<T::Result> where T: Fold, { debug_heading!("truncate(max_size={}, value={:?})", max_size, value); let mut truncater = Truncater::new(infer, universe, max_size); let value = value .fold_with(&mut truncater, 0) .expect("Truncater is infallible"); debug!("truncate: overflow={} value={:?}", truncater.overflow, value); Truncated { overflow: truncater.overflow, value, } } /// Result from `truncate`. pub struct Truncated<T> { /// If true, then `value` was truncated relative to the original /// (e.g., fresh inference variables were introduced). If false, /// then it is effectively a clone of the original. pub overflow: bool, /// Possibly truncate value. pub value: T, } struct Truncater<'infer> { infer: &'infer mut InferenceTable, universe: UniverseIndex, current_size: usize, max_size: usize, overflow: bool, } impl<'infer> Truncater<'infer> { fn new(infer: &'infer mut InferenceTable, universe: UniverseIndex, max_size: usize) -> Self { Truncater { infer, universe, current_size: 0, max_size, overflow: false, } } fn overflow(&mut self, pre_size: usize) -> Ty { self.overflow = true; self.current_size = pre_size + 1; self.infer.new_variable(self.universe).to_ty() } } impl<'infer> TypeFolder for Truncater<'infer> { fn fold_ty(&mut self, ty: &Ty, binders: usize) -> Result<Ty> { if let Some(normalized_ty) = self.infer.normalize_shallow(ty, binders) { return self.fold_ty(&normalized_ty, binders); } let pre_size = self.current_size; self.current_size += 1; let result = fold::super_fold_ty(self, ty, binders)?; // We wish to maintain the invariant that: // // pre_size < self.max_size => // post_size <= self.max_size // // Presuming that `pre_size < self.max_size`, then the // invariant is in jeopardy if `post_size > self.max_size`. // To repair the situation, we replace the entire subtree with // a fresh existential variable (in the innermost universe). let post_size = self.current_size; let result = if pre_size < self.max_size && post_size > self.max_size { self.overflow(pre_size).up_shift(binders) } else { result }; // When we get back to the first invocation, clear the counters. // We process each type independently. 
if pre_size == 0 { self.current_size = 0; } Ok(result) } fn fold_lifetime(&mut self, lifetime: &Lifetime, binders: usize) -> Result<Lifetime> { fold::super_fold_lifetime(self, lifetime, binders) } } impl<'infer> IdentityExistentialFolder for Truncater<'infer> {} impl<'infer> IdentityUniversalFolder for Truncater<'infer> {} #[test] fn truncate_types() { let mut table = InferenceTable::new(); let environment0 = &Environment::new(); let environment1 = &environment0.new_universe(); let u1 = environment1.universe; let environment2 = &environment1.new_universe(); // Vec<Vec<Vec<Vec<T>>>> let ty0 = ty!(apply (item 0) (apply (item 0) (apply (item 0) (apply (item 0) (apply (skol 1)))))); // test: no truncation with size 5 let Truncated { overflow, value: ty_no_overflow, } = truncate(&mut table, u1, 5, &ty0); assert!(!overflow); assert_eq!(ty0, ty_no_overflow); // test: with size 3, truncates to `Vec<Vec<X>>` let ty_expect = ty!(apply (item 0) (apply (item 0) (var 0))); let Truncated { overflow, value: ty_overflow, } = truncate(&mut table, u1, 3, &ty0); assert!(overflow); assert_eq!(ty_expect, ty_overflow); // test: the `X` is in u1, hence should fail to unify with a skolemized value in U2. let ty_in_u2 = ty!(apply (item 0) (apply (item 0) (apply (skol 2)))); table .unify(environment2, &ty_overflow, &ty_in_u2) .unwrap_err(); } #[test] fn truncate_multiple_types() { let mut table = InferenceTable::new(); let environment0 = &Environment::new(); let environment1 = &environment0.new_universe(); let u1 = environment1.universe; // Vec<Vec<Vec<Vec<T>>>> let ty0 = ty!(apply (item 0) (apply (item 0) (apply (item 0) (apply (item 0) (apply (skol 1)))))); // test: no truncation with size 5 let ty0_3 = vec![ty0.clone(), ty0.clone(), ty0.clone()]; let Truncated { overflow, value: ty_no_overflow, } = truncate(&mut table, u1, 5, &ty0_3); assert!(!overflow); assert_eq!(ty0_3, ty_no_overflow); // test: no truncation with size 6 let ty0_3 = vec![ty0.clone(), ty0.clone(), ty0.clone()]; let Truncated { overflow, value: ty_no_overflow, } = truncate(&mut table, u1, 6, &ty0_3); assert!(!overflow); assert_eq!(ty0_3, ty_no_overflow); // test: truncation of all types evenly with size 3 let ty0_3 = vec![ty0.clone(), ty0.clone(), ty0.clone()]; let Truncated { overflow, value: ty_overflow, } = truncate(&mut table, u1, 3, &ty0_3); assert!(overflow); assert_eq!( vec![ ty!(apply (item 0) (apply (item 0) (var 0))), ty!(apply (item 0) (apply (item 0) (var 1))), ty!(apply (item 0) (apply (item 0) (var 2))), ], ty_overflow ); } #[test] fn truncate_normalizes() { let mut table = InferenceTable::new(); let environment0 = &Environment::new(); let environment1 = &environment0.new_universe(); let u1 = environment1.universe; // ty0 = Vec<Vec<X>> let v0 = table.new_variable(u1); let ty0 = ty!(apply (item 0) (apply (item 0) (var 0))); // ty1 = Vec<Vec<T>> let ty1 = ty!(apply (item 0) (apply (item 0) (apply (skol 1)))); // test: truncating *before* unifying has no effect assert!(!truncate(&mut table, u1, 3, &ty0).overflow); // unify X and ty1 table.unify(environment1, &v0.to_ty(), &ty1).unwrap(); // test: truncating *after* triggers let Truncated { overflow, value: ty_overflow, } = truncate(&mut table, u1, 3, &ty0); assert!(overflow); assert_eq!( ty!(apply (item 0) (apply (item 0) (var 1))), ty_overflow); } #[test] fn truncate_normalizes_under_binders() { let mut table = InferenceTable::new(); let u0 = UniverseIndex::root(); // v0 = X let _v0 = table.new_variable(u0); // ty0 = for<'a> Vec<Vec<X>> let ty0 = ty!(for_all 1 (apply (item 0) (apply 
(item 0) (var 1)))); // the index in `(var 1)` should be adjusted to account for binders assert!(!truncate(&mut table, u0, 4, &ty0).overflow); }
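#[test]
fn truncate_types_boundary() {
    // Added illustration (ours) of the size invariant documented in
    // `fold_ty`: with max_size 4 the same Vec<Vec<Vec<Vec<T>>>> loses exactly
    // one level, becoming Vec<Vec<Vec<X>>> — the deepest node whose pre-visit
    // size is still below the limit is the one replaced.
    let mut table = InferenceTable::new();
    let environment0 = &Environment::new();
    let environment1 = &environment0.new_universe();
    let u1 = environment1.universe;
    let ty0 = ty!(apply (item 0) (apply (item 0) (apply (item 0) (apply (item 0) (apply (skol 1))))));
    let Truncated {
        overflow,
        value: ty_overflow,
    } = truncate(&mut table, u1, 4, &ty0);
    assert!(overflow);
    assert_eq!(
        ty!(apply (item 0) (apply (item 0) (apply (item 0) (var 0)))),
        ty_overflow
    );
}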
28.642586
97
0.568565
014365866f1a40db41d35ac4dbfba76adc32c660
341
use anyhow::Result; use aoc_2019_day_05::*; #[test] fn part_one_answer() -> Result<()> { let input = include_str!("../input/input.txt"); assert_eq!(part_one(input)?, 1); Ok(()) } #[test] fn part_two_answer() -> Result<()> { let input = include_str!("../input/input.txt"); assert_eq!(part_two(input)?, 2); Ok(()) }
18.944444
51
0.589443
8fc8e3aa920307ac8f7616478c488340958db528
3,384
// iterators5.rs // Let's define a simple model to track Rustlings exercise progress. Progress // will be modelled using a hash map. The name of the exercise is the key and // the progress is the value. Two counting functions were created to count the // number of exercises with a given progress. These counting functions use // imperative style for loops. Recreate this counting functionality using // iterators. Only the two iterator methods (count_iterator and // count_collection_iterator) need to be modified. // Execute `rustlings hint iterators5` for hints. // // Make the code compile and the tests pass. use std::collections::HashMap; #[derive(Clone, Copy, PartialEq, Eq)] enum Progress { None, Some, Complete, } fn count_for(map: &HashMap<String, Progress>, value: Progress) -> usize { let mut count = 0; for val in map.values() { if val == &value { count += 1; } } count } fn count_iterator(map: &HashMap<String, Progress>, value: Progress) -> usize { map.values().filter(|&x| *x == value).count() } fn count_collection_for(collection: &[HashMap<String, Progress>], value: Progress) -> usize { let mut count = 0; for map in collection { for val in map.values() { if val == &value { count += 1; } } } count } fn count_collection_iterator(collection: &[HashMap<String, Progress>], value: Progress) -> usize { collection .iter() .flat_map(|map| map.values()) .filter(|&x| *x == value) .count() } #[cfg(test)] mod tests { use super::*; #[test] fn count_complete() { let map = get_map(); assert_eq!(3, count_iterator(&map, Progress::Complete)); } #[test] fn count_equals_for() { let map = get_map(); assert_eq!( count_for(&map, Progress::Complete), count_iterator(&map, Progress::Complete) ); } #[test] fn count_collection_complete() { let collection = get_vec_map(); assert_eq!( 6, count_collection_iterator(&collection, Progress::Complete) ); } #[test] fn count_collection_equals_for() { let collection = get_vec_map(); assert_eq!( count_collection_for(&collection, Progress::Complete), count_collection_iterator(&collection, Progress::Complete) ); } fn get_map() -> HashMap<String, Progress> { use Progress::*; let mut map = HashMap::new(); map.insert(String::from("variables1"), Complete); map.insert(String::from("functions1"), Complete); map.insert(String::from("hashmap1"), Complete); map.insert(String::from("arc1"), Some); map.insert(String::from("as_ref_mut"), None); map.insert(String::from("from_str"), None); map } fn get_vec_map() -> Vec<HashMap<String, Progress>> { use Progress::*; let map = get_map(); let mut other = HashMap::new(); other.insert(String::from("variables2"), Complete); other.insert(String::from("functions2"), Complete); other.insert(String::from("if1"), Complete); other.insert(String::from("from_into"), None); other.insert(String::from("try_from_into"), None); vec![map, other] } }
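// Added sanity check (ours, not part of the original exercise): both counting
// styles agree that an empty collection contains zero of any progress value,
// since `flat_map` over no maps yields an empty iterator.
#[test]
fn count_empty_collection() {
    let empty: Vec<HashMap<String, Progress>> = vec![];
    assert_eq!(0, count_collection_for(&empty, Progress::Complete));
    assert_eq!(0, count_collection_iterator(&empty, Progress::Complete));
}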
27.737705
98
0.600473
0837f74576a9d9164da7a37171af08ec80835366
2,076
//! Run code on an Artichoke interpreter. use crate::value::Value; use crate::ArtichokeError; /// Marker trait for a context used by [`Eval`]. pub trait Context {} /// Interpreters that implement [`Eval`] expose methods for injecting code and /// extracting [`Value`]s from the interpreter. /// /// Implementations are expected to maintain a stack of `Context` objects /// that maintain filename context across nested invocations of /// [`Eval::eval`]. pub trait Eval { /// Concrete type for eval context. type Context: Context; /// Concrete type for return values from eval. type ReturnValue: Value; /// Filename of the top eval context. const TOP_FILENAME: &'static str = "(eval)"; /// Eval code on the artichoke interpreter using the current `Context`. fn eval(&self, code: &[u8]) -> Result<Self::ReturnValue, ArtichokeError>; /// Eval code on the artichoke interpreter using the current `Context`. /// /// Exceptions will unwind past this call. fn unchecked_eval(&self, code: &[u8]) -> Self::ReturnValue; /// Eval code on the artichoke interpreter using a custom `Context`. /// /// `Context` allows manipulating interpreter state before eval, for /// example, setting the `__FILE__` magic constant. fn eval_with_context( &self, code: &[u8], context: Self::Context, ) -> Result<Self::ReturnValue, ArtichokeError>; /// Eval code on the artichoke interpreter using a custom `Context`. /// /// `Context` allows manipulating interpreter state before eval, for /// example, setting the `__FILE__` magic constant. /// /// Exceptions will unwind past this call. fn unchecked_eval_with_context(&self, code: &[u8], context: Self::Context) -> Self::ReturnValue; /// Peek at the top of the [`Context`] stack. fn peek_context(&self) -> Option<&Self::Context>; /// Push a `Context` onto the stack. fn push_context(&mut self, context: Self::Context); /// Pop a `Context` from the stack. fn pop_context(&mut self); }
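// A minimal sketch (ours, not from the crate) of the `Context` half of the
// contract: a filename is the typical piece of state an eval context carries,
// and implementations push one of these per nested `eval` so `__FILE__`
// resolves correctly at every depth.
#[allow(dead_code)]
struct FilenameContext {
    /// Filename reported for code evaluated under this context.
    filename: String,
}

impl Context for FilenameContext {}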
34.032787
78
0.66474
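The trait above leaves the context-stack bookkeeping to implementors. Below is a hypothetical sketch of that discipline, with toy `Interp` and `FileContext` types standing in for a real interpreter; the names and `Result<(), String>` signatures are assumptions for illustration, not Artichoke APIs.

struct FileContext {
    filename: String,
}

struct Interp {
    context_stack: Vec<FileContext>,
}

impl Interp {
    /// Evaluate `code` with `context` on top of the stack, restoring the
    /// previous top afterwards, as the `Eval` docs describe.
    fn eval_with_context(&mut self, code: &[u8], context: FileContext) -> Result<(), String> {
        self.context_stack.push(context);
        let result = self.eval(code);
        self.context_stack.pop();
        result
    }

    fn eval(&mut self, _code: &[u8]) -> Result<(), String> {
        // A real implementation would hand `_code` to the interpreter,
        // using the top of `context_stack` for `__FILE__`.
        Ok(())
    }

    fn peek_context(&self) -> Option<&FileContext> {
        self.context_stack.last()
    }
}

fn main() {
    let mut interp = Interp { context_stack: Vec::new() };
    let ctx = FileContext { filename: String::from("main.rb") };
    interp.eval_with_context(b"1 + 1", ctx).unwrap();
    // The stack is restored once the nested eval returns.
    assert!(interp.peek_context().is_none());
}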
916aefd0a4cf471fb8b64c3ce506b9aa1b637bd1
21,044
// This parse library is more high-level and functional than existing alternatives. // A Parser is defined as (with details omitted): // // Answer<A> = Result<(State, A), String> // Parser<A> = Fn(State) -> Answer<A>> // // Similarly to https://github.com/AndrasKovacs/flatparse, there are 2 ways to fail. // // 1. Recoverable. Use Parser<Option<A>>, and return: // - Ok((new_state, Some(result))) if it succeeds // - Ok((old_state, None)) if it fails // // This backtracks, and should be used to implement alternatives. For example, if you're // parsing an AST, "Animal", with 2 constructors, dog and cat, then you should implement: // // parse_dog : Parser<Option<Animal>> // parse_cat : Parser<Option<Animal>> // parse_animal : Parser<Animal> // // 2. Irrecoverable. Return: // - Err(error_message) // // This will abort the entire parser, like a "throw", and return the error message. Use this // when you know that only one parsing branch can reach this location, yet the source is wrong. // // Check the Testree example at the bottom of this file. #![allow(dead_code)] use itertools::Itertools as _; // Types // ===== #[derive(Clone, Copy, Debug)] pub struct State<'a> { pub code: &'a str, pub index: usize, } impl<'a> State<'a> { fn rest(&self) -> Option<&'a str> { self.code.get(self.index..) } } pub type Answer<'a, A> = Result<(State<'a>, A), String>; pub type Parser<'a, A> = Box<dyn Fn(State<'a>) -> Answer<'a, A>>; // Utils // ===== pub fn find(text: &str, target: &str) -> usize { text.find(target).unwrap_or_else(|| panic!("`{}` not in `{}`.", target, text)) } pub fn read<'a, A>(parser: Parser<'a, A>, code: &'a str) -> Result<A, String> { match parser(State { code, index: 0 }) { Ok((_, value)) => Ok(value), Err(msg) => { Err(msg) } } } // Elims // ===== /// Maybe gets the current character. pub fn head(state: State) -> Option<char> { state.rest()?.chars().next() } /// Skips the current character. pub fn tail(state: State) -> State { let add = match head(state) { Some(c) => c.len_utf8(), None => 0, }; // NOTE: Could just mutate `state.index` here? State { code: state.code, index: state.index + add } } /// Skips comments and whitespace, then returns the next `char`, or the null /// character if this doesn't exist. pub fn get_char(state: State) -> Answer<char> { let (state, _) = skip(state)?; if let Some(got) = head(state) { let state = State { code: state.code, index: state.index + got.len_utf8() }; Ok((state, got)) } else { Ok((state, '\0')) } } pub fn get_char_parser<'a>() -> Parser<'a, char> { Box::new(get_char) } // Skippers // ======== pub fn skip_comment(mut state: State) -> Answer<bool> { const COMMENT: &str = "//"; if let Some(rest) = state.rest() { if let Some(line) = rest.lines().next() { if line.starts_with(COMMENT) { state.index += line.len(); return Ok((state, true)); } } } Ok((state, false)) } pub fn skip_comment_parser<'a>() -> Parser<'a, bool> { Box::new(skip_comment) } pub fn skip_spaces(mut state: State) -> Answer<bool> { if let Some(rest) = state.rest() { let add: usize = rest.chars().take_while(|a| a.is_whitespace()).map(|a| a.len_utf8()).sum(); state.index += add; if add > 0 { return Ok((state, true)); } } Ok((state, false)) } pub fn skip_spaces_parser<'a>() -> Parser<'a, bool> { Box::new(skip_spaces) } /// Skips comments and whitespace. 
pub fn skip(mut state: State) -> Answer<bool> { let (new_state, mut comment) = skip_comment(state)?; state = new_state; let (new_state, mut spaces) = skip_spaces(state)?; state = new_state; if comment || spaces { loop { let (new_state, new_comment) = skip_comment(state)?; state = new_state; comment = new_comment; let (new_state, new_spaces) = skip_spaces(state)?; state = new_state; spaces = new_spaces; if !comment && !spaces { return Ok((state, true)); } } } Ok((state, false)) } pub fn skip_parser<'a>() -> Parser<'a, bool> { Box::new(skip) } // Strings // ======= /// Attempts to match a string right after the cursor. /// Returns `true` if successful. Consumes string. pub fn text_here<'a>(pat: &str, state: State<'a>) -> Answer<'a, bool> { if let Some(rest) = state.rest() { if rest.starts_with(pat) { let state = State { code: state.code, index: state.index + pat.len() }; return Ok((state, true)); } } Ok((state, false)) } pub fn text_here_parser<'a>(pat: &'static str) -> Parser<'a, bool> { Box::new(move |x| text_here(pat, x)) } /// Like 'text_here', but skips whitespace and comments first. pub fn text<'a>(pat: &str, state: State<'a>) -> Answer<'a, bool> { let (state, _) = skip(state)?; let (state, matched) = text_here(pat, state)?; Ok((state, matched)) } pub fn text_parser<'a>(pat: &'static str) -> Parser<'a, bool> { Box::new(move |x| text(pat, x)) } /// Like 'text', but aborts if there is no match. pub fn consume<'a>(pat: &str, state: State<'a>) -> Answer<'a, ()> { let (state, matched) = text(pat, state)?; if matched { Ok((state, ())) } else { expected(pat, pat.len(), state) } } pub fn consume_parser<'a>(pat: &'static str) -> Parser<'a, ()> { Box::new(move |x| consume(pat, x)) } /// Returns `true` if cursor will be at the end of the file after skipping whitespace and comments. pub fn done(state: State) -> Answer<bool> { let (state, _) = skip(state)?; Ok((state, state.index == state.code.len())) } pub fn done_parser<'a>() -> Parser<'a, bool> { Box::new(done) } // Blocks // ====== /// Checks if a dry-run of the first parser returns `true`. /// If so, applies the second parser and returns `Some`. /// If no, returns `None`. pub fn guard<'a, A: 'a>( head: Parser<'a, bool>, body: Parser<'a, A>, state: State<'a>, ) -> Answer<'a, Option<A>> { let (state, _) = skip(state)?; let (_, matched) = head(state)?; if matched { let (state, got) = body(state)?; Ok((state, Some(got))) } else { Ok((state, None)) } } pub fn parser_or<'a>(parsers: &[Parser<'a, bool>], state: State<'a>) -> Answer<'a, bool> { for parser in parsers { let (state, matched) = parser(state)?; if matched { return Ok((state, true)); } } Ok((state, false)) } // Applies optional parsers in sequence. // Returns the first that succeeds. // If none succeeds, aborts. pub fn grammar<'a, A: 'a>( name: &'static str, choices: &[Parser<'a, Option<A>>], state: State<'a>, ) -> Answer<'a, A> { for choice in choices { let (state, result) = choice(state)?; if let Some(value) = result { return Ok((state, value)); } } expected(name, 1, state) } // Combinators // =========== pub fn maybe<'a, A: 'a>(parser: Parser<'a, A>, state: State<'a>) -> Answer<'a, Option<A>> { let result = parser(state); match result { Ok((state, result)) => Ok((state, Some(result))), Err(_) => Ok((state, None)), } } /// Evaluates a parser and returns its result, but reverts its effect. pub fn dry<'a, A: 'a>(parser: Parser<'a, A>, state: State<'a>) -> Answer<'a, A> { let (_, result) = parser(state)?; Ok((state, result)) } /// Evaluates a parser until a condition is met. 
Returns an array of results. pub fn until<'a, A: 'a>( delim: Parser<'a, bool>, parser: Parser<'a, A>, state: State<'a>, ) -> Answer<'a, Vec<A>> { let mut state = state; let mut result = Vec::new(); loop { let (new_state, delimited) = delim(state)?; if delimited { state = new_state; break; } else { let (new_state, a) = parser(new_state)?; state = new_state; result.push(a); } } Ok((state, result)) } /// Evaluates a list-like parser, with an opener, separator, and closer. pub fn list<'a, A: 'a, B: 'a>( parse_open: Parser<'a, bool>, parse_sep: Parser<'a, bool>, parse_close: Parser<'a, bool>, parse_elem: Parser<'a, A>, make: Box<dyn Fn(Vec<A>) -> B>, state: State<'a>, ) -> Answer<'a, B> { let (state, _) = parse_open(state)?; let mut state = state; let mut elems = Vec::new(); loop { let (new_state, done) = parse_close(state)?; let (new_state, _) = parse_sep(new_state)?; if done { state = new_state; break; } else { let (new_state, elem) = parse_elem(new_state)?; state = new_state; elems.push(elem); } } Ok((state, make(elems))) } // Name // ==== /// Checks if input is a valid character for names. fn is_letter(chr: char) -> bool { chr.is_ascii_alphanumeric() || chr == '_' || chr == '.' || chr == '$' } /// Parses a name right after the parsing cursor. pub fn name_here(state: State) -> Answer<String> { let mut name: String = String::new(); let mut state = state; while let Some(got) = head(state) { if is_letter(got) { name.push(got); state = tail(state); } else { break; } } Ok((state, name)) } /// Parses a name after skipping comments and whitespace. pub fn name(state: State) -> Answer<String> { let (state, _) = skip(state)?; name_here(state) } /// Parses a non-empty name after skipping. pub fn name1(state: State) -> Answer<String> { let (state, name1) = name(state)?; if !name1.is_empty() { Ok((state, name1)) } else { expected("name", 1, state) } } // Errors // ====== pub fn expected<'a, A>(name: &str, size: usize, state: State<'a>) -> Answer<'a, A> { Err(format!("Expected {}:\n{}", name, &highlight(state.index, state.index + size, state.code))) } // WARN: This fails if `from_index` or `to_index` are not `char` boundaries. // Should probably not use slice indexing directly, maybe use `get` method and // handle possible error instead? pub fn highlight(from_index: usize, to_index: usize, code: &str) -> String { debug_assert!(to_index >= from_index); debug_assert!(code.get(from_index..to_index).is_some()); //let open = "<<<<####"; //let close = "####>>>>"; let open = "««««"; let close = "»»»»"; let open_color = "\x1b[4m\x1b[31m"; let close_color = "\x1b[0m"; let mut from_line = 0; let mut to_line = 0; for (i, c) in code.char_indices().filter(|(_, c)| c == &'\n').take_while(|(i, _)| i < &to_index) { if i < from_index { from_line += c.len_utf8(); } to_line += c.len_utf8(); } let code = [&code[0..from_index], open, &code[from_index..to_index], close, &code[to_index..code.len()]] .concat(); let block_from_line = std::cmp::max(from_line as i64 - 3, 0) as usize; let block_to_line = std::cmp::min(to_line + 3, code.lines().count()); code .lines() .enumerate() .skip_while(|(i, _)| i < &block_from_line) .take_while(|(i, _)| i < &block_to_line) .map(|(_, line)| line) .enumerate() .format_with("", |(i, line), f| { let numb = block_from_line + i; // TODO: An allocation of an intermediate string still occurs here // which is inefficient. Should figure out how to improve this. 
let rest = if numb == from_line && numb == to_line { [ &line[0..find(line, open)], open_color, &line[find(line, open) + open.len()..find(line, close)], close_color, &line[find(line, close) + close.len()..line.len()], "\n", ] .concat() } else if numb == from_line { [&line[0..find(line, open)], open_color, &line[find(line, open)..line.len()], "\n"].concat() } else if numb > from_line && numb < to_line { [open_color, line, close_color, "\n"].concat() } else if numb == to_line { [ &line[0..find(line, open)], open_color, &line[find(line, open)..find(line, close) + close.len()], close_color, "\n", ] .concat() } else { [line, "\n"].concat() }; f(&format_args!(" {} | {}", numb, rest)) }) .to_string() } // Tests // ===== pub enum Testree { Node { lft: Box<Testree>, rgt: Box<Testree> }, Leaf { val: String }, } pub fn testree_show(tt: &Testree) -> String { match tt { Testree::Node { lft, rgt } => format!("({} {})", testree_show(lft), testree_show(rgt)), Testree::Leaf { val } => val.to_string(), } } pub fn node_parser<'a>() -> Parser<'a, Option<Box<Testree>>> { Box::new(|state| { guard( text_parser("("), Box::new(|state| { let (state, _) = consume("(", state)?; let (state, lft) = testree_parser()(state)?; let (state, rgt) = testree_parser()(state)?; let (state, _) = consume(")", state)?; Ok((state, Box::new(Testree::Node { lft, rgt }))) }), state, ) }) } pub fn leaf_parser<'a>() -> Parser<'a, Option<Box<Testree>>> { Box::new(|state| { guard( text_parser(""), Box::new(|state| { let (state, val) = name(state)?; Ok((state, Box::new(Testree::Leaf { val }))) }), state, ) }) } pub fn testree_parser<'a>() -> Parser<'a, Box<Testree>> { Box::new(|state| { let (state, tree) = grammar("Testree", &[node_parser(), leaf_parser()], state)?; Ok((state, tree)) }) } #[cfg(test)] mod tests { use super::*; use proptest::prelude::*; mod old_parser { use super::super::*; pub fn flatten(texts: &[&str]) -> String { texts.concat() } pub fn lines(text: &str) -> Vec<String> { text.lines().map(String::from).collect() } pub fn highlight(from_index: usize, to_index: usize, code: &str) -> String { //let open = "<<<<####"; //let close = "####>>>>"; let open = "««««"; let close = "»»»»"; let open_color = "\x1b[4m\x1b[31m"; let close_color = "\x1b[0m"; let mut from_line = 0; let mut to_line = 0; for (i, c) in code.chars().enumerate() { if c == '\n' { if i < from_index { from_line += 1; } if i < to_index { to_line += 1; } } } let code: String = flatten(&[ &code[0..from_index], open, &code[from_index..to_index], close, &code[to_index..code.len()], ]); let lines: Vec<String> = lines(&code); let block_from_line = std::cmp::max(from_line as i64 - 3, 0) as usize; let block_to_line = std::cmp::min(to_line + 3, lines.len()); let mut text = String::new(); for (i, line) in lines[block_from_line..block_to_line].iter().enumerate() { let numb = block_from_line + i; let rest; if numb == from_line && numb == to_line { rest = flatten(&[ &line[0..find(line, open)], open_color, &line[find(line, open) + open.len()..find(line, close)], close_color, &line[find(line, close) + close.len()..line.len()], "\n", ]); } else if numb == from_line { rest = flatten(&[ &line[0..find(line, open)], open_color, &line[find(line, open)..line.len()], "\n", ]); } else if numb > from_line && numb < to_line { rest = flatten(&[open_color, line, close_color, "\n"]); } else if numb == to_line { rest = flatten(&[ &line[0..find(line, open)], open_color, &line[find(line, open)..find(line, close) + close.len()], close_color, "\n", ]); } else { rest = flatten(&[line, "\n"]); } let 
line = format!(" {} | {}", numb, rest); text.push_str(&line); } text } } // Matches anything. const RE_ANY: &str = "(?s).*"; #[derive(Debug)] struct MockState { code: String, index: usize, } prop_compose! { fn state_tail()( any in RE_ANY, ch in any::<char>() ) -> MockState { let code = format!("{}{}", ch, any); let index = ch.len_utf8(); MockState { code, index } } } proptest! { #[test] fn test_tail(state in state_tail()) { let state_after = tail(State { code: &state.code, index: 0 }); prop_assert_eq!(state.index, state_after.index); prop_assert!( state_after.index <= state.code.len(), "\ncode length: {}\nindex: {}\n", state.code.len(), state_after.index ); } } const COMMENT: &str = "//"; // Matches any line (i.e., that doesn't contain `'\r'` or `'\n'`). const RE_LINE: &str = "[^\r\n]*"; prop_compose! { fn state_skip_comment()( line in RE_LINE.prop_filter( "Values must not start with `COMMENT`.", |a| !a.starts_with(COMMENT)), will_comment in any::<bool>(), any in RE_ANY, ) -> (MockState, bool) { let index = if will_comment { COMMENT.len() + line.len() } else { 0 }; let code = if will_comment { format!("{}{}\n{}", COMMENT, line, any) } else { format!("{}\n{}", line, any) }; (MockState { code, index }, will_comment) } } proptest! { #[test] fn test_skip_comment(state in state_skip_comment()) { let answer = skip_comment(State { code: &state.0.code, index: 0 }).unwrap(); let state_after = answer.0; prop_assert_eq!(state.0.index, state_after.index); prop_assert_eq!(state.1, answer.1); prop_assert!( state_after.index <= state.0.code.len(), "\ncode length: {}\nindex: {}\n", state.0.code.len(), state_after.index ); } } const RE_WHITESPACE: &str = "\\s+"; prop_compose! { fn state_skip_spaces()( any in RE_ANY.prop_filter( "Values must not start with whitespace.", |a| a == a.trim_start()), has_spaces in any::<bool>(), spaces in RE_WHITESPACE, ) -> (MockState, bool) { let index = if has_spaces { spaces.len() } else { 0 }; let code = if has_spaces { format!("{}{}", spaces, any) } else { any }; (MockState { code, index }, has_spaces) } } proptest! { #[test] fn test_skip_spaces(state in state_skip_spaces()) { let answer = skip_spaces(State { code: &state.0.code, index: 0 }).unwrap(); let state_after = answer.0; prop_assert_eq!(state.0.index, state_after.index); prop_assert_eq!(state.1, answer.1); prop_assert!( state_after.index <= state.0.code.len(), "\ncode length: {}\nindex: {}\n", state.0.code.len(), state_after.index ); } } prop_compose! { fn state_skip()( will_skip in any::<bool>(), any in RE_ANY.prop_filter( "Values must not start with whitespace or be a comment.", |a| { let a_trimmed = a.trim_start(); !a_trimmed.starts_with(COMMENT) && a == a_trimmed }), )( spaces_comments in if will_skip { prop::collection::vec((RE_WHITESPACE, RE_LINE), 0..10) } else { prop::collection::vec(("", ""), 0) }, will_skip in Just(will_skip), any in Just(any), ) -> (MockState, bool) { let mut code: String = if will_skip { spaces_comments .iter() .flat_map(|(space, comment)| [space.as_str(), COMMENT, comment.as_str(), "\n"]) .collect() } else { String::with_capacity(any.len()) }; let index = code.len(); code.push_str(&any); let will_skip = code.trim_start() != code || code.starts_with(COMMENT); (MockState { code, index }, will_skip) } } proptest! 
{ #[test] fn test_skip(state in state_skip()) { let answer = skip(State { code: &state.0.code, index: 0 }).unwrap(); let state_after = answer.0; prop_assert_eq!(state.0.index, state_after.index); prop_assert_eq!(state.1, answer.1); prop_assert!( state_after.index <= state.0.code.len(), "\ncode length: {}\nindex: {}\n", state.0.code.len(), state_after.index ); } } prop_compose! { fn range(from: usize, to: usize)(from in from..to)( to in from..to, from in Just(from) ) -> (usize, usize) { (from, to) } } // Matches lines with at least a single character. const RE_NON_EMPTY: &str = ".{1,}"; prop_compose! { fn args_highlight()(code in RE_NON_EMPTY)( code in Just(code.clone()), (from, to) in range(0, code.len()).prop_filter( "Values must be `char` boundaries.", move |(from, to)| { code.is_char_boundary(*from) && code.is_char_boundary(*to) }) ) -> (usize, usize, String) { (from, to, code) } } proptest! { #[test] fn test_highlight((from_index, to_index, code) in args_highlight()) { prop_assert_eq!( old_parser::highlight(from_index, to_index, &code), highlight(from_index, to_index, &code) ); } } }
26.944942
100
0.556976
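Assuming the parser module above is in scope as `parser`, its own `Testree` test grammar can be exercised end to end like this; the input string is an arbitrary example, not taken from the original file.

fn main() -> Result<(), String> {
    // `read` runs a parser from index 0 and returns the final value.
    let tree = parser::read(parser::testree_parser(), "(foo (bar baz))")?;
    // `testree_show` pretty-prints the parsed tree back out.
    assert_eq!(parser::testree_show(&tree), "(foo (bar baz))");
    Ok(())
}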
d61116e33ec86581eee50b456319263a3a2e2d8f
828
pub struct Path<'a> {
    path: &'a [u8],
    pos: usize,
}

impl<'a> Path<'a> {
    pub fn new(path: &[u8]) -> Path {
        Path { path, pos: 0 }
    }

    pub fn is_end(&self) -> bool {
        self.pos + 1 >= self.path.len()
    }
}

impl<'a> Iterator for Path<'a> {
    type Item = &'a [u8];

    fn next(&mut self) -> Option<Self::Item> {
        let start_pos = self.pos;
        let mut stop_pos = self.pos;
        // Scan forward to the next separator (`/` or `\`) or the end of the path.
        while stop_pos < self.path.len() {
            if self.path[stop_pos] == b'/' || self.path[stop_pos] == b'\\' {
                break;
            }
            stop_pos += 1;
        }
        if start_pos == stop_pos {
            None
        } else {
            self.pos = stop_pos + 1;
            Some(&self.path[start_pos..stop_pos])
        }
    }
}
20.195122
76
0.429952
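A short usage sketch for the `Path` component iterator above, assuming the type is in scope. It splits on both `/` and `\`; note that an empty component (for example from a doubled separator) makes `start_pos == stop_pos` and ends iteration early.

fn main() {
    let mut components = Path::new(b"usr/local/bin");
    assert_eq!(components.next(), Some(&b"usr"[..]));
    assert_eq!(components.next(), Some(&b"local"[..]));
    assert_eq!(components.next(), Some(&b"bin"[..]));
    assert_eq!(components.next(), None);
}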
f7a33df1faf21f126404152f04c51fcf3a72484c
1,116
use crate::{column_default, Column};
use procfs::{Io, ProcResult, Process};
use std::cmp;
use std::collections::HashMap;
use std::time::Duration;

pub struct Pid {
    header: String,
    unit: String,
    fmt_contents: HashMap<i32, String>,
    raw_contents: HashMap<i32, i32>,
    max_width: usize,
}

impl Pid {
    pub fn new() -> Self {
        let header = String::from("PID");
        let unit = String::from("");
        Pid {
            fmt_contents: HashMap::new(),
            raw_contents: HashMap::new(),
            max_width: cmp::max(header.len(), unit.len()),
            header,
            unit,
        }
    }
}

impl Column for Pid {
    fn add(
        &mut self,
        curr_proc: &Process,
        _prev_proc: &Process,
        _curr_io: &ProcResult<Io>,
        _prev_io: &ProcResult<Io>,
        _interval: &Duration,
    ) {
        let raw_content = curr_proc.pid();
        let fmt_content = format!("{}", raw_content);
        self.fmt_contents.insert(curr_proc.pid(), fmt_content);
        self.raw_contents.insert(curr_proc.pid(), raw_content);
    }

    column_default!(i32);
}
23.744681
63
0.569892
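The `Pid` column caches both a formatted and a raw value per process and relies on the crate's `column_default!` macro for the shared accessors. Below is a standalone sketch of that caching pattern, independent of procfs and the real `Column` trait; the width bookkeeping shown here is assumed to be what the macro provides, not its actual expansion.

use std::cmp;
use std::collections::HashMap;

struct SimpleColumn {
    header: String,
    fmt_contents: HashMap<i32, String>,
    raw_contents: HashMap<i32, i32>,
    max_width: usize,
}

impl SimpleColumn {
    fn new(header: &str) -> Self {
        SimpleColumn {
            header: header.to_string(),
            fmt_contents: HashMap::new(),
            raw_contents: HashMap::new(),
            max_width: header.len(),
        }
    }

    /// Store one row's value, tracking the widest rendering seen so far.
    fn add(&mut self, pid: i32, value: i32) {
        let rendered = value.to_string();
        self.max_width = cmp::max(self.max_width, rendered.len());
        self.fmt_contents.insert(pid, rendered);
        self.raw_contents.insert(pid, value);
    }
}

fn main() {
    let mut col = SimpleColumn::new("PID");
    col.add(1, 1);
    col.add(31337, 31337);
    assert_eq!(col.max_width, 5);
}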
71dbe66c277a9df2f6b34dd12178b414e884a8a1
21,944
#![allow(clippy::not_unsafe_ptr_arg_deref)] //! Dancing links `Grid` implementation for use in the `Solver`. mod base_node; use base_node::BaseNode; use core::{iter::once, ptr}; use std::collections::VecDeque; /// Dancing links grid, support efficient removal of rows and columns. #[derive(Debug)] pub struct Grid { // This node only left-right neighbors, no children root: *mut Column, arena: bumpalo::Bump, columns: Vec<*mut Column>, num_columns: usize, max_row: usize, } impl Grid { /// Create a new grid with a specified number of columns, and the given /// coordinates filled. /// /// Rows and columns are based 1 indexed for this grid, matching the /// indexing notation for matrices in general. pub fn new(num_columns: usize, coordinates: impl IntoIterator<Item = (usize, usize)>) -> Self { let arena = bumpalo::Bump::new(); let root = Column::new(&arena, 0); let columns = once(root) .chain((1..=num_columns).map(|idx| Column::new(&arena, idx))) .collect::<Vec<_>>(); // Chain all the columns together, including the sentinel root column. for idx in 0..columns.len() { let next_idx = (idx + 1) % columns.len(); let column = columns[idx]; let next_column = columns[next_idx]; Column::add_right(column, next_column); } let mut grid = Grid { root, columns, arena, num_columns, max_row: 0, }; grid.add_all_coordinates(coordinates); grid } fn add_all_coordinates(&mut self, coordinates: impl IntoIterator<Item = (usize, usize)>) { // Deduct one for the sentinel column let mut columns_data: Vec<Vec<_>> = (0..(self.columns.len() - 1)).map(|_| Vec::new()).collect(); for (row, column) in coordinates { debug_assert!( row != 0 && column != 0, "row or column should not equal zero [{:?}].", (row, column) ); debug_assert!( column <= columns_data.len(), "column idx should be in bounds [{:?}]", column ); columns_data[column - 1].push((row, column)); if self.max_row < row { self.max_row = row } } for column_data in &mut columns_data { column_data.sort_unstable_by_key(|(k, _)| *k); } // Map all the data into nodes let mut nodes: Vec<VecDeque<*mut Node>> = columns_data .into_iter() .map(|column_data| { column_data .into_iter() .map(|(row_idx, column_idx)| { let column = self.columns[column_idx]; Node::new(&self.arena, row_idx, column) }) .collect() }) .collect(); // Then, add all the vertical connections, without wrapping around. Skip the // first (sentinel) column. for (node_column, column_header) in nodes.iter_mut().zip(self.columns.iter().skip(1)) { let pair_it = node_column.iter().zip(node_column.iter().skip(1)); for (current_node, next_node) in pair_it { BaseNode::add_below(current_node.cast(), next_node.cast()); } // Connect first and last to header if let Some(first) = node_column.front() { BaseNode::add_below(column_header.cast(), first.cast()); if let Some(last) = node_column.back() { BaseNode::add_above(column_header.cast(), last.cast()); } } } // Then, add all horizontal connections, with wrap around // // To do this we need to select all nodes which have the same row value // and then chain them together. The column data is in sorted order from // before. // // For each column, collect a list with the top (least row value) node. Then, // for each value in the list, collect a subset that contains all the nodes with // the same least row value. They should also be in column order. This // collection will be linked together with wraparound. Then all those nodes that // were selected for the least subset will be replaced from the list with the // next relevant node from the column. 
let mut top_nodes: Vec<Option<(usize, *mut Node)>> = nodes .iter_mut() .map(|column_data| { let node = column_data.pop_front(); node.map(|node| unsafe { (ptr::read(node).row, node) }) }) .collect(); let mut least_nodes = Vec::<(usize, *mut Node)>::with_capacity(top_nodes.len()); while top_nodes.iter().any(Option::is_some) { let mut least_row = usize::MAX; // Select the subcollection of least row nodes for (idx, row_node_pair) in top_nodes.iter().enumerate() { if let Some((row, node)) = row_node_pair { use core::cmp::Ordering; match row.cmp(&least_row) { Ordering::Equal => { least_nodes.push((idx, *node)); } Ordering::Less => { least_nodes.clear(); least_row = *row; least_nodes.push((idx, *node)); } Ordering::Greater => {} } } } // Link all the least row nodes together // // This is fine for the case of (least_nodes.len() == 1) bc all nodes started // already linked to themselves. for (idx, (_, node)) in least_nodes.iter().enumerate() { let next_node_idx = (idx + 1) % least_nodes.len(); let (_, next_node) = least_nodes[next_node_idx]; BaseNode::add_right(node.cast(), next_node.cast()); } // Replace the least row nodes with the next values from their respective // columns. for (column_idx, _) in least_nodes.drain(..) { top_nodes[column_idx] = nodes[column_idx] .pop_front() .map(|node| unsafe { (ptr::read(node).row, node) }); } } } /// Convert the grid to a dense representation. /// /// This takes the original size of the grid, and only put `true` values for /// locations that are still present in the grid (not covered). pub fn to_dense(&self) -> Box<[Box<[bool]>]> { let seen_coords = self.uncovered_columns().flat_map(|column_ptr| { let column_idx = Column::index(column_ptr); Column::row_indices(column_ptr).map(move |row_idx| (row_idx, column_idx)) }); let mut output = vec![false; self.num_columns * self.max_row]; for (row_idx, column_idx) in seen_coords { output[(row_idx - 1) * self.num_columns + (column_idx - 1)] = true } if self.num_columns == 0 { debug_assert!(output.is_empty()); vec![].into_boxed_slice() } else { output .as_slice() .chunks(self.num_columns) .map(Box::<[_]>::from) .collect() } } /// Return an iterator of pointers to columns that are uncovered. pub fn uncovered_columns(&self) -> impl Iterator<Item = *const Column> { base_node::iter::right(self.root.cast(), Some(self.root.cast())) .map(|base_ptr| base_ptr.cast::<Column>()) } /// Return an iterator of mut pointers to columns that are uncovered. pub fn uncovered_columns_mut(&mut self) -> impl Iterator<Item = *mut Column> { base_node::iter::right_mut(self.root.cast(), Some(self.root.cast())) .map(|base_ptr| base_ptr.cast::<Column>()) } /// Return an iterator over all columns that are in the grid (covered and /// uncovered). pub fn all_columns_mut( &mut self, ) -> impl Iterator<Item = *mut Column> + DoubleEndedIterator + '_ { self.columns .iter() .copied() // Skip the sentinel .skip(1) } /// Return a pointer to a specific `Column`, if it exists. pub fn get_column(&self, index: usize) -> Option<*const Column> { self.columns .get(index) .copied() .map(|column_ptr| column_ptr as *const _) } /// Return a mut pointer to a specific `Column`, if it exists. pub fn get_column_mut(&mut self, index: usize) -> Option<*mut Column> { self.columns.get(index).copied() } /// Return true if there are no uncovered columns in the grid. pub fn is_empty(&self) -> bool { unsafe { let column = ptr::read(self.root); (column.base.right as *const _) == self.root.cast() } } } /// A coordinate inside of a `Grid`. 
#[derive(Debug, PartialEq, Eq, Hash)] #[repr(C)] pub struct Node { base: BaseNode, row: usize, column: *mut Column, } impl Node { fn new(arena: &bumpalo::Bump, row: usize, column: *mut Column) -> *mut Self { Column::increment_size(column); let node = arena.alloc(Node { base: BaseNode::new(), row, column, }); node.base.set_self_ptr(); node } /// Cover every `Node` that is horizontally adjacent to this `Node`. /// /// This `Node` is not covered. pub fn cover_row(self_ptr: *mut Node) { // Skip over the originating node in the row so that it can be recovered from // the column. base_node::iter::right_mut(self_ptr.cast(), Some(self_ptr.cast())).for_each( |base_ptr| unsafe { let node = ptr::read(base_ptr.cast::<Node>()); Column::decrement_size(node.column); BaseNode::cover_vertical(base_ptr); }, ) } /// Uncover every `Node` that is horizontally adjacent to this `Node`. /// /// This `Node` is not uncovered. pub fn uncover_row(self_ptr: *mut Self) { let base_ptr = self_ptr.cast::<BaseNode>(); base_node::iter::left_mut(base_ptr, Some(base_ptr)).for_each(|base_ptr| unsafe { let node = ptr::read(base_ptr.cast::<Node>()); Column::increment_size(node.column); BaseNode::uncover_vertical(base_ptr); }) } /// Return the row index of this `Node`. pub fn row_index(self_ptr: *const Self) -> usize { unsafe { ptr::read(self_ptr).row } } /// Return the column index of this `Node`. pub fn column_index(self_ptr: *const Self) -> usize { unsafe { let node = ptr::read(self_ptr); let column = ptr::read(node.column); column.index } } /// Return a mut pointer to the `Column` of this `Node`. pub fn column_ptr(self_ptr: *const Self) -> *mut Column { unsafe { let node = ptr::read(self_ptr); node.column } } /// Return an iterator over all `Node`s that are adjacent to this `Node`. pub fn neighbors(self_ptr: *const Self) -> impl Iterator<Item = *const Node> { base_node::iter::left(self_ptr.cast(), None).map(|base_ptr| base_ptr.cast()) } } /// A column inside of a `Grid`. #[derive(Debug, PartialEq, Eq, Hash)] #[repr(C)] pub struct Column { base: BaseNode, size: usize, index: usize, is_covered: bool, } impl Column { fn new(arena: &bumpalo::Bump, index: usize) -> *mut Self { let column = arena.alloc(Column { base: BaseNode::new(), size: 0, is_covered: false, index, }); column.base.set_self_ptr(); column } fn increment_size(self_ptr: *mut Self) { unsafe { let mut column = ptr::read(self_ptr); column.size += 1; ptr::write(self_ptr, column); } } fn decrement_size(self_ptr: *mut Self) { unsafe { let mut column = ptr::read(self_ptr); column.size -= 1; ptr::write(self_ptr, column); } } /// Cover entire column, and any rows that that appear in this column. pub fn cover(self_ptr: *mut Self) { let mut column = unsafe { ptr::read(self_ptr) }; assert!(!column.is_covered); let base_ptr = self_ptr.cast::<BaseNode>(); BaseNode::cover_horizontal(base_ptr); base_node::iter::down_mut(base_ptr, Some(base_ptr)) .for_each(|base_ptr| Node::cover_row(base_ptr.cast())); column.is_covered = true; unsafe { ptr::write(self_ptr, column); } } /// Uncover entire column, and any rows that appear in this column. 
pub fn uncover(self_ptr: *mut Self) { let mut column = unsafe { ptr::read(self_ptr) }; assert!(column.is_covered); let base_ptr = self_ptr.cast::<BaseNode>(); base_node::iter::up_mut(base_ptr, Some(base_ptr)) .for_each(|base_ptr| Node::uncover_row(base_ptr.cast())); BaseNode::uncover_horizontal(base_ptr); column.is_covered = false; unsafe { ptr::write(self_ptr, column); } } fn add_right(self_ptr: *mut Self, neighbor_ptr: *mut Column) { BaseNode::add_right(self_ptr.cast(), neighbor_ptr.cast()); } /// Return true if there are no uncovered `Node`s in this column. pub fn is_empty(self_ptr: *const Self) -> bool { unsafe { let column = ptr::read(self_ptr); let empty = (column.base.down as *const _) == self_ptr; debug_assert!( (empty && Self::size(self_ptr) == 0) || !empty, "The size should be tracked accurately." ); empty } } /// Return an iterator over the row indices of all uncovered `Node`s in this /// column. pub fn row_indices(self_ptr: *const Self) -> impl Iterator<Item = usize> { Column::rows(self_ptr).map(|node_ptr| unsafe { ptr::read(node_ptr).row }) } /// Return an iterator of pointers to all uncovered `Node`s in this column. pub fn rows(self_ptr: *const Self) -> impl Iterator<Item = *const Node> { base_node::iter::down(self_ptr.cast(), Some(self_ptr.cast())) .map(|base_ptr| base_ptr.cast()) } /// Return an iterator of mut pointers to all uncovered `Node`s in this /// column. pub fn nodes_mut(self_ptr: *mut Self) -> impl Iterator<Item = *mut Node> { base_node::iter::down_mut(self_ptr.cast(), Some(self_ptr.cast())) .map(|base_ptr| base_ptr.cast()) } /// Return the column index. pub fn index(self_ptr: *const Self) -> usize { unsafe { ptr::read(self_ptr).index } } /// Return the number of uncovered nodes in this column. pub fn size(self_ptr: *const Self) -> usize { unsafe { ptr::read(self_ptr).size } } } #[cfg(test)] pub fn to_string(grid: &Grid) -> String { use std::fmt::Write; let mut output = String::new(); let dense = grid.to_dense(); if dense.is_empty() { writeln!(&mut output, "Empty!").unwrap(); return output; } for row in dense.iter() { writeln!( &mut output, "{:?}", row.iter() .map(|yes| if *yes { 1 } else { 0 }) .collect::<Vec<_>>() ) .unwrap(); } output } #[cfg(test)] mod tests { use super::*; #[test] #[rustfmt::skip] fn create_a_small_grid() { let grid = Grid::new(4, vec![(1, 1), (1, 4), (2, 2), (3, 3), (4, 1), (4, 4)]); assert_eq!( grid.to_dense(), [ true, false, false, true, false, true, false, false, false, false, true, false, true, false, false, true ] .chunks(4) .map(Box::<[_]>::from) .collect() ); } #[test] #[rustfmt::skip] fn create_weird_grids() { let thin_grid = Grid::new(1, vec![ (1, 1), (2, 1), (3, 1), // skip 4 (5, 1), // skip 6, 7 (8, 1) ]); // The reasoning behind having the skipped rows not show up in // the dense output is that those rows are not present at all in // the assert_eq!( thin_grid.to_dense(), [ true, true, true, false, true, false, false, true ] .chunks(1) .map(Box::<[_]>::from) .collect() ); assert!(!thin_grid.is_empty()); let very_thin_grid = Grid::new(0, vec![]); assert_eq!(very_thin_grid.to_dense(), vec![].into_boxed_slice()); assert!(very_thin_grid.is_empty()); } #[test] #[rustfmt::skip] fn cover_uncover_column() { let mut grid = Grid::new(4, vec![(1, 1), (1, 4), (2, 2), (3, 3), (4, 1), (4, 4)]); // mutate the grid Column::cover(grid.all_columns_mut().nth(3).unwrap()); // Check remaining columns assert!(grid .uncovered_columns() .map(|column_ptr| unsafe { ptr::read(column_ptr).index }) .eq(1..=3)); assert_eq!( grid.to_dense(), [ false, false, 
false, false, false, true, false, false, false, false, true, false, false, false, false, false ] .chunks(4) .map(Box::<[_]>::from) .collect() ); // mutate the grid Column::uncover(grid.all_columns_mut().nth(3).unwrap()); // Check remaining columns assert!(grid .uncovered_columns() .map(|column_ptr| unsafe { ptr::read(column_ptr).index }) .eq(1..=4)); assert_eq!( grid.to_dense(), [ true, false, false, true, false, true, false, false, false, false, true, false, true, false, false, true ] .chunks(4) .map(Box::<[_]>::from) .collect() ); } #[test] #[rustfmt::skip] fn cover_uncover_all() { let mut grid = Grid::new(4, vec![ (1, 1), (1, 4), (2, 2), (3, 3), (4, 1), (4, 4) ]); // mutate the grid for column_ptr in grid.all_columns_mut() { Column::cover(column_ptr) } // Check remaining columns assert_eq!(grid.uncovered_columns().map(|column_ptr| unsafe { ptr::read(column_ptr).index }).count(), 0); assert_eq!( grid.to_dense(), [ false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false ] .chunks(4) .map(Box::<[_]>::from) .collect() ); assert!(grid.is_empty()); // mutate the grid for column_ptr in grid.all_columns_mut().rev() { Column::uncover(column_ptr) } // Check remaining columns assert!(grid.uncovered_columns().map(|column_ptr| unsafe { ptr::read(column_ptr).index }).eq(1..=4)); assert_eq!( grid.to_dense(), [ true, false, false, true, false, true, false, false, false, false, true, false, true, false, false, true ] .chunks(4) .map(Box::<[_]>::from) .collect() ); assert!(!grid.is_empty()); } #[test] #[rustfmt::skip] fn latin_square_cover_1() { // [1, 0, 0, 0, 1, 0] // [0, 1, 1, 0, 1, 0] // [1, 0, 0, 1, 0, 1] // [0, 1, 0, 0, 0, 1] let mut grid = Grid::new(6, vec![ (1, 1), (1, 5), (2, 2), (2, 3), (2, 5), (3, 1), (3, 4), (3, 6), (4, 2), (4, 6), ]); assert_eq!( grid.to_dense(), [ true, false, false, false, true, false, false, true, true, false, true, false, true, false, false, true, false, true, false, true, false, false, false, true, ] .chunks(6) .map(Box::<[_]>::from) .collect() ); assert!(!grid.is_empty()); Column::cover(grid.get_column_mut(2).unwrap()); Column::cover(grid.get_column_mut(3).unwrap()); Column::cover(grid.get_column_mut(5).unwrap()); assert_eq!( grid.to_dense(), [ false, false, false, false, false, false, false, false, false, false, false, false, true, false, false, true, false, true, false, false, false, false, false, false, ] .chunks(6) .map(Box::<[_]>::from) .collect() ); } }
30.6053
113
0.507246
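The cover/uncover primitives above are exactly what a dancing-links exact-cover search needs. The following is a sketch of that search loop against the `Grid` API, under the assumption that `Node::neighbors` visits the other nodes of a row exactly once; it is an illustration of the technique, not code from the crate.

fn search(grid: &mut Grid, solution: &mut Vec<usize>) -> bool {
    if grid.is_empty() {
        return true; // every column covered: exact cover found
    }
    // Pick the uncovered column with the fewest remaining nodes (MRV heuristic).
    let column = grid
        .uncovered_columns_mut()
        .min_by_key(|&c| Column::size(c))
        .expect("non-empty grid has an uncovered column");
    Column::cover(column);
    let rows: Vec<*const Node> = Column::rows(column).collect();
    for node in rows {
        solution.push(Node::row_index(node));
        // Cover every other column this row touches.
        let others: Vec<*mut Column> =
            Node::neighbors(node).map(Node::column_ptr).collect();
        for &c in &others {
            Column::cover(c);
        }
        if search(grid, solution) {
            return true;
        }
        // Undo in reverse order, mirroring the cover/uncover tests above.
        for &c in others.iter().rev() {
            Column::uncover(c);
        }
        solution.pop();
    }
    Column::uncover(column);
    false
}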
08138e60af9e373f0530292b990025a70166de97
51,813
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::clock::now; use crate::message::action_fuse::ActionFuseBuilder; use crate::message::base::{ filter, group, role, Address, Audience, MessageEvent, MessengerType, Payload, Role, Status, }; use crate::message::messenger::TargetedMessengerClient; use crate::message::receptor::Receptor; use crate::tests::message_utils::verify_payload; use fuchsia_zircon::DurationNum; use futures::future::BoxFuture; use futures::lock::Mutex; use futures::StreamExt; use std::fmt::Debug; use std::hash::Hash; use std::sync::Arc; use std::task::Poll; #[derive(Clone, PartialEq, Debug, Copy)] pub(crate) enum TestMessage { Foo, Bar, Baz, Qux, Thud, } #[derive(Clone, Eq, PartialEq, Debug, Copy, Hash)] pub(crate) enum TestAddress { Foo(u64), } #[derive(Clone, Eq, PartialEq, Debug, Copy, Hash)] pub(crate) enum TestRole { Foo, Bar, } /// Ensures the delivery result matches expected value. async fn verify_result< P: Payload + PartialEq + 'static, A: Address + PartialEq + 'static, R: Role + PartialEq + 'static, >( expected: Status, receptor: &mut Receptor<P, A, R>, ) { while let Some(message_event) = receptor.next().await { if let MessageEvent::Status(status) = message_event { if status == expected { return; } } } panic!("Didn't receive result expected"); } static ORIGINAL: TestMessage = TestMessage::Foo; static MODIFIED: TestMessage = TestMessage::Qux; static MODIFIED_2: TestMessage = TestMessage::Thud; static BROADCAST: TestMessage = TestMessage::Baz; static REPLY: TestMessage = TestMessage::Bar; mod test { use super::*; use crate::message_hub_definition; message_hub_definition!(TestMessage, TestAddress, TestRole); } mod num_test { use crate::message_hub_definition; message_hub_definition!(u64, u64); } /// Tests message client creation results in unique ids. #[fuchsia_async::run_until_stalled(test)] async fn test_message_client_equality() { let delegate = test::message::create_hub(); let (messenger, _) = delegate.create(MessengerType::Unbound).await.unwrap(); let (_, mut receptor) = delegate.create(MessengerType::Unbound).await.unwrap(); messenger.message(ORIGINAL, Audience::Broadcast).send(); let (_, client_1) = receptor.next_payload().await.unwrap(); messenger.message(ORIGINAL, Audience::Broadcast).send(); let (_, client_2) = receptor.next_payload().await.unwrap(); assert!(client_1 != client_2); assert_eq!(client_1, client_1.clone()); } /// Tests messenger creation and address space collision. #[fuchsia_async::run_until_stalled(test)] async fn test_messenger_creation() { let delegate = num_test::message::create_hub(); let address = 1; let messenger_1_result = delegate.create(MessengerType::Addressable(address)).await; assert!(messenger_1_result.is_ok()); assert!(delegate.create(MessengerType::Addressable(address)).await.is_err()); } /// Tests whether the client is reported as present after being created. 
#[fuchsia_async::run_until_stalled(test)] async fn test_messenger_presence() { let delegate = num_test::message::create_hub(); // Create unbound messenger let (_, receptor) = delegate.create(MessengerType::Unbound).await.expect("messenger should be created"); // Check for messenger's presence assert!(delegate.contains(receptor.get_signature()).await.expect("check should complete")); // Check for an address that shouldn't exist #[allow(clippy::bool_assert_comparison)] { assert_eq!( delegate .contains(num_test::message::Signature::Address(1)) .await .expect("check should complete"), false ); } } /// Tests messenger creation and address space collision. #[fuchsia_async::run_until_stalled(test)] async fn test_messenger_deletion() { let delegate = num_test::message::create_hub(); let address = 1; { let (_, _) = delegate.create(MessengerType::Addressable(address)).await.unwrap(); // By the time this subsequent create happens, the previous messenger and // receptor belonging to this address should have gone out of scope and // freed up the address space. assert!(delegate.create(MessengerType::Addressable(address)).await.is_ok()); } { // Holding onto the MessengerClient should prevent deletion. let (_messenger_client, _) = delegate.create(MessengerType::Addressable(address)).await.unwrap(); assert!(delegate.create(MessengerType::Addressable(address)).await.is_err()); } { // Holding onto the Receptor should prevent deletion. let (_, _receptor) = delegate.create(MessengerType::Addressable(address)).await.unwrap(); assert!(delegate.create(MessengerType::Addressable(address)).await.is_err()); } } #[fuchsia_async::run_until_stalled(test)] async fn test_messenger_deletion_with_fingerprint() { let delegate = num_test::message::create_hub(); let address = 1; let (_, mut receptor) = delegate.create(MessengerType::Addressable(address)).await.expect("should get receptor"); delegate.delete(receptor.get_signature()); assert!(receptor.next().await.is_none()); } /// Tests basic functionality of the MessageHub, ensuring messages and replies /// are properly delivered. #[fuchsia_async::run_until_stalled(test)] async fn test_end_to_end_messaging() { let delegate = test::message::create_hub(); let (messenger_client_1, _) = delegate.create(MessengerType::Addressable(TestAddress::Foo(1))).await.unwrap(); let (_, mut receptor_2) = delegate.create(MessengerType::Addressable(TestAddress::Foo(2))).await.unwrap(); let mut reply_receptor = messenger_client_1.message(ORIGINAL, Audience::Address(TestAddress::Foo(2))).send(); verify_payload( ORIGINAL, &mut receptor_2, Some(Box::new(|client| -> BoxFuture<'_, ()> { Box::pin(async move { let _ = client.reply(REPLY).send(); }) })), ) .await; verify_payload(REPLY, &mut reply_receptor, None).await; } /// Tests forwarding behavior, making sure a message is forwarded in the case /// the client does nothing with it. #[fuchsia_async::run_until_stalled(test)] async fn test_implicit_forward() { let delegate = test::message::create_hub(); let (messenger_client_1, _) = delegate.create(MessengerType::Addressable(TestAddress::Foo(1))).await.unwrap(); let (_, mut receiver_2) = delegate.create(MessengerType::Broker(None)).await.unwrap(); let (_, mut receiver_3) = delegate.create(MessengerType::Addressable(TestAddress::Foo(3))).await.unwrap(); let mut reply_receptor = messenger_client_1.message(ORIGINAL, Audience::Address(TestAddress::Foo(3))).send(); // Ensure observer gets payload and then do nothing with message. 
verify_payload(ORIGINAL, &mut receiver_2, None).await; verify_payload( ORIGINAL, &mut receiver_3, Some(Box::new(|client| -> BoxFuture<'_, ()> { Box::pin(async move { let _ = client.reply(REPLY).send(); }) })), ) .await; // Ensure observer gets payload and then do nothing with message. verify_payload(REPLY, &mut receiver_2, None).await; verify_payload(REPLY, &mut reply_receptor, None).await; } /// Exercises the observation functionality. Makes sure a broker who has /// indicated they would like to participate in a message path receives the /// reply. #[fuchsia_async::run_until_stalled(test)] async fn test_observe_addressable() { let delegate = test::message::create_hub(); let (messenger_client_1, _) = delegate.create(MessengerType::Addressable(TestAddress::Foo(1))).await.unwrap(); let (_, mut receptor_2) = delegate.create(MessengerType::Broker(None)).await.unwrap(); let (_, mut receptor_3) = delegate.create(MessengerType::Addressable(TestAddress::Foo(3))).await.unwrap(); let mut reply_receptor = messenger_client_1.message(ORIGINAL, Audience::Address(TestAddress::Foo(3))).send(); let observe_receptor = Arc::new(Mutex::new(None)); verify_payload(ORIGINAL, &mut receptor_2, { let observe_receptor = observe_receptor.clone(); Some(Box::new(move |mut client| -> BoxFuture<'_, ()> { Box::pin(async move { let mut receptor = observe_receptor.lock().await; *receptor = Some(client.spawn_observer()); }) })) }) .await; verify_payload( ORIGINAL, &mut receptor_3, Some(Box::new(|client| -> BoxFuture<'_, ()> { Box::pin(async move { let _ = client.reply(REPLY).send(); }) })), ) .await; if let Some(mut receptor) = observe_receptor.lock().await.take() { verify_payload(REPLY, &mut receptor, None).await; } else { panic!("A receptor should have been assigned") } verify_payload(REPLY, &mut reply_receptor, None).await; } /// Validates that timeout status is reached when there is no response #[test] fn test_timeout() { let mut executor = fuchsia_async::TestExecutor::new_with_fake_time().expect("Failed to create executor"); let timeout_ms = 1000; let fut = async move { let delegate = test::message::create_hub(); let (messenger_client_1, _) = delegate.create(MessengerType::Addressable(TestAddress::Foo(1))).await.unwrap(); let (_, mut receptor_2) = delegate.create(MessengerType::Addressable(TestAddress::Foo(2))).await.unwrap(); let mut reply_receptor = messenger_client_1 .message(ORIGINAL, Audience::Address(TestAddress::Foo(2))) .set_timeout(Some(timeout_ms.millis())) .send(); verify_payload( ORIGINAL, &mut receptor_2, Some(Box::new(|_| -> BoxFuture<'_, ()> { Box::pin(async move { // Do not respond. }) })), ) .await; verify_result(Status::Timeout, &mut reply_receptor).await; }; pin_utils::pin_mut!(fut); let _result = loop { executor.wake_main_future(); let new_time = fuchsia_async::Time::from_nanos( executor.now().into_nanos() + fuchsia_zircon::Duration::from_millis(timeout_ms).into_nanos(), ); match executor.run_one_step(&mut fut) { Some(Poll::Ready(x)) => break x, None => panic!("Executor stalled"), Some(Poll::Pending) => { executor.set_fake_time(new_time); } } }; } /// Tests the broadcast functionality. Ensures all non-sending, addressable /// messengers receive a broadcast message. 
#[fuchsia_async::run_until_stalled(test)] async fn test_broadcast() { let delegate = test::message::create_hub(); let (messenger_client_1, _) = delegate.create(MessengerType::Addressable(TestAddress::Foo(1))).await.unwrap(); let (_, mut receptor_2) = delegate.create(MessengerType::Addressable(TestAddress::Foo(2))).await.unwrap(); let (_, mut receptor_3) = delegate.create(MessengerType::Addressable(TestAddress::Foo(3))).await.unwrap(); messenger_client_1.message(ORIGINAL, Audience::Broadcast).send(); verify_payload(ORIGINAL, &mut receptor_2, None).await; verify_payload(ORIGINAL, &mut receptor_3, None).await; } /// Verifies delivery statuses are properly relayed back to the original sender. #[fuchsia_async::run_until_stalled(test)] async fn test_delivery_status() { let delegate = test::message::create_hub(); let known_receiver_address = TestAddress::Foo(2); let unknown_address = TestAddress::Foo(3); let (messenger_client_1, _) = delegate.create(MessengerType::Addressable(TestAddress::Foo(1))).await.unwrap(); let (_, mut receptor_2) = delegate.create(MessengerType::Addressable(known_receiver_address)).await.unwrap(); { let mut receptor = messenger_client_1.message(ORIGINAL, Audience::Address(known_receiver_address)).send(); // Ensure observer gets payload and then do nothing with message. verify_payload(ORIGINAL, &mut receptor_2, None).await; verify_result(Status::Received, &mut receptor).await; } { let mut receptor = messenger_client_1.message(ORIGINAL, Audience::Address(unknown_address)).send(); verify_result(Status::Undeliverable, &mut receptor).await; } } /// Verifies message is delivered even if messenger is deleted right /// after. #[fuchsia_async::run_until_stalled(test)] async fn test_send_delete() { let delegate = test::message::create_hub(); let (_, mut receptor_2) = delegate .create(MessengerType::Addressable(TestAddress::Foo(2))) .await .expect("client should be created"); { let (messenger_client_1, _) = delegate.create(MessengerType::Unbound).await.expect("client should be created"); messenger_client_1.message(ORIGINAL, Audience::Broadcast).send().ack(); } // Ensure observer gets payload and then do nothing with message. verify_payload(ORIGINAL, &mut receptor_2, None).await; } /// Verifies beacon returns error when receptor goes out of scope. #[fuchsia_async::run_until_stalled(test)] async fn test_beacon_error() { let delegate = test::message::create_hub(); let (messenger_client, _) = delegate.create(MessengerType::Addressable(TestAddress::Foo(1))).await.unwrap(); { let (_, mut receptor) = delegate.create(MessengerType::Addressable(TestAddress::Foo(2))).await.unwrap(); verify_result( Status::Received, &mut messenger_client.message(ORIGINAL, Audience::Address(TestAddress::Foo(2))).send(), ) .await; verify_payload(ORIGINAL, &mut receptor, None).await; } verify_result( Status::Undeliverable, &mut messenger_client.message(ORIGINAL, Audience::Address(TestAddress::Foo(2))).send(), ) .await; } /// Verifies Acknowledge is fully passed back. 
#[fuchsia_async::run_until_stalled(test)] async fn test_acknowledge() { let delegate = test::message::create_hub(); let (_, mut receptor) = delegate.create(MessengerType::Addressable(TestAddress::Foo(1))).await.unwrap(); let (messenger, _) = delegate.create(MessengerType::Unbound).await.unwrap(); let mut message_receptor = messenger.message(ORIGINAL, Audience::Address(TestAddress::Foo(1))).send(); verify_payload(ORIGINAL, &mut receptor, None).await; assert!(message_receptor.wait_for_acknowledge().await.is_ok()); } /// Verifies observers can participate in messaging. #[fuchsia_async::run_until_stalled(test)] async fn test_messenger_behavior() { // Run tests twice to ensure no one instance leads to a deadlock. for _ in 0..2 { verify_messenger_behavior(MessengerType::Broker(None)).await; verify_messenger_behavior(MessengerType::Unbound).await; verify_messenger_behavior(MessengerType::Addressable(TestAddress::Foo(2))).await; } } async fn verify_messenger_behavior( messenger_type: MessengerType<TestMessage, TestAddress, TestRole>, ) { let delegate = test::message::create_hub(); // Messenger to receive message. let (target_client, mut target_receptor) = delegate.create(MessengerType::Addressable(TestAddress::Foo(1))).await.unwrap(); // Author Messenger. let (test_client, mut test_receptor) = delegate.create(messenger_type).await.unwrap(); // Send top level message from the Messenger. let mut reply_receptor = test_client.message(ORIGINAL, Audience::Address(TestAddress::Foo(1))).send(); let captured_signature = Arc::new(Mutex::new(None)); // Verify target messenger received message and capture Signature. verify_payload(ORIGINAL, &mut target_receptor, { let captured_signature = captured_signature.clone(); Some(Box::new(move |client| -> BoxFuture<'_, ()> { Box::pin(async move { let mut author = captured_signature.lock().await; *author = Some(client.get_author()); client.reply(REPLY).send().ack(); }) })) }) .await; // Verify messenger received reply on the message receptor. verify_payload(REPLY, &mut reply_receptor, None).await; let messenger_signature = captured_signature.lock().await.take().expect("signature should be populated"); // Send top level message to Messenger. target_client.message(ORIGINAL, Audience::Messenger(messenger_signature)).send().ack(); // Verify Messenger received message. verify_payload(ORIGINAL, &mut test_receptor, None).await; } /// Ensures unbound messengers operate properly #[fuchsia_async::run_until_stalled(test)] async fn test_unbound_messenger() { let delegate = test::message::create_hub(); let (unbound_messenger_1, _) = delegate.create(MessengerType::Unbound).await.unwrap(); let (_, mut unbound_receptor) = delegate.create(MessengerType::Unbound).await.expect("messenger should be created"); let mut reply_receptor = unbound_messenger_1 .message(ORIGINAL, Audience::Messenger(unbound_receptor.get_signature())) .send(); // Verify target messenger received message and send response. verify_payload( ORIGINAL, &mut unbound_receptor, Some(Box::new(move |client| -> BoxFuture<'_, ()> { Box::pin(async move { client.reply(REPLY).send().ack(); }) })), ) .await; verify_payload(REPLY, &mut reply_receptor, None).await; } /// Ensures next_payload returns the correct values. 
#[fuchsia_async::run_until_stalled(test)] async fn test_next_payload() { let delegate = test::message::create_hub(); let (unbound_messenger_1, _) = delegate.create(MessengerType::Unbound).await.unwrap(); let (_, mut unbound_receptor_2) = delegate.create(MessengerType::Unbound).await.expect("should create messenger"); unbound_messenger_1 .message(ORIGINAL, Audience::Messenger(unbound_receptor_2.get_signature())) .send() .ack(); let receptor_result = unbound_receptor_2.next_payload().await; let (payload, _) = receptor_result.unwrap(); assert_eq!(payload, ORIGINAL); { let mut receptor = unbound_messenger_1.message(REPLY, Audience::Address(TestAddress::Foo(1))).send(); // Should return an error let receptor_result = receptor.next_payload().await; assert!(receptor_result.is_err()); } } /// Exercises basic action fuse behavior. #[fuchsia_async::run_until_stalled(test)] async fn test_action_fuse() { // Channel to send the message from the fuse. let (tx, mut rx) = futures::channel::mpsc::unbounded::<()>(); { let _ = ActionFuseBuilder::new() .add_action(Box::new(move || { tx.unbounded_send(()).unwrap(); })) .build(); } assert!(rx.next().await.is_some()); } /// Exercises chained action fuse behavior #[fuchsia_async::run_until_stalled(test)] async fn test_chained_action_fuse() { // Channel to send the message from the fuse. let (tx, mut rx) = futures::channel::mpsc::unbounded::<()>(); let (tx2, mut rx2) = futures::channel::mpsc::unbounded::<()>(); { let _ = ActionFuseBuilder::new() .add_action(Box::new(move || { tx.unbounded_send(()).unwrap(); })) .chain_fuse( ActionFuseBuilder::new() .add_action(Box::new(move || { tx2.unbounded_send(()).unwrap(); })) .build(), ) .build(); } // Root should fire first assert!(rx.next().await.is_some()); // Then chain reaction assert!(rx2.next().await.is_some()); } /// Exercises timestamp value. #[fuchsia_async::run_until_stalled(test)] async fn test_message_timestamp() { let delegate = test::message::create_hub(); let (messenger, _) = delegate.create(MessengerType::Unbound).await.unwrap(); let (_, mut receptor) = delegate.create(MessengerType::Unbound).await.unwrap(); let init_time = now(); messenger.message(ORIGINAL, Audience::Broadcast).send().ack(); let post_send_time = now(); while let Some(message_event) = receptor.next().await { if let MessageEvent::Message(incoming_payload, client) = message_event { assert_eq!(ORIGINAL, incoming_payload); let current_time = now(); let send_time = client.get_timestamp(); // Ensures the event timestamp was not taken before the event assert!(init_time <= send_time); // Compared against time right after message was sent to ensure that // timestamp was from the actual send time and not from when the // message was posted in the message hub. assert!(send_time <= post_send_time); // Make sure the time stamp was captured before the request for it. assert!(post_send_time <= current_time); return; } else { panic!("Should have received the broadcast first"); } } } /// Verifies that the proper signal is fired when a receptor disappears. 
#[fuchsia_async::run_until_stalled(test)] async fn test_bind_to_recipient() { let delegate = test::message::create_hub(); let (tx, mut rx) = futures::channel::mpsc::unbounded::<()>(); let (_, mut receptor) = delegate.create(MessengerType::Unbound).await.expect("should create messenger"); { let (scoped_messenger, _scoped_receptor) = delegate.create(MessengerType::Unbound).await.unwrap(); scoped_messenger .message(ORIGINAL, Audience::Messenger(receptor.get_signature())) .send() .ack(); if let Some(MessageEvent::Message(payload, mut client)) = receptor.next().await { assert_eq!(payload, ORIGINAL); client .bind_to_recipient( ActionFuseBuilder::new() .add_action(Box::new(move || { tx.unbounded_send(()).unwrap(); })) .build(), ) .await; } else { panic!("Should have received message"); } } // Receptor has fallen out of scope, should receive callback. assert!(rx.next().await.is_some()); } #[fuchsia_async::run_until_stalled(test)] async fn test_reply_propagation() { let delegate = test::message::create_hub(); // Create messenger to send source message. let (sending_messenger, _) = delegate.create(MessengerType::Unbound).await.expect("sending messenger should be created"); // Create broker to propagate a derived message. let (_, mut broker) = delegate .create(MessengerType::Broker(Some(filter::Builder::single(filter::Condition::Custom( Arc::new(move |message| *message.payload() == REPLY), ))))) .await .expect("broker should be created"); // Create messenger to be target of source message. let (_, mut target_receptor) = delegate.create(MessengerType::Unbound).await.expect("target messenger should be created"); // Send top level message. let mut result_receptor = sending_messenger .message(ORIGINAL, Audience::Messenger(target_receptor.get_signature())) .send(); // Ensure target receives message and reply back. verify_payload( ORIGINAL, &mut target_receptor, Some(Box::new(move |client| -> BoxFuture<'_, ()> { Box::pin(async move { client.reply(REPLY).send().ack(); }) })), ) .await; // Ensure broker receives reply and propagate modified message. verify_payload( REPLY, &mut broker, Some(Box::new(move |client| -> BoxFuture<'_, ()> { Box::pin(async move { client.propagate(MODIFIED).send().ack(); }) })), ) .await; // Ensure original sender gets reply. verify_payload(MODIFIED, &mut result_receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_propagation() { let delegate = test::message::create_hub(); // Create messenger to send source message. let (sending_messenger, sending_receptor) = delegate.create(MessengerType::Unbound).await.expect("sending messenger should be created"); let sending_signature = sending_receptor.get_signature(); // Create brokers to propagate a derived message. let (_, mut broker_1) = delegate.create(MessengerType::Broker(None)).await.expect("broker should be created"); let modifier_1_signature = broker_1.get_signature(); let (_, mut broker_2) = delegate.create(MessengerType::Broker(None)).await.expect("broker should be created"); let modifier_2_signature = broker_2.get_signature(); // Create messenger to be target of source message. let (_, mut target_receptor) = delegate.create(MessengerType::Unbound).await.expect("target messenger should be created"); // Send top level message. let mut result_receptor = sending_messenger .message(ORIGINAL, Audience::Messenger(target_receptor.get_signature())) .send(); // Ensure broker 1 receives original message and propagate modified message. 
verify_payload( ORIGINAL, &mut broker_1, Some(Box::new(move |client| -> BoxFuture<'_, ()> { Box::pin(async move { client.propagate(MODIFIED).send().ack(); }) })), ) .await; // Ensure broker 2 receives modified message and propagates a different // modified message. verify_payload( MODIFIED, &mut broker_2, Some(Box::new(move |client| -> BoxFuture<'_, ()> { Box::pin(async move { client.propagate(MODIFIED_2).send().ack(); }) })), ) .await; // Ensure target receives message and replies back. verify_payload( MODIFIED_2, &mut target_receptor, Some(Box::new(move |client| -> BoxFuture<'_, ()> { Box::pin(async move { // ensure the original author is attributed to the message. assert_eq!(client.get_author(), sending_signature); // ensure the modifiers are present. assert!(client.get_modifiers().contains(&modifier_1_signature)); assert!(client.get_modifiers().contains(&modifier_2_signature)); // ensure the message author has not been modified. client.reply(REPLY).send().ack(); }) })), ) .await; // Ensure original sender gets reply. verify_payload(REPLY, &mut result_receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_broker_filter_audience_broadcast() { // Prepare a message hub with a sender, broker, and target. let delegate = test::message::create_hub(); // Messenger to send broadcast message and targeted message. let (messenger, _) = delegate .create(MessengerType::Unbound) .await .expect("broadcast messenger should be created"); // Receptor to receive both broadcast and targeted messages. let (_, mut receptor) = delegate.create(MessengerType::Unbound).await.expect("target receptor should be created"); // Filter to target only broadcasts. let filter = filter::Builder::single(filter::Condition::Audience(Audience::Broadcast)); // Broker to receive broadcast. It should not receive targeted messages. let (_, mut broker_receptor) = delegate .create(MessengerType::Broker(Some(filter))) .await .expect("broker should be created"); // Send targeted message. messenger.message(ORIGINAL, Audience::Messenger(receptor.get_signature())).send().ack(); // Verify receptor gets message. verify_payload(ORIGINAL, &mut receptor, None).await; // Broadcast message. messenger.message(BROADCAST, Audience::Broadcast).send().ack(); // Ensure broker gets broadcast. If the targeted message was received, this // will fail. verify_payload(BROADCAST, &mut broker_receptor, None).await; // Ensure receptor gets broadcast. verify_payload(BROADCAST, &mut receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_broker_filter_audience_messenger() { // Prepare a message hub with a sender, broker, and target. let delegate = test::message::create_hub(); // Messenger to send broadcast message and targeted message. let (messenger, _) = delegate .create(MessengerType::Unbound) .await .expect("broadcast messenger should be created"); // Receptor to receive both broadcast and targeted messages. let (_, mut receptor) = delegate.create(MessengerType::Unbound).await.expect("target messenger should be created"); // Filter to target only messenger. let filter = filter::Builder::single(filter::Condition::Audience(Audience::Messenger( receptor.get_signature(), ))); // Broker that should only target messages for a given messenger. let (_, mut broker_receptor) = delegate .create(MessengerType::Broker(Some(filter))) .await .expect("broker should be created"); // Send broadcast message. messenger.message(BROADCAST, Audience::Broadcast).send().ack(); // Verify receptor gets message.
verify_payload(BROADCAST, &mut receptor, None).await; // Send targeted message. messenger.message(ORIGINAL, Audience::Messenger(receptor.get_signature())).send().ack(); // Ensure broker gets message. If the broadcast message was received, this // will fail. verify_payload(ORIGINAL, &mut broker_receptor, None).await; // Ensure receptor gets the targeted message. verify_payload(ORIGINAL, &mut receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_broker_filter_audience_address() { // Prepare a message hub with a sender, broker, and target. let delegate = test::message::create_hub(); // Messenger to send broadcast message and targeted message. let (messenger, _) = delegate .create(MessengerType::Unbound) .await .expect("broadcast messenger should be created"); // Receptor to receive both broadcast and targeted messages. let target_address = TestAddress::Foo(2); let (_, mut receptor) = delegate .create(MessengerType::Addressable(target_address)) .await .expect("target receptor should be created"); // Filter to target only the address. let filter = filter::Builder::single(filter::Condition::Audience(Audience::Address(target_address))); // Broker that should only target messages for a given address. let (_, mut broker_receptor) = delegate .create(MessengerType::Broker(Some(filter))) .await .expect("broker should be created"); // Send broadcast message. messenger.message(BROADCAST, Audience::Broadcast).send().ack(); // Verify receptor gets message. verify_payload(BROADCAST, &mut receptor, None).await; // Send targeted message. messenger.message(ORIGINAL, Audience::Address(target_address)).send().ack(); // Ensure broker gets message. If the broadcast message was received, this // will fail. verify_payload(ORIGINAL, &mut broker_receptor, None).await; // Ensure receptor gets the targeted message. verify_payload(ORIGINAL, &mut receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_broker_filter_author() { // Prepare a message hub with a sender, broker, and target. let delegate = test::message::create_hub(); // Messenger to send targeted message. let author_address = TestAddress::Foo(1); let (messenger, _) = delegate .create(MessengerType::Addressable(author_address)) .await .expect("messenger should be created"); // Receptor to receive targeted message. let target_address = TestAddress::Foo(2); let (_, mut receptor) = delegate .create(MessengerType::Addressable(target_address)) .await .expect("target receptor should be created"); // Filter to target only messages with a particular author. let filter = filter::Builder::single(filter::Condition::Author( test::message::Signature::Address(author_address), )); // Broker that should only target messages for a given author. let (_, mut broker_receptor) = delegate .create(MessengerType::Broker(Some(filter))) .await .expect("broker should be created"); // Send targeted message. messenger.message(ORIGINAL, Audience::Address(target_address)).send().ack(); // Ensure broker gets message. verify_payload(ORIGINAL, &mut broker_receptor, None).await; // Ensure receptor gets message. verify_payload(ORIGINAL, &mut receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_broker_filter_custom() { // Prepare a message hub with a sender, broker, and target. let delegate = test::message::create_hub(); // Messenger to send broadcast message and targeted message.
let (messenger, _) = delegate .create(MessengerType::Unbound) .await .expect("broadcast messenger should be created"); // Filter to target only the ORIGINAL message. let filter = filter::Builder::single(filter::Condition::Custom(Arc::new(|message| { *message.payload() == ORIGINAL }))); // Broker that should only target ORIGINAL messages. let (_, mut broker_receptor) = delegate .create(MessengerType::Broker(Some(filter))) .await .expect("broker should be created"); // Send broadcast message. messenger.message(BROADCAST, Audience::Broadcast).send().ack(); // Send original message. messenger.message(ORIGINAL, Audience::Broadcast).send().ack(); // Ensure broker gets message. If the broadcast message was received, this // will fail. verify_payload(ORIGINAL, &mut broker_receptor, None).await; } /// Verify that using a closure that captures a variable for a custom filter works, since it can't /// be used in place of a function pointer. #[fuchsia_async::run_until_stalled(test)] async fn test_broker_filter_capturing_closure() { // Prepare a message hub with a sender, broker, and target. let delegate = test::message::create_hub(); // Messenger to send broadcast message and targeted message. let (messenger, _) = delegate .create(MessengerType::Unbound) .await .expect("broadcast messenger should be created"); // Filter to target only the Foo message. let expected_payload = TestMessage::Foo; let filter = filter::Builder::single(filter::Condition::Custom(Arc::new(move |message| { *message.payload() == expected_payload }))); // Broker that should only target Foo messages. let (_, mut broker_receptor) = delegate .create(MessengerType::Broker(Some(filter))) .await .expect("broker should be created"); // Send broadcast message. messenger.message(BROADCAST, Audience::Broadcast).send().ack(); // Send foo message. messenger.message(expected_payload, Audience::Broadcast).send().ack(); // Ensure broker gets message. If the broadcast message was received, this // will fail. verify_payload(expected_payload, &mut broker_receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_broker_filter_combined_any() { // Prepare a message hub with a sender, broker, and target. let delegate = test::message::create_hub(); // Messenger to send broadcast message and targeted message. let (messenger, _) = delegate .create(MessengerType::Unbound) .await .expect("broadcast messenger should be created"); // Receptor for messages. let target_address = TestAddress::Foo(2); let (_, mut receptor) = delegate .create(MessengerType::Addressable(target_address)) .await .expect("addressable messenger should be created"); // Filter to target only the ORIGINAL message. let filter = filter::Builder::new( filter::Condition::Custom(Arc::new(|message| *message.payload() == ORIGINAL)), filter::Conjugation::Any, ) .append(filter::Condition::Filter(filter::Builder::single(filter::Condition::Audience( Audience::Broadcast, )))) .build(); // Broker that should only target ORIGINAL messages and broadcast audiences. let (_, mut broker_receptor) = delegate .create(MessengerType::Broker(Some(filter))) .await .expect("broker should be created"); // Send broadcast message. messenger.message(BROADCAST, Audience::Broadcast).send().ack(); // Broker should receive a match based on the broadcast audience. verify_payload(BROADCAST, &mut broker_receptor, None).await; // Other receptors should receive the broadcast as well. verify_payload(BROADCAST, &mut receptor, None).await; // Send original message to target.
messenger.message(ORIGINAL, Audience::Address(target_address)).send().ack(); // Ensure broker gets message. verify_payload(ORIGINAL, &mut broker_receptor, None).await; // Ensure target gets message as well. verify_payload(ORIGINAL, &mut receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_broker_filter_combined_all() { // Prepare a message hub with a sender, broker, and target. let delegate = test::message::create_hub(); // Messenger to send broadcast message and targeted message. let (messenger, _) = delegate.create(MessengerType::Unbound).await.expect("sending messenger should be created"); // Receptor for messages. let target_address = TestAddress::Foo(2); let (_, mut receptor) = delegate .create(MessengerType::Addressable(target_address)) .await .expect("receiving messenger should be created"); // Filter to target only the ORIGINAL message. let filter = filter::Builder::new( filter::Condition::Custom(Arc::new(|message| *message.payload() == ORIGINAL)), filter::Conjugation::All, ) .append(filter::Condition::Filter(filter::Builder::single(filter::Condition::Audience( Audience::Address(target_address), )))) .build(); // Broker that should only target ORIGINAL messages sent to the target address. let (_, mut broker_receptor) = delegate .create(MessengerType::Broker(Some(filter))) .await .expect("broker should be created"); // Send REPLY message. Should not match broker since content does not match. messenger.message(REPLY, Audience::Address(target_address)).send().ack(); // The target receptor should still receive the message. verify_payload(REPLY, &mut receptor, None).await; // Send ORIGINAL message to target. messenger.message(ORIGINAL, Audience::Address(target_address)).send().ack(); // Ensure broker gets message. verify_payload(ORIGINAL, &mut broker_receptor, None).await; // Ensure target gets message as well. verify_payload(ORIGINAL, &mut receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_group_message() { // Prepare a message hub with a sender and multiple targets. let delegate = test::message::create_hub(); // Messenger to send message. let (messenger, _) = delegate.create(MessengerType::Unbound).await.unwrap(); // Receptors for messages. let target_address_1 = TestAddress::Foo(1); let (_, mut receptor_1) = delegate.create(MessengerType::Addressable(target_address_1)).await.unwrap(); let target_address_2 = TestAddress::Foo(2); let (_, mut receptor_2) = delegate.create(MessengerType::Addressable(target_address_2)).await.unwrap(); let (_, mut receptor_3) = delegate.create(MessengerType::Unbound).await.unwrap(); let audience = Audience::Group( group::Builder::new() .add(Audience::Address(target_address_1)) .add(Audience::Address(target_address_2)) .build(), ); // Send message targeting both receptors. messenger.message(ORIGINAL, audience).send().ack(); // Receptors should both receive the message. verify_payload(ORIGINAL, &mut receptor_1, None).await; verify_payload(ORIGINAL, &mut receptor_2, None).await; // Broadcast and ensure the untargeted receptor gets that message next. messenger.message(BROADCAST, Audience::Broadcast).send().ack(); verify_payload(BROADCAST, &mut receptor_3, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_group_message_redundant_targets() { // Prepare a message hub with a sender, broker, and target. let delegate = test::message::create_hub(); // Messenger to send broadcast message and targeted message.
let (messenger, _) = delegate.create(MessengerType::Unbound).await.unwrap(); // Receptors for messages. let target_address = TestAddress::Foo(1); let (_, mut receptor) = delegate .create(MessengerType::Addressable(target_address)) .await .expect("messenger should be created"); // Create audience with multiple references to same messenger. let audience = Audience::Group( group::Builder::new() .add(Audience::Address(target_address)) .add(Audience::Messenger(receptor.get_signature())) .add(Audience::Broadcast) .build(), ); // Send Original message. messenger.message(ORIGINAL, audience.clone()).send().ack(); // Receptor should receive message. verify_payload(ORIGINAL, &mut receptor, None).await; // Send Reply message. messenger.message(REPLY, audience).send().ack(); // Receptor should receive Reply message and not another Original message. verify_payload(REPLY, &mut receptor, None).await; } #[fuchsia_async::run_until_stalled(test)] async fn test_audience_matching() { let target_audience: Audience<TestAddress> = Audience::Address(TestAddress::Foo(1)); // An audience should contain itself. assert!(target_audience.contains(&target_audience)); // An audience with only broadcast should not match. #[allow(clippy::bool_assert_comparison)] { let audience = Audience::Group(group::Builder::new().add(Audience::Broadcast).build()); assert_eq!(audience.contains(&target_audience), false); } // An audience group with the target audience should match. { let audience = Audience::Group(group::Builder::new().add(target_audience.clone()).build()); assert!(audience.contains(&target_audience)); } // An audience group with the target audience nested should match. { let audience = Audience::Group( group::Builder::new() .add(Audience::Group(group::Builder::new().add(target_audience.clone()).build())) .build(), ); assert!(audience.contains(&target_audience)); } // A subset should be contained within a superset and a superset should // not be contained in a subset. { let target_audience_2 = Audience::Address(TestAddress::Foo(2)); let target_audience_3 = Audience::Address(TestAddress::Foo(3)); let audience_subset = Audience::Group( group::Builder::new() .add(target_audience.clone()) .add(target_audience_2.clone()) .build(), ); let audience_set = Audience::Group( group::Builder::new() .add(target_audience) .add(target_audience_2) .add(target_audience_3) .build(), ); assert!(audience_set.contains(&audience_subset)); #[allow(clippy::bool_assert_comparison)] { assert_eq!(audience_subset.contains(&audience_set), false); } } } /// Ensures all members of a role receive messages. #[fuchsia_async::run_until_stalled(test)] async fn test_roles_membership() { // Prepare a message hub. let delegate = test::message::create_hub(); // Create messengers who participate in roles. let (_, mut foo_role_receptor) = delegate .messenger_builder(MessengerType::Unbound) .add_role(role::Signature::role(TestRole::Foo)) .build() .await .expect("recipient messenger should be created"); let (_, mut foo_role_receptor_2) = delegate .messenger_builder(MessengerType::Unbound) .add_role(role::Signature::role(TestRole::Foo)) .build() .await .expect("recipient messenger should be created"); // Create messenger to send a message to the given participant.
let (sender, _) = delegate .messenger_builder(MessengerType::Unbound) .build() .await .expect("sending messenger should be created"); let message = TestMessage::Foo; let audience = Audience::Role(role::Signature::role(TestRole::Foo)); sender.message(message, audience).send().ack(); // Verify payload received by role members. verify_payload(message, &mut foo_role_receptor, None).await; verify_payload(message, &mut foo_role_receptor_2, None).await; } /// Ensures roles don't receive each other's messages. #[fuchsia_async::run_until_stalled(test)] async fn test_roles_exclusivity() { // Prepare a message hub. let delegate = test::message::create_hub(); // Create messengers who participate in roles. let (_, mut foo_role_receptor) = delegate .messenger_builder(MessengerType::Unbound) .add_role(role::Signature::role(TestRole::Foo)) .build() .await .expect("recipient messenger should be created"); let (_, mut bar_role_receptor) = delegate .messenger_builder(MessengerType::Unbound) .add_role(role::Signature::role(TestRole::Bar)) .build() .await .expect("recipient messenger should be created"); // Create messenger to send a message to the given participant. let (sender, _) = delegate .messenger_builder(MessengerType::Unbound) .build() .await .expect("sending messenger should be created"); // Send messages to roles. { let message = TestMessage::Bar; let audience = Audience::Role(role::Signature::role(TestRole::Bar)); sender.message(message, audience).send().ack(); // Verify payload received by role members. verify_payload(message, &mut bar_role_receptor, None).await; } { let message = TestMessage::Foo; let audience = Audience::Role(role::Signature::role(TestRole::Foo)); sender.message(message, audience).send().ack(); // Verify payload received by role members. verify_payload(message, &mut foo_role_receptor, None).await; } } /// Ensures only role members receive messages directed to the role. #[fuchsia_async::run_until_stalled(test)] async fn test_roles_audience() { // Prepare a message hub. let delegate = test::message::create_hub(); // Create messenger who participates in a role. let (_, mut foo_role_receptor) = delegate .messenger_builder(MessengerType::Unbound) .add_role(role::Signature::role(TestRole::Foo)) .build() .await .expect("recipient messenger should be created"); // Create another messenger with no role to ensure messages are not routed // improperly to other messengers. let (_, mut outside_receptor) = delegate .messenger_builder(MessengerType::Unbound) .build() .await .expect("other messenger should be created"); let outside_signature = outside_receptor.get_signature(); // Create messenger to send a message to the given participant. let (sender, _) = delegate .messenger_builder(MessengerType::Unbound) .build() .await .expect("sending messenger should be created"); // Send message to role. { let message = TestMessage::Foo; let audience = Audience::Role(role::Signature::role(TestRole::Foo)); sender.message(message, audience).send().ack(); // Verify payload received by role members. verify_payload(message, &mut foo_role_receptor, None).await; } // Send message to outside messenger. { let message = TestMessage::Baz; let audience = Audience::Messenger(outside_signature); sender.message(message, audience).send().ack(); // Since outside messenger isn't part of the role, the next message should // be the one sent directly to it, rather than the role.
verify_payload(message, &mut outside_receptor, None).await; } } #[fuchsia_async::run_until_stalled(test)] async fn test_anonymous_roles() { // Prepare a message hub. let delegate = test::message::create_hub(); // Create anonymous role. let role = delegate.create_role().await.expect("Role should be returned"); // Create messenger who participates in role. let (_, mut role_receptor) = delegate .messenger_builder(MessengerType::Unbound) .add_role(role) .build() .await .expect("recipient messenger should be created"); // Create messenger to send a message to the given participant. let (sender, _) = delegate .messenger_builder(MessengerType::Unbound) .build() .await .expect("sending messenger should be created"); // Send messages to role. let message = TestMessage::Bar; let audience = Audience::Role(role); sender.message(message, audience).send().ack(); // Verify payload received by role member. verify_payload(message, &mut role_receptor, None).await; } /// Ensures targeted messengers deliver payload to intended audience. #[fuchsia_async::run_until_stalled(test)] async fn test_targeted_messenger_client() { let test_message = TestMessage::Foo; // Prepare a message hub for sender and target. let delegate = test::message::create_hub(); // Create target messenger. let (_, mut target_receptor) = delegate .create(MessengerType::Unbound) .await .expect("receiving messenger should be created"); // Create targeted messenger. let targeted_messenger = TargetedMessengerClient::new( delegate .create(MessengerType::Unbound) .await .expect("sending messenger should be created") .0, Audience::Messenger(target_receptor.get_signature()), ); // Send message. targeted_messenger.message(test_message).send().ack(); // Receptor should receive the test message. verify_payload(test_message, &mut target_receptor, None).await; }
36.46235
100
0.658618
c150268cd36e78de29ae7f1403498650db84d367
5,637
//! The `hash` module provides functions for creating SHA-256 hashes. use { crate::sanitize::Sanitize, borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, sha2::{Digest, Sha256}, std::{convert::TryFrom, fmt, mem, str::FromStr}, thiserror::Error, }; pub const HASH_BYTES: usize = 32; /// Maximum string length of a base58-encoded hash. const MAX_BASE58_LEN: usize = 44; #[derive( Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema, Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash, AbiExample, )] #[repr(transparent)] pub struct Hash(pub(crate) [u8; HASH_BYTES]); #[derive(Clone, Default)] pub struct Hasher { hasher: Sha256, } impl Hasher { pub fn hash(&mut self, val: &[u8]) { self.hasher.update(val); } pub fn hashv(&mut self, vals: &[&[u8]]) { for val in vals { self.hash(val); } } pub fn result(self) -> Hash { // At the time of this writing, the sha2 library is stuck on an old version // of generic_array (0.9.0). Decouple ourselves with a clone to our version. Hash(<[u8; HASH_BYTES]>::try_from(self.hasher.finalize().as_slice()).unwrap()) } } impl Sanitize for Hash {} impl AsRef<[u8]> for Hash { fn as_ref(&self) -> &[u8] { &self.0[..] } } impl fmt::Debug for Hash { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", bs58::encode(self.0).into_string()) } } impl fmt::Display for Hash { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", bs58::encode(self.0).into_string()) } } #[derive(Debug, Clone, PartialEq, Eq, Error)] pub enum ParseHashError { #[error("string decoded to wrong size for hash")] WrongSize, #[error("failed to decode string to hash")] Invalid, } impl FromStr for Hash { type Err = ParseHashError; fn from_str(s: &str) -> Result<Self, Self::Err> { if s.len() > MAX_BASE58_LEN { return Err(ParseHashError::WrongSize); } let bytes = bs58::decode(s) .into_vec() .map_err(|_| ParseHashError::Invalid)?; if bytes.len() != mem::size_of::<Hash>() { Err(ParseHashError::WrongSize) } else { Ok(Hash::new(&bytes)) } } } impl Hash { pub fn new(hash_slice: &[u8]) -> Self { Hash(<[u8; HASH_BYTES]>::try_from(hash_slice).unwrap()) } pub const fn new_from_array(hash_array: [u8; HASH_BYTES]) -> Self { Self(hash_array) } /// Unique Hash for tests and benchmarks. pub fn new_unique() -> Self { use crate::atomic_u64::AtomicU64; static I: AtomicU64 = AtomicU64::new(1); let mut b = [0u8; HASH_BYTES]; let i = I.fetch_add(1); b[0..8].copy_from_slice(&i.to_le_bytes()); Self::new(&b) } pub fn to_bytes(self) -> [u8; HASH_BYTES] { self.0 } } /// Return a Sha256 hash for the given data. pub fn hashv(vals: &[&[u8]]) -> Hash { // Perform the calculation inline; calling this from within a program is // not supported. #[cfg(not(target_arch = "bpf"))] { let mut hasher = Hasher::default(); hasher.hashv(vals); hasher.result() } // Call via a system call to perform the calculation #[cfg(target_arch = "bpf")] { extern "C" { fn sol_sha256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64; } let mut hash_result = [0; HASH_BYTES]; unsafe { sol_sha256( vals as *const _ as *const u8, vals.len() as u64, &mut hash_result as *mut _ as *mut u8, ); } Hash::new_from_array(hash_result) } } /// Return a Sha256 hash for the given data. pub fn hash(val: &[u8]) -> Hash { hashv(&[val]) } /// Return the hash of the given hash extended with the given value.
pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash { let mut hash_data = id.as_ref().to_vec(); hash_data.extend_from_slice(val); hash(&hash_data) } #[cfg(test)] mod tests { use super::*; #[test] fn test_new_unique() { assert!(Hash::new_unique() != Hash::new_unique()); } #[test] fn test_hash_fromstr() { let hash = hash(&[1u8]); let mut hash_base58_str = bs58::encode(hash).into_string(); assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash)); hash_base58_str.push_str(&bs58::encode(hash.0).into_string()); assert_eq!( hash_base58_str.parse::<Hash>(), Err(ParseHashError::WrongSize) ); hash_base58_str.truncate(hash_base58_str.len() / 2); assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash)); hash_base58_str.truncate(hash_base58_str.len() / 2); assert_eq!( hash_base58_str.parse::<Hash>(), Err(ParseHashError::WrongSize) ); let input_too_big = bs58::encode(&[0xffu8; HASH_BYTES + 1]).into_string(); assert!(input_too_big.len() > MAX_BASE58_LEN); assert_eq!( input_too_big.parse::<Hash>(), Err(ParseHashError::WrongSize) ); let mut hash_base58_str = bs58::encode(hash.0).into_string(); assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash)); // throw some non-base58 stuff in there hash_base58_str.replace_range(..1, "I"); assert_eq!( hash_base58_str.parse::<Hash>(), Err(ParseHashError::Invalid) ); } }
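// A small illustrative check (ours, not from the original module): because
// `Hasher` streams all inputs into a single SHA-256 state, `hashv` over several
// slices should equal `hash` over their concatenation, and `extend_and_hash`
// should equal hashing the hash's bytes followed by the new data. The module
// and test names below are hypothetical additions.
#[cfg(test)]
mod streaming_sketch_tests {
    use super::*;

    #[test]
    fn hashv_matches_hash_of_concatenation() {
        // "foo" and "bar" fed as two slices vs. one contiguous slice.
        let parts: [&[u8]; 2] = [b"foo", b"bar"];
        assert_eq!(hashv(&parts), hash(b"foobar"));

        // extend_and_hash(id, val) == hash(id.as_ref() ++ val) by construction.
        let id = hash(b"foo");
        let mut joined = id.as_ref().to_vec();
        joined.extend_from_slice(b"bar");
        assert_eq!(extend_and_hash(&id, b"bar"), hash(&joined));
    }
}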
26.218605
86
0.573887
ebed3d732bddba00ad8cfe17cfebe1373d9fb7d6
202
//! The module for the Wasmtime CLI commands. mod compile; mod config; mod run; mod settings; mod wasm2obj; mod wast; pub use self::{compile::*, config::*, run::*, settings::*, wasm2obj::*, wast::*};
18.363636
81
0.663366
fe0d5c7618af48caece2a00b0d869f04db4445fa
956
extern crate bit_reverse; extern crate bit_set; extern crate bytecount; #[macro_use] extern crate colour; #[macro_use] extern crate exec_time; extern crate intbits; extern crate itertools; extern crate num_integer; mod day01; mod day02; mod day03; mod day04; mod day05; mod day06; mod day07; mod day08; mod day09; mod day10; mod day11; mod day12; mod day13; mod day14; mod day15; mod day16; mod day17; mod day18; mod day19; mod day20; mod day21; mod day22; mod day23; mod day24; mod day25; fn main() { day01::run(); day02::run(); day03::run(); day04::run(); day05::run(); day06::run(); day07::run(); day08::run(); day09::run(); day10::run(); day11::run(); day12::run(); day13::run(); day14::run(); day15::run(); day16::run(); day17::run(); day18::run(); day19::run(); day20::run(); day21::run(); day22::run(); day23::run(); day24::run(); day25::run(); }
14.707692
25
0.607741
0eddc3e3bb36a236d18207fe71bf5921bd307398
4,065
use crate::SpecificSocketAddress; /// IPv6 socket address. /// /// This includes an IPv6 address and a 16-bit port number. #[derive(Clone)] #[repr(C)] pub struct Inet6SocketAddress { /// The inner C-compatible socket address. inner: libc::sockaddr_in6, } impl Inet6SocketAddress { /// Create an IPv6 socket address. pub fn new(ip: std::net::Ipv6Addr, port: u16, flowinfo: u32, scope_id: u32) -> Self { let inner = libc::sockaddr_in6 { sin6_family: Self::static_family(), sin6_addr: libc::in6_addr { s6_addr: ip.octets() }, sin6_port: port.to_be(), sin6_flowinfo: flowinfo, sin6_scope_id: scope_id, }; Self::from_raw(inner) } /// Create an IPv6 socket address from a [`libc::sockaddr_in6`]. pub fn from_raw(inner: libc::sockaddr_in6) -> Self { Self { inner } } /// Convert the [`SocketAddress`] into raw [`libc`] parts. pub fn into_raw(self) -> libc::sockaddr_in6 { self.inner } /// Get the IP address associated with the socket address. pub fn ip(&self) -> std::net::Ipv6Addr { self.inner.sin6_addr.s6_addr.into() } /// Set the IP address associated with the socket address. pub fn set_ip(&mut self, ip: std::net::Ipv6Addr) { self.inner.sin6_addr.s6_addr = ip.octets(); } /// Get the port number associated with the socket address. pub fn port(&self) -> u16 { u16::from_be(self.inner.sin6_port) } /// Set the port number associated with the socket address. pub fn set_port(&mut self, port: u16) { self.inner.sin6_port = port.to_be(); } /// Get the flow information associated with the socket address. pub fn flowinfo(&self) -> u32 { self.inner.sin6_flowinfo } /// Set the flow information associated with the socket address. pub fn set_flowinfo(&mut self, flowinfo: u32) { self.inner.sin6_flowinfo = flowinfo; } /// Get the scope ID associated with the socket address. pub fn scope_id(&self) -> u32 { self.inner.sin6_scope_id } /// Set the scope ID associated with the socket address.
pub fn set_scope_id(&mut self, scope_id: u32) { self.inner.sin6_scope_id = scope_id; } } impl SpecificSocketAddress for Inet6SocketAddress { fn static_family() -> libc::sa_family_t { libc::AF_INET6 as libc::sa_family_t } } unsafe impl crate::AsSocketAddress for Inet6SocketAddress { fn as_sockaddr(&self) -> *const libc::sockaddr { &self.inner as *const _ as *const _ } fn as_sockaddr_mut(address: &mut std::mem::MaybeUninit<Self>) -> *mut libc::sockaddr { unsafe { &mut address.as_mut_ptr().as_mut().unwrap().inner as *mut _ as *mut _ } } fn len(&self) -> libc::socklen_t { Self::max_len() } fn finalize(address: std::mem::MaybeUninit<Self>, len: libc::socklen_t) -> std::io::Result<Self> { unsafe { let address = address.assume_init(); if address.family() != Self::static_family() { return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "wrong address family, expected AF_INET6")); } if len != Self::max_len() { return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "wrong address size")); } Ok(address) } } fn max_len() -> libc::socklen_t { std::mem::size_of::<libc::sockaddr_in6>() as libc::socklen_t } } impl From<Inet6SocketAddress> for crate::SocketAddress { fn from(other: Inet6SocketAddress) -> Self { Self::from(&other) } } impl From<&Inet6SocketAddress> for crate::SocketAddress { fn from(other: &Inet6SocketAddress) -> Self { Self::from_other(other) } } impl From<std::net::SocketAddrV6> for Inet6SocketAddress { fn from(other: std::net::SocketAddrV6) -> Self { Self::from(&other) } } impl From<&std::net::SocketAddrV6> for Inet6SocketAddress { fn from(other: &std::net::SocketAddrV6) -> Self { Self::new(*other.ip(), other.port(), other.flowinfo(), other.scope_id()) } } impl From<Inet6SocketAddress> for std::net::SocketAddrV6 { fn from(other: Inet6SocketAddress) -> Self { Self::from(&other) } } impl From<&Inet6SocketAddress> for std::net::SocketAddrV6 { fn from(other: &Inet6SocketAddress) -> Self { Self::new(other.ip(), other.port(), other.flowinfo(), other.scope_id()) } }
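// Illustrative round-trip check (an addition of ours, not part of the original
// crate): converting a `std::net::SocketAddrV6` into an `Inet6SocketAddress`
// and back should preserve the address, port, flowinfo, and scope ID. The test
// module, test name, and the concrete values are hypothetical.
#[cfg(test)]
mod round_trip_tests {
    use super::*;

    #[test]
    fn std_socket_addr_v6_round_trip() {
        let std_addr = std::net::SocketAddrV6::new(std::net::Ipv6Addr::LOCALHOST, 8080, 7, 3);
        let addr = Inet6SocketAddress::from(&std_addr);
        assert_eq!(addr.ip(), *std_addr.ip());
        assert_eq!(addr.port(), 8080);
        // `SocketAddrV6`'s equality also compares flowinfo and scope_id.
        let back: std::net::SocketAddrV6 = (&addr).into();
        assert_eq!(back, std_addr);
    }
}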
27.281879
111
0.688069
db45131e0f50ffa6a769560011887e696c903779
7,701
use super::*; use core::{ fmt::{Debug, Display, Formatter, Result as FmtResult}, format_args, }; #[derive(Debug)] struct Integer(isize); impl Display for Integer { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { if f.alternate() { f.write_fmt(format_args!("Integer value '{}'", self.0)) } else { f.write_fmt(format_args!("'{}'", self.0)) } } } #[derive(Debug)] struct Complex { r: Integer, i: Integer, } impl Complex { fn new(r: isize, i: isize) -> Self { Self { r: Integer(r), i: Integer(i), } } } impl Display for Complex { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { display_struct(f, &[(&"r", &self.r), (&'i', &self.i)]) } } struct Diverse { a: i8, b: char, c: usize, d: Option<&'static str>, e: String, f: Option<Integer>, g: Complex, } impl Display for Diverse { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { StructShow::new(f, Alternate::Inherit) .fields(&[(&"a", &self.a), (&'b', &self.b), (&"c".to_owned(), &self.c)]) .field_opt(&"d", &self.d) .field(&'e', &self.e) .field_opt(&"f".to_owned(), &self.f) .field(&"g", &self.g) .finish() } } struct Array4 { one: Integer, two: isize, three: char, four: Option<char>, } impl Display for Array4 { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { ListShow::new(f, Alternate::Inherit) .items(&[&self.one, &self.two, &self.three]) .item_opt(&self.four) .finish() } } struct Hector(Vec<isize>); impl Display for Hector { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { display_list_from_iter(f, self.0.iter()) } } struct Shmap(std::collections::BTreeMap<String, isize>); impl Display for Shmap { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { display_struct_from_iter(f, self.0.iter()) } } #[test] fn display() { assert_eq!("key: value", &format!("{}", Field::new("key", "value"))); assert_eq!("key: 12345", &format!("{}", Field::new("key", &12345))); assert_eq!( "coord: '-43'", &format!("{}", Field::new("coord", &Integer(-43))) ); assert_eq!( "point: {r: '1', i: '2'}", &format!("{}", Field::new("point", &Complex::new(1, 2))) ); assert_eq!( "{a: -1, b: z, c: 123456789, d: static string literal, e: Some text, f: '-19', g: {r: '-3', i: '4'}}", &format!("{}", Diverse{ a: -1, b: 'z', c: 123456789, d: Some("static string literal"), e: "Some text".to_string(), f: Some(Integer(-19)), g: Complex::new(-3, 4) }) ); assert_eq!( "{a: -1, b: z, c: 123456789, e: Some text, g: {r: '-3', i: '4'}}", &format!( "{}", Diverse { a: -1, b: 'z', c: 123456789, d: None, e: "Some text".to_string(), f: None, g: Complex::new(-3, 4) } ) ); assert_eq!( "['1', 2, c, s]", &format!( "{}", Array4 { one: Integer(1), two: 2, three: 'c', four: Some('s'), } ) ); assert_eq!( "['3', 4, d]", &format!( "{}", Array4 { one: Integer(3), two: 4, three: 'd', four: None, } ) ); assert_eq!( "[1, 2, 3, 4, 5]", &format!("{}", Hector((1..6).into_iter().collect())) ); assert_eq!( "{0: 0, 1: 2, 3: 5}", &format!( "{}", Shmap(maplit::btreemap! 
{ "0".into() => 0, "1".into() => 2, "3".into() => 5, }) ) ) } #[test] fn display_alternative() { assert_eq!("key: value", &format!("{:#}", Field::new("key", "value"))); assert_eq!("key: 12345", &format!("{:#}", Field::new("key", &12345))); assert_eq!( "coord: Integer value '-43'", &format!("{:#}", Field::new("coord", &Integer(-43))) ); let point_output = r#"point: { r: Integer value '1', i: Integer value '2', }"#; assert_eq!( point_output, &format!("{:#}", Field::new("point", &Complex::new(1, 2))) ); assert_eq!( r#"{ a: -1, b: z, c: 123456789, d: static string literal, e: Some text, f: Integer value '-19', g: { r: Integer value '-3', i: Integer value '4', }, }"#, &format!( "{:#}", Diverse { a: -1, b: 'z', c: 123456789, d: Some("static string literal"), e: "Some text".to_string(), f: Some(Integer(-19)), g: Complex::new(-3, 4) } ) ); assert_eq!( r#"{ a: -1, b: z, c: 123456789, e: Some text, g: { r: Integer value '-3', i: Integer value '4', }, }"#, &format!( "{:#}", Diverse { a: -1, b: 'z', c: 123456789, d: None, e: "Some text".to_string(), f: None, g: Complex::new(-3, 4) } ) ); assert_eq!( r#"[ Integer value '5', 6, e, ]"#, &format!( "{:#}", Array4 { one: Integer(5), two: 6, three: 'e', four: None, } ) ); assert_eq!( r#"[ Integer value '7', 8, f, g, ]"#, &format!( "{:#}", Array4 { one: Integer(7), two: 8, three: 'f', four: Some('g'), } ) ); assert_eq!( r#"[ 1, 2, 3, 4, 5, ]"#, &format!("{:#}", Hector((1..6).into_iter().collect())) ); assert_eq!( r#"{ 0: 0, 1: 2, 3: 5, }"#, &format!( "{:#}", Shmap(maplit::btreemap! { "0".into() => 0, "1".into() => 2, "3".into() => 5, }) ) ); } #[test] fn debug() { assert_eq!( "\"key\": \"value\"", &format!("{:?}", Field::new("key", "value")) ); assert_eq!( "\"key\": 12345", &format!("{:?}", Field::new("key", &12345)) ); assert_eq!( "\"coord\": Integer(-43)", &format!("{:?}", Field::new("coord", &Integer(-43))) ); assert_eq!( "\"point\": Complex { r: Integer(1), i: Integer(2) }", &format!("{:?}", Field::new("point", &Complex::new(1, 2))) ); } #[test] fn debug_alternative() { assert_eq!( "\"key\": \"value\"", &format!("{:#?}", Field::new("key", "value")) ); assert_eq!( "\"key\": 12345", &format!("{:#?}", Field::new("key", &12345)) ); assert_eq!( r#""coord": Integer( -43, )"#, &format!("{:#?}", Field::new("coord", &Integer(-43))) ); let point_output = r#""point": Complex { r: Integer( 1, ), i: Integer( 2, ), }"#; assert_eq!( point_output, &format!("{:#?}", Field::new("point", &Complex::new(1, 2))) ); }
22.002857
110
0.390728
6a834c23efb57fd33cede1e279b657b2c82aeff0
137
pub fn read() -> usize { unsafe { let r: usize; asm!("csrr $0, mtval" : "=r"(r) : : : "volatile"); r } }
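/// Hedged usage sketch (ours, not from the original file): a machine-mode trap
/// handler might call `read()` to recover the trap value, e.g. the faulting
/// virtual address after a load/store or page fault. This wrapper and its name
/// are hypothetical; actual handling/logging is elided.
pub fn trap_value_for_diagnostics() -> usize {
    // mtval holds fault-specific information set by the hardware on a trap.
    read()
}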
17.125
58
0.386861
e921dd1ba0636bec010bacdd41ed3cece7f457ec
11,240
// Lower-case ASCII 'a' is the first byte that has its highest bit set // after wrap-adding 0x1F: // // b'a' + 0x1F == 0x80 == 0b1000_0000 // b'z' + 0x1F == 0x98 == 0b1001_1000 // // Lower-case ASCII 'z' is the last byte that has its highest bit unset // after wrap-adding 0x05: // // b'a' + 0x05 == 0x66 == 0b0110_0110 // b'z' + 0x05 == 0x7F == 0b0111_1111 // // … except for 0xFB to 0xFF, but those are in the range of bytes // that have the highest bit unset again after adding 0x1F. // // So `(byte + 0x1f) & !(byte + 5)` has its highest bit set // iff `byte` is a lower-case ASCII letter. // // Lower-case ASCII letters all have the 0x20 bit set. // (Two positions right of 0x80, the highest bit.) // Unsetting that bit produces the same letter, in upper-case. // // Therefore: fn branchless_to_ascii_upper_case(byte: u8) -> u8 { byte & !( ( byte.wrapping_add(0x1f) & !byte.wrapping_add(0x05) & 0x80 ) >> 2 ) } macro_rules! benches { ($( fn $name: ident($arg: ident: &mut [u8]) $body: block )+ @iter $( $is_: ident, )+) => { benches! {@ $( fn $name($arg: &mut [u8]) $body )+ $( fn $is_(bytes: &mut [u8]) { bytes.iter().all(u8::$is_) } )+ } }; (@$( fn $name: ident($arg: ident: &mut [u8]) $body: block )+) => { benches!(mod short SHORT $($name $arg $body)+); benches!(mod medium MEDIUM $($name $arg $body)+); benches!(mod long LONG $($name $arg $body)+); }; (mod $mod_name: ident $input: ident $($name: ident $arg: ident $body: block)+) => { mod $mod_name { use super::*; $( #[bench] fn $name(bencher: &mut Bencher) { bencher.bytes = $input.len() as u64; bencher.iter(|| { let mut vec = $input.as_bytes().to_vec(); { let $arg = &mut vec[..]; black_box($body); } vec }) } )+ } } } use test::black_box; use test::Bencher; benches! { fn case00_alloc_only(_bytes: &mut [u8]) {} fn case01_black_box_read_each_byte(bytes: &mut [u8]) { for byte in bytes { black_box(*byte); } } fn case02_lookup_table(bytes: &mut [u8]) { for byte in bytes { *byte = ASCII_UPPERCASE_MAP[*byte as usize] } } fn case03_branch_and_subtract(bytes: &mut [u8]) { for byte in bytes { *byte = if b'a' <= *byte && *byte <= b'z' { *byte - b'a' + b'A' } else { *byte } } } fn case04_branch_and_mask(bytes: &mut [u8]) { for byte in bytes { *byte = if b'a' <= *byte && *byte <= b'z' { *byte & !0x20 } else { *byte } } } fn case05_branchless(bytes: &mut [u8]) { for byte in bytes { *byte = branchless_to_ascii_upper_case(*byte) } } fn case06_libcore(bytes: &mut [u8]) { bytes.make_ascii_uppercase() } fn case07_fake_simd_u32(bytes: &mut [u8]) { // SAFETY: transmuting a sequence of `u8` to `u32` is always fine let (before, aligned, after) = unsafe { bytes.align_to_mut::<u32>() }; for byte in before { *byte = branchless_to_ascii_upper_case(*byte) } for word in aligned { // FIXME: this is incorrect for some byte values: // addition within a byte can carry/overflow into the next byte. // Test case: b"\xFFz " *word &= !( ( word.wrapping_add(0x1f1f1f1f) & !word.wrapping_add(0x05050505) & 0x80808080 ) >> 2 ) } for byte in after { *byte = branchless_to_ascii_upper_case(*byte) } } fn case08_fake_simd_u64(bytes: &mut [u8]) { // SAFETY: transmuting a sequence of `u8` to `u64` is always fine let (before, aligned, after) = unsafe { bytes.align_to_mut::<u64>() }; for byte in before { *byte = branchless_to_ascii_upper_case(*byte) } for word in aligned { // FIXME: like above, this is incorrect for some byte values. 
*word &= !( ( word.wrapping_add(0x1f1f1f1f_1f1f1f1f) & !word.wrapping_add(0x05050505_05050505) & 0x80808080_80808080 ) >> 2 ) } for byte in after { *byte = branchless_to_ascii_upper_case(*byte) } } fn case09_mask_mult_bool_branchy_lookup_table(bytes: &mut [u8]) { fn is_ascii_lowercase(b: u8) -> bool { if b >= 0x80 { return false } match ASCII_CHARACTER_CLASS[b as usize] { L | Lx => true, _ => false, } } for byte in bytes { *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8)) } } fn case10_mask_mult_bool_lookup_table(bytes: &mut [u8]) { fn is_ascii_lowercase(b: u8) -> bool { match ASCII_CHARACTER_CLASS[b as usize] { L | Lx => true, _ => false } } for byte in bytes { *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8)) } } fn case11_mask_mult_bool_match_range(bytes: &mut [u8]) { fn is_ascii_lowercase(b: u8) -> bool { match b { b'a'..=b'z' => true, _ => false } } for byte in bytes { *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8)) } } fn case12_mask_shifted_bool_match_range(bytes: &mut [u8]) { fn is_ascii_lowercase(b: u8) -> bool { match b { b'a'..=b'z' => true, _ => false } } for byte in bytes { *byte &= !((is_ascii_lowercase(*byte) as u8) << 5) } } fn case13_subtract_shifted_bool_match_range(bytes: &mut [u8]) { fn is_ascii_lowercase(b: u8) -> bool { match b { b'a'..=b'z' => true, _ => false } } for byte in bytes { *byte -= (is_ascii_lowercase(*byte) as u8) << 5 } } fn case14_subtract_multiplied_bool_match_range(bytes: &mut [u8]) { fn is_ascii_lowercase(b: u8) -> bool { match b { b'a'..=b'z' => true, _ => false } } for byte in bytes { *byte -= (b'a' - b'A') * is_ascii_lowercase(*byte) as u8 } } @iter is_ascii, is_ascii_alphabetic, is_ascii_uppercase, is_ascii_lowercase, is_ascii_alphanumeric, is_ascii_digit, is_ascii_hexdigit, is_ascii_punctuation, is_ascii_graphic, is_ascii_whitespace, is_ascii_control, } macro_rules! repeat { ($s: expr) => { concat!($s, $s, $s, $s, $s, $s, $s, $s, $s, $s) } } const SHORT: &'static str = "Alice's"; const MEDIUM: &'static str = "Alice's Adventures in Wonderland"; const LONG: &'static str = repeat!(r#" La Guida di Bragia, a Ballad Opera for the Marionette Theatre (around 1850) Alice's Adventures in Wonderland (1865) Phantasmagoria and Other Poems (1869) Through the Looking-Glass, and What Alice Found There (includes "Jabberwocky" and "The Walrus and the Carpenter") (1871) The Hunting of the Snark (1876) Rhyme? And Reason? 
(1883) – shares some contents with the 1869 collection, including the long poem "Phantasmagoria" A Tangled Tale (1885) Sylvie and Bruno (1889) Sylvie and Bruno Concluded (1893) Pillow Problems (1893) What the Tortoise Said to Achilles (1895) Three Sunsets and Other Poems (1898) The Manlet (1903)[106] "#); const ASCII_UPPERCASE_MAP: [u8; 256] = [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', b'@', b'A', b'B', b'C', b'D', b'E', b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', b'Z', b'[', b'\\', b']', b'^', b'_', b'`', b'A', b'B', b'C', b'D', b'E', b'F', b'G', b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', b'X', b'Y', b'Z', b'{', b'|', b'}', b'~', 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; enum AsciiCharacterClass { C, // control Cw, // control whitespace W, // whitespace D, // digit L, // lowercase Lx, // lowercase hex digit U, // uppercase Ux, // uppercase hex digit P, // punctuation N, // Non-ASCII } use self::AsciiCharacterClass::*; static ASCII_CHARACTER_CLASS: [AsciiCharacterClass; 256] = [ // _0 _1 _2 _3 _4 _5 _6 _7 _8 _9 _a _b _c _d _e _f C, C, C, C, C, C, C, C, C, Cw,Cw,C, Cw,Cw,C, C, // 0_ C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, // 1_ W, P, P, P, P, P, P, P, P, P, P, P, P, P, P, P, // 2_ D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, P, // 3_ P, Ux,Ux,Ux,Ux,Ux,Ux,U, U, U, U, U, U, U, U, U, // 4_ U, U, U, U, U, U, U, U, U, U, U, P, P, P, P, P, // 5_ P, Lx,Lx,Lx,Lx,Lx,Lx,L, L, L, L, L, L, L, L, L, // 6_ L, L, L, L, L, L, L, L, L, L, L, P, P, P, P, C, // 7_ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, ];
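// Sanity check (a sketch we add here, not part of the original benchmark file):
// the bit trick derived in the comment at the top of this file should agree
// with the standard library's `u8::to_ascii_uppercase` for every possible byte
// value. The test name is ours.
#[test]
fn branchless_matches_to_ascii_uppercase_for_all_bytes() {
    for byte in 0u8..=255 {
        assert_eq!(branchless_to_ascii_upper_case(byte), byte.to_ascii_uppercase());
    }
}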
31.931818
94
0.4879
22db22a9b82b128720f087bc319c7d2aab343095
84,634
// Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // All files in the project carrying such notice may not be copied, modified, or distributed // except according to those terms. //! This module defines the 32-Bit Windows Base APIs use ctypes::{c_char, c_int, c_long, c_void}; use shared::basetsd::{ DWORD64, DWORD_PTR, LONG_PTR, PDWORD64, PDWORD_PTR, PSIZE_T, PULONG_PTR, SIZE_T, UINT_PTR, ULONG_PTR, }; use shared::guiddef::GUID; use shared::minwindef::{ ATOM, BOOL, BYTE, DWORD, FARPROC, FILETIME, HFILE, HGLOBAL, HLOCAL, HMODULE, HRSRC, LPBOOL, LPBYTE, LPCVOID, LPDWORD, LPFILETIME, LPVOID, LPWORD, PBOOL, PDWORD, PUCHAR, PULONG, PUSHORT, UCHAR, UINT, ULONG, USHORT, WORD, }; use shared::windef::HWND; use um::cfgmgr32::MAX_PROFILE_LEN; use um::fileapi::STREAM_INFO_LEVELS; use um::libloaderapi::{ ENUMRESLANGPROCA, ENUMRESLANGPROCW, ENUMRESNAMEPROCA, ENUMRESTYPEPROCA, ENUMRESTYPEPROCW, }; use um::minwinbase::{ FILE_INFO_BY_HANDLE_CLASS, FINDEX_INFO_LEVELS, FINDEX_SEARCH_OPS, GET_FILEEX_INFO_LEVELS, LPOVERLAPPED, LPOVERLAPPED_COMPLETION_ROUTINE, LPSECURITY_ATTRIBUTES, PREASON_CONTEXT, }; use um::processthreadsapi::{ LPPROC_THREAD_ATTRIBUTE_LIST, LPSTARTUPINFOA, STARTUPINFOA, STARTUPINFOW, }; use um::winnt::{ BOOLEAN, CHAR, DWORDLONG, EXECUTION_STATE, FILE_ID_128, HANDLE, HRESULT, INT, LANGID, LARGE_INTEGER, LATENCY_TIME, LONG, LPCCH, LPCH, LPCSTR, LPCWSTR, LPOSVERSIONINFOEXA, LPOSVERSIONINFOEXW, LPSTR, LPWSTR, MAXLONG, PBOOLEAN, PCONTEXT, PCWSTR, PFIRMWARE_TYPE, PHANDLE, PIO_COUNTERS, PJOB_SET_ARRAY, PLUID, POWER_REQUEST_TYPE, PPERFORMANCE_DATA, PPROCESSOR_NUMBER, PQUOTA_LIMITS, PRTL_UMS_SCHEDULER_ENTRY_POINT, PSECURE_MEMORY_CACHE_CALLBACK, PSID, PSID_NAME_USE, PULONGLONG, PVOID, PWOW64_CONTEXT, PWOW64_LDT_ENTRY, PWSTR, RTL_UMS_THREAD_INFO_CLASS, STATUS_ABANDONED_WAIT_0, STATUS_USER_APC, STATUS_WAIT_0, SecurityAnonymous, SecurityDelegation, SecurityIdentification, SecurityImpersonation, THREAD_BASE_PRIORITY_IDLE, THREAD_BASE_PRIORITY_LOWRT, THREAD_BASE_PRIORITY_MAX, THREAD_BASE_PRIORITY_MIN, ULARGE_INTEGER, VOID, WAITORTIMERCALLBACK, WCHAR, WOW64_CONTEXT, }; #[cfg(target_arch = "x86")] use um::winnt::PLDT_ENTRY; use vc::vadefs::va_list; pub const FILE_BEGIN: DWORD = 0; pub const FILE_CURRENT: DWORD = 1; pub const FILE_END: DWORD = 2; pub const WAIT_FAILED: DWORD = 0xFFFFFFFF; pub const WAIT_OBJECT_0: DWORD = STATUS_WAIT_0 as u32; pub const WAIT_ABANDONED: DWORD = STATUS_ABANDONED_WAIT_0 as u32; pub const WAIT_ABANDONED_0: DWORD = STATUS_ABANDONED_WAIT_0 as u32; pub const WAIT_IO_COMPLETION: DWORD = STATUS_USER_APC as u32; pub const FILE_FLAG_WRITE_THROUGH: DWORD = 0x80000000; pub const FILE_FLAG_OVERLAPPED: DWORD = 0x40000000; pub const FILE_FLAG_NO_BUFFERING: DWORD = 0x20000000; pub const FILE_FLAG_RANDOM_ACCESS: DWORD = 0x10000000; pub const FILE_FLAG_SEQUENTIAL_SCAN: DWORD = 0x08000000; pub const FILE_FLAG_DELETE_ON_CLOSE: DWORD = 0x04000000; pub const FILE_FLAG_BACKUP_SEMANTICS: DWORD = 0x02000000; pub const FILE_FLAG_POSIX_SEMANTICS: DWORD = 0x01000000; pub const FILE_FLAG_SESSION_AWARE: DWORD = 0x00800000; pub const FILE_FLAG_OPEN_REPARSE_POINT: DWORD = 0x00200000; pub const FILE_FLAG_OPEN_NO_RECALL: DWORD = 0x00100000; pub const FILE_FLAG_FIRST_PIPE_INSTANCE: DWORD = 0x00080000; pub const FILE_FLAG_OPEN_REQUIRING_OPLOCK: DWORD = 0x00040000; pub const PROGRESS_CONTINUE: DWORD = 0; pub const PROGRESS_CANCEL: DWORD = 1; pub const 
PROGRESS_STOP: DWORD = 2; pub const PROGRESS_QUIET: DWORD = 3; pub const CALLBACK_CHUNK_FINISHED: DWORD = 0x00000000; pub const CALLBACK_STREAM_SWITCH: DWORD = 0x00000001; pub const COPY_FILE_FAIL_IF_EXISTS: DWORD = 0x00000001; pub const COPY_FILE_RESTARTABLE: DWORD = 0x00000002; pub const COPY_FILE_OPEN_SOURCE_FOR_WRITE: DWORD = 0x00000004; pub const COPY_FILE_ALLOW_DECRYPTED_DESTINATION: DWORD = 0x00000008; pub const COPY_FILE_COPY_SYMLINK: DWORD = 0x00000800; pub const COPY_FILE_NO_BUFFERING: DWORD = 0x00001000; pub const COPY_FILE_REQUEST_SECURITY_PRIVILEGES: DWORD = 0x00002000; pub const COPY_FILE_RESUME_FROM_PAUSE: DWORD = 0x00004000; pub const COPY_FILE_NO_OFFLOAD: DWORD = 0x00040000; pub const COPY_FILE_IGNORE_EDP_BLOCK: DWORD = 0x00400000; pub const COPY_FILE_IGNORE_SOURCE_ENCRYPTION: DWORD = 0x00800000; pub const REPLACEFILE_WRITE_THROUGH: DWORD = 0x00000001; pub const REPLACEFILE_IGNORE_MERGE_ERRORS: DWORD = 0x00000002; pub const REPLACEFILE_IGNORE_ACL_ERRORS: DWORD = 0x00000004; pub const PIPE_ACCESS_INBOUND: DWORD = 0x00000001; pub const PIPE_ACCESS_OUTBOUND: DWORD = 0x00000002; pub const PIPE_ACCESS_DUPLEX: DWORD = 0x00000003; pub const PIPE_CLIENT_END: DWORD = 0x00000000; pub const PIPE_SERVER_END: DWORD = 0x00000001; pub const PIPE_WAIT: DWORD = 0x00000000; pub const PIPE_NOWAIT: DWORD = 0x00000001; pub const PIPE_READMODE_BYTE: DWORD = 0x00000000; pub const PIPE_READMODE_MESSAGE: DWORD = 0x00000002; pub const PIPE_TYPE_BYTE: DWORD = 0x00000000; pub const PIPE_TYPE_MESSAGE: DWORD = 0x00000004; pub const PIPE_ACCEPT_REMOTE_CLIENTS: DWORD = 0x00000000; pub const PIPE_REJECT_REMOTE_CLIENTS: DWORD = 0x00000008; pub const PIPE_UNLIMITED_INSTANCES: DWORD = 255; pub const SECURITY_ANONYMOUS: DWORD = SecurityAnonymous << 16; pub const SECURITY_IDENTIFICATION: DWORD = SecurityIdentification << 16; pub const SECURITY_IMPERSONATION: DWORD = SecurityImpersonation << 16; pub const SECURITY_DELEGATION: DWORD = SecurityDelegation << 16; pub const SECURITY_CONTEXT_TRACKING: DWORD = 0x00040000; pub const SECURITY_EFFECTIVE_ONLY: DWORD = 0x00080000; pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000; pub const SECURITY_VALID_SQOS_FLAGS: DWORD = 0x001F0000; FN!{stdcall PFIBER_START_ROUTINE( lpFiberParameter: LPVOID, ) -> ()} pub type LPFIBER_START_ROUTINE = PFIBER_START_ROUTINE; FN!{stdcall PFIBER_CALLOUT_ROUTINE( lpParameter: LPVOID, ) -> LPVOID} // FAIL_FAST_* #[cfg(target_arch = "x86")] pub type LPLDT_ENTRY = PLDT_ENTRY; #[cfg(not(target_arch = "x86"))] pub type LPLDT_ENTRY = LPVOID; // TODO - fix this for 32-bit //SP_SERIALCOMM //PST_* // PCF_* // SP_* // BAUD_* // DATABITS_* // STOPBITS_* // PARITY_* STRUCT!{struct COMMPROP { wPacketLength: WORD, wPacketVersion: WORD, dwServiceMask: DWORD, dwReserved1: DWORD, dwMaxTxQueue: DWORD, dwMaxRxQueue: DWORD, dwMaxBaud: DWORD, dwProvSubType: DWORD, dwProvCapabilities: DWORD, dwSettableParams: DWORD, dwSettableBaud: DWORD, wSettableData: WORD, wSettableStopParity: WORD, dwCurrentTxQueue: DWORD, dwCurrentRxQueue: DWORD, dwProvSpec1: DWORD, dwProvSpec2: DWORD, wcProvChar: [WCHAR; 1], }} pub type LPCOMMPROP = *mut COMMPROP; STRUCT!{struct COMSTAT { BitFields: DWORD, cbInQue: DWORD, cbOutQue: DWORD, }} BITFIELD!{COMSTAT BitFields: DWORD [ fCtsHold set_fCtsHold[0..1], fDsrHold set_fDsrHold[1..2], fRlsdHold set_fRlsdHold[2..3], fXoffHold set_fXoffHold[3..4], fXoffSent set_fXoffSent[4..5], fEof set_fEof[5..6], fTxim set_fTxim[6..7], fReserved set_fReserved[7..32], ]} pub type LPCOMSTAT = *mut COMSTAT; pub const DTR_CONTROL_DISABLE: DWORD = 0x00; pub 
const DTR_CONTROL_ENABLE: DWORD = 0x01; pub const DTR_CONTROL_HANDSHAKE: DWORD = 0x02; pub const RTS_CONTROL_DISABLE: DWORD = 0x00; pub const RTS_CONTROL_ENABLE: DWORD = 0x01; pub const RTS_CONTROL_HANDSHAKE: DWORD = 0x02; pub const RTS_CONTROL_TOGGLE: DWORD = 0x03; STRUCT!{struct DCB { DCBlength: DWORD, BaudRate: DWORD, BitFields: DWORD, wReserved: WORD, XonLim: WORD, XoffLim: WORD, ByteSize: BYTE, Parity: BYTE, StopBits: BYTE, XonChar: c_char, XoffChar: c_char, ErrorChar: c_char, EofChar: c_char, EvtChar: c_char, wReserved1: WORD, }} BITFIELD!{DCB BitFields: DWORD [ fBinary set_fBinary[0..1], fParity set_fParity[1..2], fOutxCtsFlow set_fOutxCtsFlow[2..3], fOutxDsrFlow set_fOutxDsrFlow[3..4], fDtrControl set_fDtrControl[4..6], fDsrSensitivity set_fDsrSensitivity[6..7], fTXContinueOnXoff set_fTXContinueOnXoff[7..8], fOutX set_fOutX[8..9], fInX set_fInX[9..10], fErrorChar set_fErrorChar[10..11], fNull set_fNull[11..12], fRtsControl set_fRtsControl[12..14], fAbortOnError set_fAbortOnError[14..15], fDummy2 set_fDummy2[15..32], ]} pub type LPDCB = *mut DCB; STRUCT!{struct COMMTIMEOUTS { ReadIntervalTimeout: DWORD, ReadTotalTimeoutMultiplier: DWORD, ReadTotalTimeoutConstant: DWORD, WriteTotalTimeoutMultiplier: DWORD, WriteTotalTimeoutConstant: DWORD, }} pub type LPCOMMTIMEOUTS = *mut COMMTIMEOUTS; STRUCT!{struct COMMCONFIG { dwSize: DWORD, wVersion: WORD, wReserved: WORD, dcb: DCB, dwProviderSubType: DWORD, dwProviderOffset: DWORD, dwProviderSize: DWORD, wcProviderData: [WCHAR; 1], }} pub type LPCOMMCONFIG = *mut COMMCONFIG; pub const GMEM_FIXED: UINT = 0x0000; pub const GMEM_MOVEABLE: UINT = 0x0002; pub const GMEM_NOCOMPACT: UINT = 0x0010; pub const GMEM_NODISCARD: UINT = 0x0020; pub const GMEM_ZEROINIT: UINT = 0x0040; pub const GMEM_MODIFY: UINT = 0x0080; pub const GMEM_DISCARDABLE: UINT = 0x0100; pub const GMEM_NOT_BANKED: UINT = 0x1000; pub const GMEM_SHARE: UINT = 0x2000; pub const GMEM_DDESHARE: UINT = 0x2000; pub const GMEM_NOTIFY: UINT = 0x4000; pub const GMEM_LOWER: UINT = GMEM_NOT_BANKED; pub const GMEM_VALID_FLAGS: UINT = 0x7F72; pub const GMEM_INVALID_HANDLE: UINT = 0x8000; pub const GHND: UINT = GMEM_MOVEABLE | GMEM_ZEROINIT; pub const GPTR: UINT = GMEM_FIXED | GMEM_ZEROINIT; pub const GMEM_DISCARDED: UINT = 0x4000; pub const GMEM_LOCKCOUNT: UINT = 0x00FF; STRUCT!{struct MEMORYSTATUS { dwLength: DWORD, dwMemoryLoad: DWORD, dwTotalPhys: SIZE_T, dwAvailPhys: SIZE_T, dwTotalPageFile: SIZE_T, dwAvailPageFile: SIZE_T, dwTotalVirtual: SIZE_T, dwAvailVirtual: SIZE_T, }} pub type LPMEMORYSTATUS = *mut MEMORYSTATUS; // NUMA_NO_PREFERRED_NODE pub const DEBUG_PROCESS: DWORD = 0x00000001; pub const DEBUG_ONLY_THIS_PROCESS: DWORD = 0x00000002; pub const CREATE_SUSPENDED: DWORD = 0x00000004; pub const DETACHED_PROCESS: DWORD = 0x00000008; pub const CREATE_NEW_CONSOLE: DWORD = 0x00000010; pub const NORMAL_PRIORITY_CLASS: DWORD = 0x00000020; pub const IDLE_PRIORITY_CLASS: DWORD = 0x00000040; pub const HIGH_PRIORITY_CLASS: DWORD = 0x00000080; pub const REALTIME_PRIORITY_CLASS: DWORD = 0x00000100; pub const CREATE_NEW_PROCESS_GROUP: DWORD = 0x00000200; pub const CREATE_UNICODE_ENVIRONMENT: DWORD = 0x00000400; pub const CREATE_SEPARATE_WOW_VDM: DWORD = 0x00000800; pub const CREATE_SHARED_WOW_VDM: DWORD = 0x00001000; pub const CREATE_FORCEDOS: DWORD = 0x00002000; pub const BELOW_NORMAL_PRIORITY_CLASS: DWORD = 0x00004000; pub const ABOVE_NORMAL_PRIORITY_CLASS: DWORD = 0x00008000; pub const INHERIT_PARENT_AFFINITY: DWORD = 0x00010000; pub const INHERIT_CALLER_PRIORITY: DWORD = 0x00020000; pub const 
CREATE_PROTECTED_PROCESS: DWORD = 0x00040000; pub const EXTENDED_STARTUPINFO_PRESENT: DWORD = 0x00080000; pub const PROCESS_MODE_BACKGROUND_BEGIN: DWORD = 0x00100000; pub const PROCESS_MODE_BACKGROUND_END: DWORD = 0x00200000; pub const CREATE_BREAKAWAY_FROM_JOB: DWORD = 0x01000000; pub const CREATE_PRESERVE_CODE_AUTHZ_LEVEL: DWORD = 0x02000000; pub const CREATE_DEFAULT_ERROR_MODE: DWORD = 0x04000000; pub const CREATE_NO_WINDOW: DWORD = 0x08000000; pub const PROFILE_USER: DWORD = 0x10000000; pub const PROFILE_KERNEL: DWORD = 0x20000000; pub const PROFILE_SERVER: DWORD = 0x40000000; pub const CREATE_IGNORE_SYSTEM_DEFAULT: DWORD = 0x80000000; // STACK_SIZE_PARAM_IS_A_RESERVATION pub const THREAD_PRIORITY_LOWEST: DWORD = THREAD_BASE_PRIORITY_MIN; pub const THREAD_PRIORITY_BELOW_NORMAL: DWORD = THREAD_PRIORITY_LOWEST + 1; pub const THREAD_PRIORITY_NORMAL: DWORD = 0; pub const THREAD_PRIORITY_HIGHEST: DWORD = THREAD_BASE_PRIORITY_MAX; pub const THREAD_PRIORITY_ABOVE_NORMAL: DWORD = THREAD_PRIORITY_HIGHEST - 1; pub const THREAD_PRIORITY_ERROR_RETURN: DWORD = MAXLONG as u32; pub const THREAD_PRIORITY_TIME_CRITICAL: DWORD = THREAD_BASE_PRIORITY_LOWRT; pub const THREAD_PRIORITY_IDLE: DWORD = THREAD_BASE_PRIORITY_IDLE; pub const THREAD_MODE_BACKGROUND_BEGIN: DWORD = 0x00010000; pub const THREAD_MODE_BACKGROUND_END: DWORD = 0x00020000; pub const VOLUME_NAME_DOS: DWORD = 0x0; // VOLUME_NAME_* // FILE_NAME_* // JIT_DEBUG_* pub const DRIVE_UNKNOWN: DWORD = 0; pub const DRIVE_NO_ROOT_DIR: DWORD = 1; pub const DRIVE_REMOVABLE: DWORD = 2; pub const DRIVE_FIXED: DWORD = 3; pub const DRIVE_REMOTE: DWORD = 4; pub const DRIVE_CDROM: DWORD = 5; pub const DRIVE_RAMDISK: DWORD = 6; // pub fn GetFreeSpace(); pub const FILE_TYPE_UNKNOWN: DWORD = 0x0000; pub const FILE_TYPE_DISK: DWORD = 0x0001; pub const FILE_TYPE_CHAR: DWORD = 0x0002; pub const FILE_TYPE_PIPE: DWORD = 0x0003; pub const FILE_TYPE_REMOTE: DWORD = 0x8000; pub const STD_INPUT_HANDLE: DWORD = -10i32 as u32; pub const STD_OUTPUT_HANDLE: DWORD = -11i32 as u32; pub const STD_ERROR_HANDLE: DWORD = -12i32 as u32; pub const NOPARITY: BYTE = 0; pub const ODDPARITY: BYTE = 1; pub const EVENPARITY: BYTE = 2; pub const MARKPARITY: BYTE = 3; pub const SPACEPARITY: BYTE = 4; pub const ONESTOPBIT: BYTE = 0; pub const ONE5STOPBITS: BYTE = 1; pub const TWOSTOPBITS: BYTE = 2; pub const IGNORE: DWORD = 0; pub const INFINITE: DWORD = 0xFFFFFFFF; pub const CBR_110: DWORD = 110; pub const CBR_300: DWORD = 300; pub const CBR_600: DWORD = 600; pub const CBR_1200: DWORD = 1200; pub const CBR_2400: DWORD = 2400; pub const CBR_4800: DWORD = 4800; pub const CBR_9600: DWORD = 9600; pub const CBR_14400: DWORD = 14400; pub const CBR_19200: DWORD = 19200; pub const CBR_38400: DWORD = 38400; pub const CBR_56000: DWORD = 56000; pub const CBR_57600: DWORD = 57600; pub const CBR_115200: DWORD = 115200; pub const CBR_128000: DWORD = 128000; pub const CBR_256000: DWORD = 256000; // CE_* // IE_* // EV_* pub const SETXOFF: DWORD = 1; pub const SETXON: DWORD = 2; pub const SETRTS: DWORD = 3; pub const CLRRTS: DWORD = 4; pub const SETDTR: DWORD = 5; pub const CLRDTR: DWORD = 6; pub const RESETDEV: DWORD = 7; pub const SETBREAK: DWORD = 8; pub const CLRBREAK: DWORD = 9; pub const PURGE_TXABORT: DWORD = 0x0001; pub const PURGE_RXABORT: DWORD = 0x0002; pub const PURGE_TXCLEAR: DWORD = 0x0004; pub const PURGE_RXCLEAR: DWORD = 0x0008; pub const MS_CTS_ON: DWORD = 0x0010; pub const MS_DSR_ON: DWORD = 0x0020; pub const MS_RING_ON: DWORD = 0x0040; pub const MS_RLSD_ON: DWORD = 0x0080; // S_* // 
NMPWAIT_* // FS_* // OF_* pub const OFS_MAXPATHNAME: usize = 128; STRUCT!{struct OFSTRUCT { cBytes: BYTE, fFixedDisk: BYTE, nErrCode: WORD, Reserved1: WORD, Reserved2: WORD, szPathName: [CHAR; OFS_MAXPATHNAME], }} pub type POFSTRUCT = *mut OFSTRUCT; pub type LPOFSTRUCT = *mut OFSTRUCT; extern "system" { pub fn GlobalAlloc( uFlags: UINT, dwBytes: SIZE_T, ) -> HGLOBAL; pub fn GlobalReAlloc( hMem: HGLOBAL, dwBytes: SIZE_T, uFlags: UINT, ) -> HGLOBAL; pub fn GlobalSize( hMem: HGLOBAL, ) -> SIZE_T; pub fn GlobalFlags( hMem: HGLOBAL, ) -> UINT; pub fn GlobalLock( hMem: HGLOBAL, ) -> LPVOID; pub fn GlobalHandle( pMem: LPCVOID, ) -> HGLOBAL; pub fn GlobalUnlock( hMem: HGLOBAL, ) -> BOOL; pub fn GlobalFree( hMem: HGLOBAL, ) -> HGLOBAL; pub fn GlobalCompact( dwMinFree: DWORD, ) -> SIZE_T; pub fn GlobalFix( hMem: HGLOBAL, ); pub fn GlobalUnfix( hMem: HGLOBAL, ); pub fn GlobalWire( hMem: HGLOBAL, ) -> LPVOID; pub fn GlobalUnWire( hMem: HGLOBAL, ) -> BOOL; pub fn GlobalMemoryStatus( lpBuffer: LPMEMORYSTATUS, ); pub fn LocalAlloc( uFlags: UINT, uBytes: SIZE_T, ) -> HLOCAL; pub fn LocalReAlloc( hMem: HLOCAL, uBytes: SIZE_T, uFlags: UINT, ) -> HLOCAL; pub fn LocalLock( hMem: HLOCAL, ) -> LPVOID; pub fn LocalHandle( pMem: LPCVOID, ) -> HLOCAL; pub fn LocalUnlock( hMem: HLOCAL, ) -> BOOL; pub fn LocalSize( hMem: HLOCAL, ) -> SIZE_T; pub fn LocalFlags( hMem: HLOCAL, ) -> UINT; pub fn LocalFree( hMem: HLOCAL, ) -> HLOCAL; pub fn LocalShrink( hMem: HLOCAL, cbNewSize: UINT, ) -> SIZE_T; pub fn LocalCompact( uMinFree: UINT, ) -> SIZE_T; } // SCS_* extern "system" { pub fn GetBinaryTypeA( lpApplicationName: LPCSTR, lpBinaryType: LPDWORD, ) -> BOOL; pub fn GetBinaryTypeW( lpApplicationName: LPCWSTR, lpBinaryType: LPDWORD, ) -> BOOL; pub fn GetShortPathNameA( lpszLongPath: LPCSTR, lpszShortPath: LPSTR, cchBuffer: DWORD, ) -> DWORD; pub fn GetLongPathNameTransactedA( lpszShortPath: LPCSTR, lpszLongPath: LPSTR, cchBuffer: DWORD, hTransaction: HANDLE, ) -> DWORD; pub fn GetLongPathNameTransactedW( lpszShortPath: LPCWSTR, lpszLongPath: LPWSTR, cchBuffer: DWORD, hTransaction: HANDLE, ) -> DWORD; pub fn GetProcessAffinityMask( hProcess: HANDLE, lpProcessAffinityMask: PDWORD_PTR, lpSystemAffinityMask: PDWORD_PTR, ) -> BOOL; pub fn SetProcessAffinityMask( hProcess: HANDLE, dwProcessAffinityMask: DWORD, ) -> BOOL; pub fn GetProcessIoCounters( hProcess: HANDLE, lpIoCounters: PIO_COUNTERS, ) -> BOOL; pub fn GetProcessWorkingSetSize( hProcess: HANDLE, lpMinimumWorkingSetSize: PSIZE_T, lpMaximumWorkingSetSize: PSIZE_T, ) -> BOOL; pub fn SetProcessWorkingSetSize( hProcess: HANDLE, dwMinimumWorkingSetSize: SIZE_T, dwMaximumWorkingSetSize: SIZE_T, ) -> BOOL; pub fn FatalExit( ExitCode: c_int, ); pub fn SetEnvironmentStringsA( NewEnvironment: LPCH, ) -> BOOL; pub fn SwitchToFiber( lpFiber: LPVOID, ); pub fn DeleteFiber( lpFiber: LPVOID, ); pub fn ConvertFiberToThread() -> BOOL; pub fn CreateFiberEx( dwStackCommitSize: SIZE_T, dwStackReserveSize: SIZE_T, dwFlags: DWORD, lpStartAddress: LPFIBER_START_ROUTINE, lpParameter: LPVOID, ) -> LPVOID; pub fn ConvertThreadToFiberEx( lpParameter: LPVOID, dwFlags: DWORD, ) -> LPVOID; pub fn CreateFiber( dwStackSize: SIZE_T, lpStartAddress: LPFIBER_START_ROUTINE, lpParameter: LPVOID, ) -> LPVOID; pub fn ConvertThreadToFiber( lpParameter: LPVOID, ) -> LPVOID; } pub type PUMS_CONTEXT = *mut c_void; pub type PUMS_COMPLETION_LIST = *mut c_void; pub type UMS_THREAD_INFO_CLASS = RTL_UMS_THREAD_INFO_CLASS; pub type PUMS_THREAD_INFO_CLASS = *mut UMS_THREAD_INFO_CLASS; pub type PUMS_SCHEDULER_ENTRY_POINT 
= PRTL_UMS_SCHEDULER_ENTRY_POINT; STRUCT!{struct UMS_SCHEDULER_STARTUP_INFO { UmsVersion: ULONG, CompletionList: PUMS_COMPLETION_LIST, SchedulerProc: PUMS_SCHEDULER_ENTRY_POINT, SchedulerParam: PVOID, }} pub type PUMS_SCHEDULER_STARTUP_INFO = *mut UMS_SCHEDULER_STARTUP_INFO; STRUCT!{struct UMS_SYSTEM_THREAD_INFORMATION { UmsVersion: ULONG, ThreadUmsFlags: ULONG, }} BITFIELD!{UMS_SYSTEM_THREAD_INFORMATION ThreadUmsFlags: ULONG [ IsUmsSchedulerThread set_IsUmsSchedulerThread[0..1], IsUmsWorkerThread set_IsUmsWorkerThread[1..2], ]} pub type PUMS_SYSTEM_THREAD_INFORMATION = *mut UMS_SYSTEM_THREAD_INFORMATION; extern "system" { #[cfg(target_pointer_width = "64")] pub fn CreateUmsCompletionList( UmsCompletionList: *mut PUMS_COMPLETION_LIST, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn DequeueUmsCompletionListItems( UmsCompletionList: PUMS_COMPLETION_LIST, WaitTimeOut: DWORD, UmsThreadList: *mut PUMS_CONTEXT, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn GetUmsCompletionListEvent( UmsCompletionList: PUMS_COMPLETION_LIST, UmsCompletionEvent: PHANDLE, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn ExecuteUmsThread( UmsThread: PUMS_CONTEXT, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn UmsThreadYield( SchedulerParam: PVOID, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn DeleteUmsCompletionList( UmsCompletionList: PUMS_COMPLETION_LIST, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn GetCurrentUmsThread() -> PUMS_CONTEXT; #[cfg(target_pointer_width = "64")] pub fn GetNextUmsListItem( UmsContext: PUMS_CONTEXT, ) -> PUMS_CONTEXT; #[cfg(target_pointer_width = "64")] pub fn QueryUmsThreadInformation( UmsThread: PUMS_CONTEXT, UmsThreadInfoClass: UMS_THREAD_INFO_CLASS, UmsThreadInformation: PVOID, UmsThreadInformationLength: ULONG, ReturnLength: PULONG, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn SetUmsThreadInformation( UmsThread: PUMS_CONTEXT, UmsThreadInfoClass: UMS_THREAD_INFO_CLASS, UmsThreadInformation: PVOID, UmsThreadInformationLength: ULONG, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn DeleteUmsThreadContext( UmsThread: PUMS_CONTEXT, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn CreateUmsThreadContext( lpUmsThread: *mut PUMS_CONTEXT, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn EnterUmsSchedulingMode( SchedulerStartupInfo: PUMS_SCHEDULER_STARTUP_INFO, ) -> BOOL; #[cfg(target_pointer_width = "64")] pub fn GetUmsSystemThreadInformation( ThreadHandle: HANDLE, SystemThreadInfo: PUMS_SYSTEM_THREAD_INFORMATION, ) -> BOOL; pub fn SetThreadAffinityMask( hThread: HANDLE, dwThreadAffinityMask: DWORD_PTR, ) -> DWORD_PTR; pub fn SetProcessDEPPolicy( dwFlags: DWORD, ) -> BOOL; pub fn GetProcessDEPPolicy( hProcess: HANDLE, lpFlags: LPDWORD, lpPermanent: PBOOL, ) -> BOOL; pub fn RequestWakeupLatency( latency: LATENCY_TIME, ) -> BOOL; pub fn IsSystemResumeAutomatic() -> BOOL; pub fn GetThreadSelectorEntry( hThread: HANDLE, dwSelector: DWORD, lpSelectorEntry: LPLDT_ENTRY, ) -> BOOL; pub fn SetThreadExecutionState( esFlags: EXECUTION_STATE, ) -> EXECUTION_STATE; pub fn PowerCreateRequest( Context: PREASON_CONTEXT, ) -> HANDLE; pub fn PowerSetRequest( PowerRequest: HANDLE, RequestType: POWER_REQUEST_TYPE, ) -> BOOL; pub fn PowerClearRequest( PowerRequest: HANDLE, RequestType: POWER_REQUEST_TYPE, ) -> BOOL; pub fn RestoreLastError( dwErrCode: DWORD, ); } pub const FILE_SKIP_COMPLETION_PORT_ON_SUCCESS: UCHAR = 0x1; pub const FILE_SKIP_SET_EVENT_ON_HANDLE: UCHAR = 0x2; extern "system" { pub fn SetFileCompletionNotificationModes( FileHandle: 
HANDLE, Flags: UCHAR, ) -> BOOL; } pub const SEM_FAILCRITICALERRORS: UINT = 0x0001; pub const SEM_NOGPFAULTERRORBOX: UINT = 0x0002; pub const SEM_NOALIGNMENTFAULTEXCEPT: UINT = 0x0004; pub const SEM_NOOPENFILEERRORBOX: UINT = 0x8000; extern "system" { pub fn Wow64GetThreadContext( hThread: HANDLE, lpContext: PWOW64_CONTEXT, ) -> BOOL; pub fn Wow64SetThreadContext( hThread: HANDLE, lpContext: *const WOW64_CONTEXT, ) -> BOOL; pub fn Wow64GetThreadSelectorEntry( hThread: HANDLE, dwSelector: DWORD, lpSelectorEntry: PWOW64_LDT_ENTRY, ) -> BOOL; pub fn Wow64SuspendThread( hThread: HANDLE, ) -> DWORD; pub fn DebugSetProcessKillOnExit( KillOnExit: BOOL, ) -> BOOL; pub fn DebugBreakProcess( Process: HANDLE, ) -> BOOL; pub fn PulseEvent( hEvent: HANDLE, ) -> BOOL; pub fn GlobalDeleteAtom( nAtom: ATOM, ) -> ATOM; pub fn InitAtomTable( nSize: DWORD, ) -> BOOL; pub fn DeleteAtom( nAtom: ATOM, ) -> ATOM; pub fn SetHandleCount( uNumber: UINT, ) -> UINT; pub fn RequestDeviceWakeup( hDevice: HANDLE, ) -> BOOL; pub fn CancelDeviceWakeupRequest( hDevice: HANDLE, ) -> BOOL; pub fn GetDevicePowerState( hDevice: HANDLE, pfOn: *mut BOOL, ) -> BOOL; pub fn SetMessageWaitingIndicator( hMsgIndicator: HANDLE, ulMsgCount: ULONG, ) -> BOOL; pub fn SetFileShortNameA( hFile: HANDLE, lpShortName: LPCSTR, ) -> BOOL; pub fn SetFileShortNameW( hFile: HANDLE, lpShortName: LPCWSTR, ) -> BOOL; } pub const HANDLE_FLAG_INHERIT: DWORD = 0x00000001; pub const HANDLE_FLAG_PROTECT_FROM_CLOSE: DWORD = 0x00000002; extern "system" { pub fn LoadModule( lpModuleName: LPCSTR, lpParameterBlock: LPVOID, ) -> DWORD; pub fn WinExec( lpCmdLine: LPCSTR, uCmdShow: UINT, ) -> UINT; // ClearCommBreak // ClearCommError // SetupComm // EscapeCommFunction // GetCommConfig // GetCommMask // GetCommProperties // GetCommModemStatus // GetCommState // GetCommTimeouts // PurgeComm // SetCommBreak // SetCommConfig // SetCommMask // SetCommState // SetCommTimeouts // TransmitCommChar // WaitCommEvent pub fn SetTapePosition( hDevice: HANDLE, dwPositionMethod: DWORD, dwPartition: DWORD, dwOffsetLow: DWORD, dwOffsetHigh: DWORD, bImmediate: BOOL, ) -> DWORD; pub fn GetTapePosition( hDevice: HANDLE, dwPositionType: DWORD, lpdwPartition: LPDWORD, lpdwOffsetLow: LPDWORD, lpdwOffsetHigh: LPDWORD, ) -> DWORD; pub fn PrepareTape( hDevice: HANDLE, dwOperation: DWORD, bImmediate: BOOL, ) -> DWORD; pub fn EraseTape( hDevice: HANDLE, dwEraseType: DWORD, bImmediate: BOOL, ) -> DWORD; pub fn CreateTapePartition( hDevice: HANDLE, dwPartitionMethod: DWORD, dwCount: DWORD, dwSize: DWORD, ) -> DWORD; pub fn WriteTapemark( hDevice: HANDLE, dwTapemarkType: DWORD, dwTapemarkCount: DWORD, bImmediate: BOOL, ) -> DWORD; pub fn GetTapeStatus( hDevice: HANDLE, ) -> DWORD; pub fn GetTapeParameters( hDevice: HANDLE, dwOperation: DWORD, lpdwSize: LPDWORD, lpTapeInformation: LPVOID, ) -> DWORD; pub fn SetTapeParameters( hDevice: HANDLE, dwOperation: DWORD, lpTapeInformation: LPVOID, ) -> DWORD; pub fn MulDiv( nNumber: c_int, nNumerator: c_int, nDenominator: c_int, ) -> c_int; } ENUM!{enum DEP_SYSTEM_POLICY_TYPE { DEPPolicyAlwaysOff = 0, DEPPolicyAlwaysOn, DEPPolicyOptIn, DEPPolicyOptOut, DEPTotalPolicyCount, }} extern "system" { pub fn GetSystemDEPPolicy() -> DEP_SYSTEM_POLICY_TYPE; pub fn GetSystemRegistryQuota( pdwQuotaAllowed: PDWORD, pdwQuotaUsed: PDWORD, ) -> BOOL; pub fn FileTimeToDosDateTime( lpFileTime: *const FILETIME, lpFatDate: LPWORD, lpFatTime: LPWORD, ) -> BOOL; pub fn DosDateTimeToFileTime( wFatDate: WORD, wFatTime: WORD, lpFileTime: LPFILETIME, ) -> BOOL; pub fn 
FormatMessageA( dwFlags: DWORD, lpSource: LPCVOID, dwMessageId: DWORD, dwLanguageId: DWORD, lpBuffer: LPSTR, nSize: DWORD, Arguments: *mut va_list, ) -> DWORD; pub fn FormatMessageW( dwFlags: DWORD, lpSource: LPCVOID, dwMessageId: DWORD, dwLanguageId: DWORD, lpBuffer: LPWSTR, nSize: DWORD, Arguments: *mut va_list, ) -> DWORD; } pub const FORMAT_MESSAGE_IGNORE_INSERTS: DWORD = 0x00000200; pub const FORMAT_MESSAGE_FROM_STRING: DWORD = 0x00000400; pub const FORMAT_MESSAGE_FROM_HMODULE: DWORD = 0x00000800; pub const FORMAT_MESSAGE_FROM_SYSTEM: DWORD = 0x00001000; pub const FORMAT_MESSAGE_ARGUMENT_ARRAY: DWORD = 0x00002000; pub const FORMAT_MESSAGE_MAX_WIDTH_MASK: DWORD = 0x000000FF; pub const FORMAT_MESSAGE_ALLOCATE_BUFFER: DWORD = 0x00000100; extern "system" { pub fn CreateMailslotA( lpName: LPCSTR, nMaxMessageSize: DWORD, lReadTimeout: DWORD, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, ) -> HANDLE; pub fn CreateMailslotW( lpName: LPCWSTR, nMaxMessageSize: DWORD, lReadTimeout: DWORD, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, ) -> HANDLE; pub fn GetMailslotInfo( hMailslot: HANDLE, lpMaxMessageSize: LPDWORD, lpNextSize: LPDWORD, lpMessageCount: LPDWORD, lpReadTimeout: LPDWORD, ) -> BOOL; pub fn SetMailslotInfo( hMailslot: HANDLE, lReadTimeout: DWORD, ) -> BOOL; // pub fn EncryptFileA(); // pub fn EncryptFileW(); // pub fn DecryptFileA(); // pub fn DecryptFileW(); // pub fn FileEncryptionStatusA(); // pub fn FileEncryptionStatusW(); // pub fn OpenEncryptedFileRawA(); // pub fn OpenEncryptedFileRawW(); // pub fn ReadEncryptedFileRaw(); // pub fn WriteEncryptedFileRaw(); // pub fn CloseEncryptedFileRaw(); pub fn lstrcmpA( lpString1: LPCSTR, lpString2: LPCSTR, ) -> c_int; pub fn lstrcmpW( lpString1: LPCWSTR, lpString2: LPCWSTR, ) -> c_int; pub fn lstrcmpiA( lpString1: LPCSTR, lpString2: LPCSTR, ) -> c_int; pub fn lstrcmpiW( lpString1: LPCWSTR, lpString2: LPCWSTR, ) -> c_int; pub fn lstrcpynA( lpString1: LPSTR, lpString2: LPCSTR, iMaxLength: c_int, ) -> LPSTR; pub fn lstrcpynW( lpString1: LPWSTR, lpString2: LPCWSTR, iMaxLength: c_int, ) -> LPWSTR; pub fn lstrcpyA( lpString1: LPSTR, lpString2: LPCSTR, ) -> LPSTR; pub fn lstrcpyW( lpString1: LPWSTR, lpString2: LPCWSTR, ) -> LPWSTR; pub fn lstrcatA( lpString1: LPSTR, lpString2: LPCSTR, ) -> LPSTR; pub fn lstrcatW( lpString1: LPWSTR, lpString2: LPCWSTR, ) -> LPWSTR; pub fn lstrlenA( lpString: LPCSTR, ) -> c_int; pub fn lstrlenW( lpString: LPCWSTR, ) -> c_int; pub fn OpenFile( lpFileName: LPCSTR, lpReOpenBuff: LPOFSTRUCT, uStyle: UINT, ) -> HFILE; pub fn _lopen( lpPathName: LPCSTR, iReadWrite: c_int, ) -> HFILE; pub fn _lcreat( lpPathName: LPCSTR, iAttrubute: c_int, ) -> HFILE; pub fn _lread( hFile: HFILE, lpBuffer: LPVOID, uBytes: UINT, ) -> UINT; pub fn _lwrite( hFile: HFILE, lpBuffer: LPCCH, uBytes: UINT, ) -> UINT; pub fn _hread( hFile: HFILE, lpBuffer: LPVOID, lBytes: c_long, ) -> c_long; pub fn _hwrite( hFile: HFILE, lpBuffer: LPCCH, lBytes: c_long, ) -> c_long; pub fn _lclose( hFile: HFILE, ) -> HFILE; pub fn _llseek( hFile: HFILE, lOffset: LONG, iOrigin: c_int, ) -> LONG; // pub fn IsTextUnicode(); // pub fn SignalObjectAndWait(); pub fn BackupRead( hFile: HANDLE, lpBuffer: LPBYTE, nNumberOfBytesToRead: DWORD, lpNumberOfBytesRead: LPDWORD, bAbort: BOOL, bProcessSecurity: BOOL, lpContext: *mut LPVOID, ) -> BOOL; pub fn BackupSeek( hFile: HANDLE, dwLowBytesToSeek: DWORD, dwHighBytesToSeek: DWORD, lpdwLowByteSeeked: LPDWORD, lpdwHighByteSeeked: LPDWORD, lpContext: *mut LPVOID, ) -> BOOL; pub fn BackupWrite( hFile: HANDLE, lpBuffer: LPBYTE, 
nNumberOfBytesToWrite: DWORD, lpNumberOfBytesWritten: LPDWORD, bAbort: BOOL, bProcessSecurity: BOOL, lpContext: *mut LPVOID, ) -> BOOL; } //2886 pub const STARTF_USESHOWWINDOW: DWORD = 0x00000001; pub const STARTF_USESIZE: DWORD = 0x00000002; pub const STARTF_USEPOSITION: DWORD = 0x00000004; pub const STARTF_USECOUNTCHARS: DWORD = 0x00000008; pub const STARTF_USEFILLATTRIBUTE: DWORD = 0x00000010; pub const STARTF_RUNFULLSCREEN: DWORD = 0x00000020; pub const STARTF_FORCEONFEEDBACK: DWORD = 0x00000040; pub const STARTF_FORCEOFFFEEDBACK: DWORD = 0x00000080; pub const STARTF_USESTDHANDLES: DWORD = 0x00000100; pub const STARTF_USEHOTKEY: DWORD = 0x00000200; pub const STARTF_TITLEISLINKNAME: DWORD = 0x00000800; pub const STARTF_TITLEISAPPID: DWORD = 0x00001000; pub const STARTF_PREVENTPINNING: DWORD = 0x00002000; pub const STARTF_UNTRUSTEDSOURCE: DWORD = 0x00008000; STRUCT!{struct STARTUPINFOEXA { StartupInfo: STARTUPINFOA, lpAttributeList: LPPROC_THREAD_ATTRIBUTE_LIST, }} pub type LPSTARTUPINFOEXA = *mut STARTUPINFOEXA; STRUCT!{struct STARTUPINFOEXW { StartupInfo: STARTUPINFOW, lpAttributeList: LPPROC_THREAD_ATTRIBUTE_LIST, }} pub type LPSTARTUPINFOEXW = *mut STARTUPINFOEXW; extern "system" { pub fn OpenMutexA( dwDesiredAccess: DWORD, bInheritHandle: BOOL, lpName: LPCSTR, ) -> HANDLE; pub fn CreateSemaphoreA( lpSemaphoreAttributes: LPSECURITY_ATTRIBUTES, lInitialCount: LONG, lMaximumCount: LONG, lpName: LPCSTR, ) -> HANDLE; pub fn OpenSemaphoreA( dwDesiredAccess: DWORD, bInheritHandle: BOOL, lpName: LPCSTR, ) -> HANDLE; pub fn CreateWaitableTimerA( lpTimerAttributes: LPSECURITY_ATTRIBUTES, bManualReset: BOOL, lpTimerName: LPCSTR, ) -> HANDLE; pub fn OpenWaitableTimerA( dwDesiredAccess: DWORD, bInheritHandle: BOOL, lpTimerName: LPCSTR, ) -> HANDLE; pub fn CreateSemaphoreExA( lpSemaphoreAttributes: LPSECURITY_ATTRIBUTES, lInitialCount: LONG, lMaximumCount: LONG, lpName: LPCSTR, dwFlags: DWORD, dwDesiredAccess: DWORD, ) -> HANDLE; pub fn CreateWaitableTimerExA( lpTimerAttributes: LPSECURITY_ATTRIBUTES, lpTimerName: LPCSTR, dwFlags: DWORD, dwDesiredAccess: DWORD, ) -> HANDLE; pub fn CreateFileMappingA( hFile: HANDLE, lpAttributes: LPSECURITY_ATTRIBUTES, flProtect: DWORD, dwMaximumSizeHigh: DWORD, dwMaximumSizeLow: DWORD, lpName: LPCSTR, ) -> HANDLE; pub fn CreateFileMappingNumaA( hFile: HANDLE, lpFileMappingAttributes: LPSECURITY_ATTRIBUTES, flProtect: DWORD, dwMaximumSizeHigh: DWORD, dwMaximumSizeLow: DWORD, lpName: LPCSTR, nndPreferred: DWORD, ) -> HANDLE; pub fn OpenFileMappingA( dwDesiredAccess: DWORD, bInheritHandle: BOOL, lpName: LPCSTR, ) -> HANDLE; pub fn GetLogicalDriveStringsA( nBufferLength: DWORD, lpBuffer: LPSTR, ) -> DWORD; pub fn LoadPackagedLibrary( lpwLibFileName: LPCWSTR, Reserved: DWORD, ) -> HMODULE; pub fn QueryFullProcessImageNameA( hProcess: HANDLE, dwFlags: DWORD, lpExeName: LPSTR, lpdwSize: PDWORD, ) -> BOOL; pub fn QueryFullProcessImageNameW( hProcess: HANDLE, dwFlags: DWORD, lpExeName: LPWSTR, lpdwSize: PDWORD, ) -> BOOL; } //3233 extern "system" { pub fn GetStartupInfoA( lpStartupInfo: LPSTARTUPINFOA, ); pub fn GetFirmwareEnvironmentVariableA( lpName: LPCSTR, lpGuid: LPCSTR, pBuffer: PVOID, nSize: DWORD, ) -> DWORD; pub fn GetFirmwareEnvironmentVariableW( lpName: LPCWSTR, lpGuid: LPCWSTR, pBuffer: PVOID, nSize: DWORD, ) -> DWORD; pub fn GetFirmwareEnvironmentVariableExA( lpName: LPCSTR, lpGuid: LPCSTR, pBuffer: PVOID, nSize: DWORD, pdwAttribubutes: PDWORD, ) -> DWORD; pub fn GetFirmwareEnvironmentVariableExW( lpName: LPCWSTR, lpGuid: LPCWSTR, pBuffer: PVOID, nSize: 
DWORD, pdwAttribubutes: PDWORD, ) -> DWORD; pub fn SetFirmwareEnvironmentVariableA( lpName: LPCSTR, lpGuid: LPCSTR, pValue: PVOID, nSize: DWORD, ) -> BOOL; pub fn SetFirmwareEnvironmentVariableW( lpName: LPCWSTR, lpGuid: LPCWSTR, pValue: PVOID, nSize: DWORD, ) -> BOOL; pub fn SetFirmwareEnvironmentVariableExA( lpName: LPCSTR, lpGuid: LPCSTR, pValue: PVOID, nSize: DWORD, dwAttributes: DWORD, ) -> BOOL; pub fn SetFirmwareEnvironmentVariableExW( lpName: LPCWSTR, lpGuid: LPCWSTR, pValue: PVOID, nSize: DWORD, dwAttributes: DWORD, ) -> BOOL; pub fn GetFirmwareType( FirmwareType: PFIRMWARE_TYPE, ) -> BOOL; pub fn IsNativeVhdBoot( NativeVhdBoot: PBOOL, ) -> BOOL; pub fn FindResourceA( hModule: HMODULE, lpName: LPCSTR, lpType: LPCSTR, ) -> HRSRC; pub fn FindResourceExA( hModule: HMODULE, lpName: LPCSTR, lpType: LPCSTR, wLanguage: WORD, ) -> HRSRC; pub fn EnumResourceTypesA( hModule: HMODULE, lpEnumFunc: ENUMRESTYPEPROCA, lParam: LONG_PTR, ) -> BOOL; pub fn EnumResourceTypesW( hModule: HMODULE, lpEnumFunc: ENUMRESTYPEPROCW, lParam: LONG_PTR, ) -> BOOL; pub fn EnumResourceNamesA( hModule: HMODULE, lpType: LPCSTR, lpEnumFunc: ENUMRESNAMEPROCA, lParam: LONG_PTR, ) -> BOOL; pub fn EnumResourceLanguagesA( hModule: HMODULE, lpType: LPCSTR, lpName: LPCSTR, lpEnumFunc: ENUMRESLANGPROCA, lParam: LONG_PTR, ) -> BOOL; pub fn EnumResourceLanguagesW( hModule: HMODULE, lpType: LPCWSTR, lpName: LPCWSTR, lpEnumFunc: ENUMRESLANGPROCW, lParam: LONG_PTR, ) -> BOOL; pub fn BeginUpdateResourceA( pFileName: LPCSTR, bDeleteExistingResources: BOOL, ) -> HANDLE; pub fn BeginUpdateResourceW( pFileName: LPCWSTR, bDeleteExistingResources: BOOL, ) -> HANDLE; pub fn UpdateResourceA( hUpdate: HANDLE, lpType: LPCSTR, lpName: LPCSTR, wLanguage: WORD, lpData: LPVOID, cb: DWORD, ) -> BOOL; pub fn UpdateResourceW( hUpdate: HANDLE, lpType: LPCWSTR, lpName: LPCWSTR, wLanguage: WORD, lpData: LPVOID, cb: DWORD, ) -> BOOL; pub fn EndUpdateResourceA( hUpdate: HANDLE, fDiscard: BOOL, ) -> BOOL; pub fn EndUpdateResourceW( hUpdate: HANDLE, fDiscard: BOOL, ) -> BOOL; pub fn GlobalAddAtomA( lpString: LPCSTR, ) -> ATOM; pub fn GlobalAddAtomW( lpString: LPCWSTR, ) -> ATOM; pub fn GlobalAddAtomExA( lpString: LPCSTR, Flags: DWORD, ) -> ATOM; pub fn GlobalAddAtomExW( lpString: LPCWSTR, Flags: DWORD, ) -> ATOM; pub fn GlobalFindAtomA( lpString: LPCSTR, ) -> ATOM; pub fn GlobalFindAtomW( lpString: LPCWSTR, ) -> ATOM; pub fn GlobalGetAtomNameA( nAtom: ATOM, lpBuffer: LPSTR, nSize: c_int, ) -> UINT; pub fn GlobalGetAtomNameW( nAtom: ATOM, lpBuffer: LPWSTR, nSize: c_int, ) -> UINT; pub fn AddAtomA( lpString: LPCSTR, ) -> ATOM; pub fn AddAtomW( lpString: LPCWSTR, ) -> ATOM; pub fn FindAtomA( lpString: LPCSTR, ) -> ATOM; pub fn FindAtomW( lpString: LPCWSTR, ) -> ATOM; pub fn GetAtomNameA( nAtom: ATOM, lpBuffer: LPSTR, nSize: c_int, ) -> UINT; pub fn GetAtomNameW( nAtom: ATOM, lpBuffer: LPWSTR, nSize: c_int, ) -> UINT; pub fn GetProfileIntA( lpAppName: LPCSTR, lpKeyName: LPCSTR, nDefault: INT, ) -> UINT; pub fn GetProfileIntW( lpAppName: LPCWSTR, lpKeyName: LPCWSTR, nDefault: INT, ) -> UINT; pub fn GetProfileStringA( lpAppName: LPCSTR, lpKeyName: LPCSTR, lpDefault: LPCSTR, lpReturnedString: LPSTR, nSize: DWORD, ) -> DWORD; pub fn GetProfileStringW( lpAppName: LPCWSTR, lpKeyName: LPCWSTR, lpDefault: LPCWSTR, lpReturnedString: LPWSTR, nSize: DWORD, ) -> DWORD; pub fn WriteProfileStringA( lpAppName: LPCSTR, lpKeyName: LPCSTR, lpString: LPCSTR, ) -> BOOL; pub fn WriteProfileStringW( lpAppName: LPCWSTR, lpKeyName: LPCWSTR, lpString: LPCWSTR, ) -> BOOL; pub fn 
GetProfileSectionA( lpAppName: LPCSTR, lpReturnedString: LPSTR, nSize: DWORD, ) -> DWORD; pub fn GetProfileSectionW( lpAppName: LPCWSTR, lpReturnedString: LPWSTR, nSize: DWORD, ) -> DWORD; pub fn WriteProfileSectionA( lpAppName: LPCSTR, lpString: LPCSTR, ) -> BOOL; pub fn WriteProfileSectionW( lpAppName: LPCWSTR, lpString: LPCWSTR, ) -> BOOL; pub fn GetPrivateProfileIntA( lpAppName: LPCSTR, lpKeyName: LPCSTR, nDefault: INT, lpFileName: LPCSTR, ) -> UINT; pub fn GetPrivateProfileIntW( lpAppName: LPCWSTR, lpKeyName: LPCWSTR, nDefault: INT, lpFileName: LPCWSTR, ) -> UINT; pub fn GetPrivateProfileStringA( lpAppName: LPCSTR, lpKeyName: LPCSTR, lpDefault: LPCSTR, lpReturnedString: LPSTR, nSize: DWORD, lpFileName: LPCSTR, ) -> DWORD; pub fn GetPrivateProfileStringW( lpAppName: LPCWSTR, lpKeyName: LPCWSTR, lpDefault: LPCWSTR, lpReturnedString: LPWSTR, nSize: DWORD, lpFileName: LPCWSTR, ) -> DWORD; pub fn WritePrivateProfileStringA( lpAppName: LPCSTR, lpKeyName: LPCSTR, lpString: LPCSTR, lpFileName: LPCSTR, ) -> BOOL; pub fn WritePrivateProfileStringW( lpAppName: LPCWSTR, lpKeyName: LPCWSTR, lpString: LPCWSTR, lpFileName: LPCWSTR, ) -> BOOL; pub fn GetPrivateProfileSectionA( lpAppName: LPCSTR, lpReturnedString: LPSTR, nSize: DWORD, lpFileName: LPCSTR, ) -> DWORD; pub fn GetPrivateProfileSectionW( lpAppName: LPCWSTR, lpReturnedString: LPWSTR, nSize: DWORD, lpFileName: LPCWSTR, ) -> DWORD; pub fn WritePrivateProfileSectionA( lpAppName: LPCSTR, lpString: LPCSTR, lpFileName: LPCSTR, ) -> BOOL; pub fn WritePrivateProfileSectionW( lpAppName: LPCWSTR, lpString: LPCWSTR, lpFileName: LPCWSTR, ) -> BOOL; pub fn GetPrivateProfileSectionNamesA( lpszReturnBuffer: LPSTR, nSize: DWORD, lpFileName: LPCSTR, ) -> DWORD; pub fn GetPrivateProfileSectionNamesW( lpszReturnBuffer: LPWSTR, nSize: DWORD, lpFileName: LPCWSTR, ) -> DWORD; pub fn GetPrivateProfileStructA( lpszSection: LPCSTR, lpszKey: LPCSTR, lpStruct: LPVOID, uSizeStruct: UINT, szFile: LPCSTR, ) -> BOOL; pub fn GetPrivateProfileStructW( lpszSection: LPCWSTR, lpszKey: LPCWSTR, lpStruct: LPVOID, uSizeStruct: UINT, szFile: LPCWSTR, ) -> BOOL; pub fn WritePrivateProfileStructA( lpszSection: LPCSTR, lpszKey: LPCSTR, lpStruct: LPVOID, uSizeStruct: UINT, szFile: LPCSTR, ) -> BOOL; pub fn WritePrivateProfileStructW( lpszSection: LPCWSTR, lpszKey: LPCWSTR, lpStruct: LPVOID, uSizeStruct: UINT, szFile: LPCWSTR, ) -> BOOL; pub fn Wow64EnableWow64FsRedirection( Wow64FsEnableRedirection: BOOLEAN, ) -> BOOLEAN; pub fn SetDllDirectoryA( lpPathName: LPCSTR, ) -> BOOL; pub fn SetDllDirectoryW( lpPathName: LPCWSTR, ) -> BOOL; pub fn GetDllDirectoryA( nBufferLength: DWORD, lpBuffer: LPSTR, ) -> DWORD; pub fn GetDllDirectoryW( nBufferLength: DWORD, lpBuffer: LPWSTR, ) -> DWORD; pub fn SetSearchPathMode( Flags: DWORD, ) -> BOOL; pub fn CreateDirectoryExA( lpTemplateDirectory: LPCSTR, lpNewDirectory: LPCSTR, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, ) -> BOOL; pub fn CreateDirectoryExW( lpTemplateDirectory: LPCWSTR, lpNewDirectory: LPCWSTR, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, ) -> BOOL; pub fn CreateDirectoryTransactedA( lpTemplateDirectory: LPCSTR, lpNewDirectory: LPCSTR, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, hTransaction: HANDLE, ) -> BOOL; pub fn CreateDirectoryTransactedW( lpTemplateDirectory: LPCWSTR, lpNewDirectory: LPCWSTR, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, hTransaction: HANDLE, ) -> BOOL; pub fn RemoveDirectoryTransactedA( lpPathName: LPCSTR, hTransaction: HANDLE, ) -> BOOL; pub fn RemoveDirectoryTransactedW( lpPathName: LPCWSTR, hTransaction: 
HANDLE, ) -> BOOL; pub fn GetFullPathNameTransactedA( lpFileName: LPCSTR, nBufferLength: DWORD, lpBuffer: LPSTR, lpFilePart: *mut LPSTR, hTransaction: HANDLE, ) -> DWORD; pub fn GetFullPathNameTransactedW( lpFileName: LPCWSTR, nBufferLength: DWORD, lpBuffer: LPWSTR, lpFilePart: *mut LPWSTR, hTransaction: HANDLE, ) -> DWORD; pub fn DefineDosDeviceA( dwFlags: DWORD, lpDeviceName: LPCSTR, lpTargetPath: LPCSTR, ) -> BOOL; pub fn QueryDosDeviceA( lpDeviceName: LPCSTR, lpTargetPath: LPSTR, ucchMax: DWORD, ) -> DWORD; pub fn CreateFileTransactedA( lpFileName: LPCSTR, dwDesiredAccess: DWORD, dwShareMode: DWORD, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, dwCreationDisposition: DWORD, dwFlagsAndAttributes: DWORD, hTemplateFile: HANDLE, hTransaction: HANDLE, pusMiniVersion: PUSHORT, lpExtendedParameter: PVOID, ) -> HANDLE; pub fn CreateFileTransactedW( lpFileName: LPCWSTR, dwDesiredAccess: DWORD, dwShareMode: DWORD, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, dwCreationDisposition: DWORD, dwFlagsAndAttributes: DWORD, hTemplateFile: HANDLE, hTransaction: HANDLE, pusMiniVersion: PUSHORT, lpExtendedParameter: PVOID, ) -> HANDLE; pub fn ReOpenFile( hOriginalFile: HANDLE, dwDesiredAccess: DWORD, dwShareMode: DWORD, dwFlags: DWORD, ) -> HANDLE; pub fn SetFileAttributesTransactedA( lpFileName: LPCSTR, dwFileAttributes: DWORD, hTransaction: HANDLE, ) -> BOOL; pub fn SetFileAttributesTransactedW( lpFileName: LPCWSTR, dwFileAttributes: DWORD, hTransaction: HANDLE, ) -> BOOL; pub fn GetFileAttributesTransactedA( lpFileName: LPCSTR, fInfoLevelId: GET_FILEEX_INFO_LEVELS, lpFileInformation: LPVOID, hTransaction: HANDLE, ) -> BOOL; pub fn GetFileAttributesTransactedW( lpFileName: LPCWSTR, fInfoLevelId: GET_FILEEX_INFO_LEVELS, lpFileInformation: LPVOID, hTransaction: HANDLE, ) -> BOOL; pub fn GetCompressedFileSizeTransactedA( lpFileName: LPCSTR, lpFileSizeHigh: LPDWORD, hTransaction: HANDLE, ) -> DWORD; pub fn GetCompressedFileSizeTransactedW( lpFileName: LPCWSTR, lpFileSizeHigh: LPDWORD, hTransaction: HANDLE, ) -> DWORD; pub fn DeleteFileTransactedA( lpFileName: LPCSTR, hTransaction: HANDLE, ) -> BOOL; pub fn DeleteFileTransactedW( lpFileName: LPCWSTR, hTransaction: HANDLE, ) -> BOOL; pub fn CheckNameLegalDOS8Dot3A( lpName: LPCSTR, lpOemName: LPSTR, OemNameSize: DWORD, pbNameContainsSpaces: PBOOL, pbNameLegal: PBOOL, ) -> BOOL; pub fn CheckNameLegalDOS8Dot3W( lpName: LPCWSTR, lpOemName: LPSTR, OemNameSize: DWORD, pbNameContainsSpaces: PBOOL, pbNameLegal: PBOOL, ) -> BOOL; pub fn FindFirstFileTransactedA( lpFileName: LPCSTR, fInfoLevelId: FINDEX_INFO_LEVELS, lpFindFileData: LPVOID, fSearchOp: FINDEX_SEARCH_OPS, lpSearchFilter: LPVOID, dwAdditionalFlags: DWORD, hTransaction: HANDLE, ) -> HANDLE; pub fn FindFirstFileTransactedW( lpFileName: LPCWSTR, fInfoLevelId: FINDEX_INFO_LEVELS, lpFindFileData: LPVOID, fSearchOp: FINDEX_SEARCH_OPS, lpSearchFilter: LPVOID, dwAdditionalFlags: DWORD, hTransaction: HANDLE, ) -> HANDLE; pub fn CopyFileA( lpExistingFileName: LPCSTR, lpNewFileName: LPCSTR, bFailIfExists: BOOL, ) -> BOOL; pub fn CopyFileW( lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, bFailIfExists: BOOL, ) -> BOOL; } FN!{stdcall LPPROGRESS_ROUTINE( TotalFileSize: LARGE_INTEGER, TotalBytesTransferred: LARGE_INTEGER, StreamSize: LARGE_INTEGER, StreamBytesTransferred: LARGE_INTEGER, dwStreamNumber: DWORD, dwCallbackReason: DWORD, hSourceFile: HANDLE, hDestinationFile: HANDLE, lpData: LPVOID, ) -> DWORD} extern "system" { pub fn CopyFileExA( lpExistingFileName: LPCSTR, lpNewFileName: LPCSTR, lpProgressRoutine: LPPROGRESS_ROUTINE,
lpData: LPVOID, pbCancel: LPBOOL, dwCopyFlags: DWORD, ) -> BOOL; pub fn CopyFileExW( lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, lpProgressRoutine: LPPROGRESS_ROUTINE, lpData: LPVOID, pbCancel: LPBOOL, dwCopyFlags: DWORD, ) -> BOOL; pub fn CopyFileTransactedA( lpExistingFileName: LPCSTR, lpNewFileName: LPCSTR, lpProgressRoutine: LPPROGRESS_ROUTINE, lpData: LPVOID, pbCancel: LPBOOL, dwCopyFlags: DWORD, hTransaction: HANDLE, ) -> BOOL; pub fn CopyFileTransactedW( lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, lpProgressRoutine: LPPROGRESS_ROUTINE, lpData: LPVOID, pbCancel: LPBOOL, dwCopyFlags: DWORD, hTransaction: HANDLE, ) -> BOOL; } ENUM!{enum COPYFILE2_MESSAGE_TYPE { COPYFILE2_CALLBACK_NONE = 0, COPYFILE2_CALLBACK_CHUNK_STARTED, COPYFILE2_CALLBACK_CHUNK_FINISHED, COPYFILE2_CALLBACK_STREAM_STARTED, COPYFILE2_CALLBACK_STREAM_FINISHED, COPYFILE2_CALLBACK_POLL_CONTINUE, COPYFILE2_CALLBACK_ERROR, COPYFILE2_CALLBACK_MAX, }} ENUM!{enum COPYFILE2_MESSAGE_ACTION { COPYFILE2_PROGRESS_CONTINUE = 0, COPYFILE2_PROGRESS_CANCEL, COPYFILE2_PROGRESS_STOP, COPYFILE2_PROGRESS_QUIET, COPYFILE2_PROGRESS_PAUSE, }} ENUM!{enum COPYFILE2_COPY_PHASE { COPYFILE2_PHASE_NONE = 0, COPYFILE2_PHASE_PREPARE_SOURCE, COPYFILE2_PHASE_PREPARE_DEST, COPYFILE2_PHASE_READ_SOURCE, COPYFILE2_PHASE_WRITE_DESTINATION, COPYFILE2_PHASE_SERVER_COPY, COPYFILE2_PHASE_NAMEGRAFT_COPY, COPYFILE2_PHASE_MAX, }} STRUCT!{struct COPYFILE2_MESSAGE_ChunkStarted { dwStreamNumber: DWORD, dwReserved: DWORD, hSourceFile: HANDLE, hDestinationFile: HANDLE, uliChunkNumber: ULARGE_INTEGER, uliChunkSize: ULARGE_INTEGER, uliStreamSize: ULARGE_INTEGER, uliTotalFileSize: ULARGE_INTEGER, }} STRUCT!{struct COPYFILE2_MESSAGE_ChunkFinished { dwStreamNumber: DWORD, dwFlags: DWORD, hSourceFile: HANDLE, hDestinationFile: HANDLE, uliChunkNumber: ULARGE_INTEGER, uliChunkSize: ULARGE_INTEGER, uliStreamSize: ULARGE_INTEGER, uliStreamBytesTransferred: ULARGE_INTEGER, uliTotalFileSize: ULARGE_INTEGER, uliTotalBytesTransferred: ULARGE_INTEGER, }} STRUCT!{struct COPYFILE2_MESSAGE_StreamStarted { dwStreamNumber: DWORD, dwReserved: DWORD, hSourceFile: HANDLE, hDestinationFile: HANDLE, uliStreamSize: ULARGE_INTEGER, uliTotalFileSize: ULARGE_INTEGER, }} STRUCT!{struct COPYFILE2_MESSAGE_StreamFinished { dwStreamNumber: DWORD, dwReserved: DWORD, hSourceFile: HANDLE, hDestinationFile: HANDLE, uliStreamSize: ULARGE_INTEGER, uliStreamBytesTransferred: ULARGE_INTEGER, uliTotalFileSize: ULARGE_INTEGER, uliTotalBytesTransferred: ULARGE_INTEGER, }} STRUCT!{struct COPYFILE2_MESSAGE_PollContinue { dwReserved: DWORD, }} STRUCT!{struct COPYFILE2_MESSAGE_Error { CopyPhase: COPYFILE2_COPY_PHASE, dwStreamNumber: DWORD, hrFailure: HRESULT, dwReserved: DWORD, uliChunkNumber: ULARGE_INTEGER, uliStreamSize: ULARGE_INTEGER, uliStreamBytesTransferred: ULARGE_INTEGER, uliTotalFileSize: ULARGE_INTEGER, uliTotalBytesTransferred: ULARGE_INTEGER, }} UNION!{union COPYFILE2_MESSAGE_Info { [u64; 8] [u64; 9], ChunkStarted ChunkStarted_mut: COPYFILE2_MESSAGE_ChunkStarted, ChunkFinished ChunkFinished_mut: COPYFILE2_MESSAGE_ChunkFinished, StreamStarted StreamStarted_mut: COPYFILE2_MESSAGE_StreamStarted, StreamFinished StreamFinished_mut: COPYFILE2_MESSAGE_StreamFinished, PollContinue PollContinue_mut: COPYFILE2_MESSAGE_PollContinue, Error Error_mut: COPYFILE2_MESSAGE_Error, }} STRUCT!{struct COPYFILE2_MESSAGE { Type: COPYFILE2_MESSAGE_TYPE, dwPadding: DWORD, Info: COPYFILE2_MESSAGE_Info, }} FN!{stdcall PCOPYFILE2_PROGRESS_ROUTINE( pMessage: *const COPYFILE2_MESSAGE, pvCallbackContext: PVOID, )
-> COPYFILE2_MESSAGE_ACTION} STRUCT!{struct COPYFILE2_EXTENDED_PARAMETERS { dwSize: DWORD, dwCopyFlags: DWORD, pfCancel: *mut BOOL, pProgressRoutine: PCOPYFILE2_PROGRESS_ROUTINE, pvCallbackContext: PVOID, }} extern "system" { pub fn CopyFile2( pwszExistingFileName: PCWSTR, pwszNewFileName: PCWSTR, pExtendedParameters: *mut COPYFILE2_EXTENDED_PARAMETERS, ) -> HRESULT; pub fn MoveFileA( lpExistingFileName: LPCSTR, lpNewFileName: LPCSTR, ) -> BOOL; pub fn MoveFileW( lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, ) -> BOOL; pub fn MoveFileExA( lpExistingFileName: LPCSTR, lpNewFileName: LPCSTR, dwFlags: DWORD, ) -> BOOL; pub fn MoveFileExW( lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, dwFlags: DWORD, ) -> BOOL; pub fn MoveFileWithProgressA( lpExistingFileName: LPCSTR, lpNewFileName: LPCSTR, lpProgressRoutine: LPPROGRESS_ROUTINE, lpData: LPVOID, dwFlags: DWORD, ) -> BOOL; pub fn MoveFileWithProgressW( lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, lpProgressRoutine: LPPROGRESS_ROUTINE, lpData: LPVOID, dwFlags: DWORD, ) -> BOOL; pub fn MoveFileTransactedA( lpExistingFileName: LPCSTR, lpNewFileName: LPCSTR, lpProgressRoutine: LPPROGRESS_ROUTINE, lpData: LPVOID, dwFlags: DWORD, hTransaction: HANDLE, ) -> BOOL; pub fn MoveFileTransactedW( lpExistingFileName: LPCWSTR, lpNewFileName: LPCWSTR, lpProgressRoutine: LPPROGRESS_ROUTINE, lpData: LPVOID, dwFlags: DWORD, hTransaction: HANDLE, ) -> BOOL; } pub const MOVEFILE_REPLACE_EXISTING: DWORD = 0x00000001; pub const MOVEFILE_COPY_ALLOWED: DWORD = 0x00000002; pub const MOVEFILE_DELAY_UNTIL_REBOOT: DWORD = 0x00000004; pub const MOVEFILE_WRITE_THROUGH: DWORD = 0x00000008; pub const MOVEFILE_CREATE_HARDLINK: DWORD = 0x00000010; pub const MOVEFILE_FAIL_IF_NOT_TRACKABLE: DWORD = 0x00000020; extern "system" { pub fn ReplaceFileA( lpReplacedFileName: LPCSTR, lpReplacementFileName: LPCSTR, lpBackupFileName: LPCSTR, dwReplaceFlags: DWORD, lpExclude: LPVOID, lpReserved: LPVOID, ) -> BOOL; pub fn ReplaceFileW( lpReplacedFileName: LPCWSTR, lpReplacementFileName: LPCWSTR, lpBackupFileName: LPCWSTR, dwReplaceFlags: DWORD, lpExclude: LPVOID, lpReserved: LPVOID, ) -> BOOL; pub fn CreateHardLinkA( lpFileName: LPCSTR, lpExistingFileName: LPCSTR, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, ) -> BOOL; pub fn CreateHardLinkW( lpFileName: LPCWSTR, lpExistingFileName: LPCWSTR, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, ) -> BOOL; pub fn CreateHardLinkTransactedA( lpFileName: LPCSTR, lpExistingFileName: LPCSTR, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, hTransaction: HANDLE, ) -> BOOL; pub fn CreateHardLinkTransactedW( lpFileName: LPCWSTR, lpExistingFileName: LPCWSTR, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, hTransaction: HANDLE, ) -> BOOL; pub fn FindFirstStreamTransactedW( lpFileName: LPCWSTR, InfoLevel: STREAM_INFO_LEVELS, lpFindStreamData: LPVOID, dwFlags: DWORD, hTransaction: HANDLE, ) -> HANDLE; pub fn FindFirstFileNameTransactedW( lpFileName: LPCWSTR, dwFlags: DWORD, StringLength: LPDWORD, LinkName: PWSTR, hTransaction: HANDLE, ) -> HANDLE; pub fn CreateNamedPipeA( lpName: LPCSTR, dwOpenMode: DWORD, dwPipeMode: DWORD, nMaxInstances: DWORD, nOutBufferSize: DWORD, nInBufferSize: DWORD, nDefaultTimeOut: DWORD, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, ) -> HANDLE; pub fn GetNamedPipeHandleStateA( hNamedPipe: HANDLE, lpState: LPDWORD, lpCurInstances: LPDWORD, lpMaxCollectionCount: LPDWORD, lpCollectDataTimeout: LPDWORD, lpUserName: LPSTR, nMaxUserNameSize: DWORD, ) -> BOOL; pub fn CallNamedPipeA( lpNamedPipeName: LPCSTR, lpInBuffer: LPVOID, nInBufferSize: DWORD,
lpOutBuffer: LPVOID, nOutBufferSize: DWORD, lpBytesRead: LPDWORD, nTimeOut: DWORD, ) -> BOOL; pub fn WaitNamedPipeA( lpNamedPipeName: LPCSTR, nTimeOut: DWORD, ) -> BOOL; pub fn GetNamedPipeClientComputerNameA( Pipe: HANDLE, ClientComputerName: LPSTR, ClientComputerNameLength: ULONG, ) -> BOOL; pub fn GetNamedPipeClientProcessId( Pipe: HANDLE, ClientProcessId: PULONG, ) -> BOOL; pub fn GetNamedPipeClientSessionId( Pipe: HANDLE, ClientSessionId: PULONG, ) -> BOOL; pub fn GetNamedPipeServerProcessId( Pipe: HANDLE, ServerProcessId: PULONG, ) -> BOOL; pub fn GetNamedPipeServerSessionId( Pipe: HANDLE, ServerSessionId: PULONG, ) -> BOOL; pub fn SetVolumeLabelA( lpRootPathName: LPCSTR, lpVolumeName: LPCSTR, ) -> BOOL; pub fn SetVolumeLabelW( lpRootPathName: LPCWSTR, lpVolumeName: LPCWSTR, ) -> BOOL; pub fn SetFileBandwidthReservation( hFile: HANDLE, nPeriodMilliseconds: DWORD, nBytesPerPeriod: DWORD, bDiscardable: BOOL, lpTransferSize: LPDWORD, lpNumOutstandingRequests: LPDWORD, ) -> BOOL; pub fn GetFileBandwidthReservation( hFile: HANDLE, lpPeriodMilliseconds: LPDWORD, lpBytesPerPeriod: LPDWORD, pDiscardable: LPBOOL, lpTransferSize: LPDWORD, lpNumOutstandingRequests: LPDWORD, ) -> BOOL; // pub fn ClearEventLogA(); // pub fn ClearEventLogW(); // pub fn BackupEventLogA(); // pub fn BackupEventLogW(); // pub fn CloseEventLog(); pub fn DeregisterEventSource( hEventLog: HANDLE, ) -> BOOL; // pub fn NotifyChangeEventLog(); // pub fn GetNumberOfEventLogRecords(); // pub fn GetOldestEventLogRecord(); // pub fn OpenEventLogA(); // pub fn OpenEventLogW(); pub fn RegisterEventSourceA( lpUNCServerName: LPCSTR, lpSourceName: LPCSTR, ) -> HANDLE; pub fn RegisterEventSourceW( lpUNCServerName: LPCWSTR, lpSourceName: LPCWSTR, ) -> HANDLE; // pub fn OpenBackupEventLogA(); // pub fn OpenBackupEventLogW(); // pub fn ReadEventLogA(); // pub fn ReadEventLogW(); pub fn ReportEventA( hEventLog: HANDLE, wType: WORD, wCategory: WORD, dwEventID: DWORD, lpUserSid: PSID, wNumStrings: WORD, dwDataSize: DWORD, lpStrings: *mut LPCSTR, lpRawData: LPVOID, ) -> BOOL; pub fn ReportEventW( hEventLog: HANDLE, wType: WORD, wCategory: WORD, dwEventID: DWORD, lpUserSid: PSID, wNumStrings: WORD, dwDataSize: DWORD, lpStrings: *mut LPCWSTR, lpRawData: LPVOID, ) -> BOOL; // pub fn GetEventLogInformation(); // pub fn OperationStart(); // pub fn OperationEnd(); // pub fn AccessCheckAndAuditAlarmA(); // pub fn AccessCheckByTypeAndAuditAlarmA(); // pub fn AccessCheckByTypeResultListAndAuditAlarmA(); // pub fn AccessCheckByTypeResultListAndAuditAlarmByHandleA(); // pub fn ObjectOpenAuditAlarmA(); // pub fn ObjectPrivilegeAuditAlarmA(); // pub fn ObjectCloseAuditAlarmA(); // pub fn ObjectDeleteAuditAlarmA(); // pub fn PrivilegedServiceAuditAlarmA(); // pub fn AddConditionalAce(); // pub fn SetFileSecurityA(); // pub fn GetFileSecurityA(); pub fn ReadDirectoryChangesW( hDirectory: HANDLE, lpBuffer: LPVOID, nBufferLength: DWORD, bWatchSubtree: BOOL, dwNotifyFilter: DWORD, lpBytesReturned: LPDWORD, lpOverlapped: LPOVERLAPPED, lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE, ) -> BOOL; pub fn MapViewOfFileExNuma( hFileMappingObject: HANDLE, dwDesiredAccess: DWORD, dwFileOffsetHigh: DWORD, dwFileOffsetLow: DWORD, dwNumberOfBytesToMap: SIZE_T, lpBaseAddress: LPVOID, nndPreferred: DWORD, ) -> LPVOID; pub fn IsBadReadPtr( lp: *const VOID, ucb: UINT_PTR, ) -> BOOL; pub fn IsBadWritePtr( lp: LPVOID, ucb: UINT_PTR, ) -> BOOL; pub fn IsBadHugeReadPtr( lp: *const VOID, ucb: UINT_PTR, ) -> BOOL; pub fn IsBadHugeWritePtr( lp: LPVOID, ucb: UINT_PTR, ) -> 
BOOL; pub fn IsBadCodePtr( lpfn: FARPROC, ) -> BOOL; pub fn IsBadStringPtrA( lpsz: LPCSTR, ucchMax: UINT_PTR, ) -> BOOL; pub fn IsBadStringPtrW( lpsz: LPCWSTR, ucchMax: UINT_PTR, ) -> BOOL; pub fn LookupAccountSidA( lpSystemName: LPCSTR, Sid: PSID, Name: LPSTR, cchName: LPDWORD, ReferencedDomainName: LPSTR, cchReferencedDomainName: LPDWORD, peUse: PSID_NAME_USE, ) -> BOOL; pub fn LookupAccountSidW( lpSystemName: LPCWSTR, Sid: PSID, Name: LPWSTR, cchName: LPDWORD, ReferencedDomainName: LPWSTR, cchReferencedDomainName: LPDWORD, peUse: PSID_NAME_USE, ) -> BOOL; pub fn LookupAccountNameA( lpSystemName: LPCSTR, lpAccountName: LPCSTR, Sid: PSID, cbSid: LPDWORD, ReferencedDomainName: LPSTR, cchReferencedDomainName: LPDWORD, peUse: PSID_NAME_USE, ) -> BOOL; pub fn LookupAccountNameW( lpSystemName: LPCWSTR, lpAccountName: LPCWSTR, Sid: PSID, cbSid: LPDWORD, ReferencedDomainName: LPWSTR, cchReferencedDomainName: LPDWORD, peUse: PSID_NAME_USE, ) -> BOOL; // pub fn LookupAccountNameLocalA(); // pub fn LookupAccountNameLocalW(); // pub fn LookupAccountSidLocalA(); // pub fn LookupAccountSidLocalW(); pub fn LookupPrivilegeValueA( lpSystemName: LPCSTR, lpName: LPCSTR, lpLuid: PLUID, ) -> BOOL; pub fn LookupPrivilegeValueW( lpSystemName: LPCWSTR, lpName: LPCWSTR, lpLuid: PLUID, ) -> BOOL; pub fn LookupPrivilegeNameA( lpSystemName: LPCSTR, lpLuid: PLUID, lpName: LPSTR, cchName: LPDWORD, ) -> BOOL; pub fn LookupPrivilegeNameW( lpSystemName: LPCWSTR, lpLuid: PLUID, lpName: LPWSTR, cchName: LPDWORD, ) -> BOOL; // pub fn LookupPrivilegeDisplayNameA(); // pub fn LookupPrivilegeDisplayNameW(); pub fn BuildCommDCBA( lpDef: LPCSTR, lpDCB: LPDCB, ) -> BOOL; pub fn BuildCommDCBW( lpDef: LPCWSTR, lpDCB: LPDCB, ) -> BOOL; pub fn BuildCommDCBAndTimeoutsA( lpDef: LPCSTR, lpDCB: LPDCB, lpCommTimeouts: LPCOMMTIMEOUTS, ) -> BOOL; pub fn BuildCommDCBAndTimeoutsW( lpDef: LPCWSTR, lpDCB: LPDCB, lpCommTimeouts: LPCOMMTIMEOUTS, ) -> BOOL; pub fn CommConfigDialogA( lpszName: LPCSTR, hWnd: HWND, lpCC: LPCOMMCONFIG, ) -> BOOL; pub fn CommConfigDialogW( lpszName: LPCWSTR, hWnd: HWND, lpCC: LPCOMMCONFIG, ) -> BOOL; pub fn GetDefaultCommConfigA( lpszName: LPCSTR, lpCC: LPCOMMCONFIG, lpdwSize: LPDWORD, ) -> BOOL; pub fn GetDefaultCommConfigW( lpszName: LPCWSTR, lpCC: LPCOMMCONFIG, lpdwSize: LPDWORD, ) -> BOOL; pub fn SetDefaultCommConfigA( lpszName: LPCSTR, lpCC: LPCOMMCONFIG, dwSize: DWORD, ) -> BOOL; pub fn SetDefaultCommConfigW( lpszName: LPCWSTR, lpCC: LPCOMMCONFIG, dwSize: DWORD, ) -> BOOL; pub fn GetComputerNameA( lpBuffer: LPSTR, nSize: LPDWORD, ) -> BOOL; pub fn GetComputerNameW( lpBuffer: LPWSTR, nSize: LPDWORD, ) -> BOOL; pub fn DnsHostnameToComputerNameA( Hostname: LPCSTR, ComputerName: LPSTR, nSize: LPDWORD, ) -> BOOL; pub fn DnsHostnameToComputerNameW( Hostname: LPCWSTR, ComputerName: LPWSTR, nSize: LPDWORD, ) -> BOOL; pub fn GetUserNameA( lpBuffer: LPSTR, pcbBuffer: LPDWORD, ) -> BOOL; pub fn GetUserNameW( lpBuffer: LPWSTR, pcbBuffer: LPDWORD, ) -> BOOL; } pub const LOGON32_LOGON_INTERACTIVE: DWORD = 2; pub const LOGON32_LOGON_NETWORK: DWORD = 3; pub const LOGON32_LOGON_BATCH: DWORD = 4; pub const LOGON32_LOGON_SERVICE: DWORD = 5; pub const LOGON32_LOGON_UNLOCK: DWORD = 7; pub const LOGON32_LOGON_NETWORK_CLEARTEXT: DWORD = 8; pub const LOGON32_LOGON_NEW_CREDENTIALS: DWORD = 9; pub const LOGON32_PROVIDER_DEFAULT: DWORD = 0; pub const LOGON32_PROVIDER_WINNT35: DWORD = 1; pub const LOGON32_PROVIDER_WINNT40: DWORD = 2; pub const LOGON32_PROVIDER_WINNT50: DWORD = 3; pub const LOGON32_PROVIDER_VIRTUAL: DWORD = 4; extern
"system" { pub fn LogonUserA( lpUsername: LPCSTR, lpDomain: LPCSTR, lpPassword: LPCSTR, dwLogonType: DWORD, dwLogonProvider: DWORD, phToken: PHANDLE, ) -> BOOL; pub fn LogonUserW( lpUsername: LPCWSTR, lpDomain: LPCWSTR, lpPassword: LPCWSTR, dwLogonType: DWORD, dwLogonProvider: DWORD, phToken: PHANDLE, ) -> BOOL; pub fn LogonUserExA( lpUsername: LPCSTR, lpDomain: LPCSTR, lpPassword: LPCSTR, dwLogonType: DWORD, dwLogonProvider: DWORD, phToken: PHANDLE, ppLogonSid: *mut PSID, ppProfileBuffer: *mut PVOID, pdwProfileLength: LPDWORD, pQuotaLimits: PQUOTA_LIMITS, ) -> BOOL; pub fn LogonUserExW( lpUsername: LPCWSTR, lpDomain: LPCWSTR, lpPassword: LPCWSTR, dwLogonType: DWORD, dwLogonProvider: DWORD, phToken: PHANDLE, ppLogonSid: *mut PSID, ppProfileBuffer: *mut PVOID, pdwProfileLength: LPDWORD, pQuotaLimits: PQUOTA_LIMITS, ) -> BOOL; // pub fn CreateProcessWithLogonW(); // pub fn CreateProcessWithTokenW(); // pub fn IsTokenUntrusted(); pub fn RegisterWaitForSingleObject( phNewWaitObject: PHANDLE, hObject: HANDLE, Callback: WAITORTIMERCALLBACK, Context: PVOID, dwMilliseconds: ULONG, dwFlags: ULONG, ) -> BOOL; pub fn UnregisterWait( WaitHandle: HANDLE, ) -> BOOL; pub fn BindIoCompletionCallback( FileHandle: HANDLE, Function: LPOVERLAPPED_COMPLETION_ROUTINE, Flags: ULONG, ) -> BOOL; pub fn SetTimerQueueTimer( TimerQueue: HANDLE, Callback: WAITORTIMERCALLBACK, Parameter: PVOID, DueTime: DWORD, Period: DWORD, PreferIo: BOOL, ) -> HANDLE; pub fn CancelTimerQueueTimer( TimerQueue: HANDLE, Timer: HANDLE, ) -> BOOL; pub fn DeleteTimerQueue( TimerQueue: HANDLE, ) -> BOOL; // pub fn InitializeThreadpoolEnvironment(); // pub fn SetThreadpoolCallbackPool(); // pub fn SetThreadpoolCallbackCleanupGroup(); // pub fn SetThreadpoolCallbackRunsLong(); // pub fn SetThreadpoolCallbackLibrary(); // pub fn SetThreadpoolCallbackPriority(); // pub fn DestroyThreadpoolEnvironment(); // pub fn SetThreadpoolCallbackPersistent(); pub fn CreatePrivateNamespaceA( lpPrivateNamespaceAttributes: LPSECURITY_ATTRIBUTES, lpBoundaryDescriptor: LPVOID, lpAliasPrefix: LPCSTR, ) -> HANDLE; pub fn OpenPrivateNamespaceA( lpBoundaryDescriptor: LPVOID, lpAliasPrefix: LPCSTR, ) -> HANDLE; pub fn CreateBoundaryDescriptorA( Name: LPCSTR, Flags: ULONG, ) -> HANDLE; pub fn AddIntegrityLabelToBoundaryDescriptor( BoundaryDescriptor: *mut HANDLE, IntegrityLabel: PSID, ) -> BOOL; } pub const HW_PROFILE_GUIDLEN: usize = 39; // MAX_PROFILE_LEN pub const DOCKINFO_UNDOCKED: DWORD = 0x1; pub const DOCKINFO_DOCKED: DWORD = 0x2; pub const DOCKINFO_USER_SUPPLIED: DWORD = 0x4; pub const DOCKINFO_USER_UNDOCKED: DWORD = DOCKINFO_USER_SUPPLIED | DOCKINFO_UNDOCKED; pub const DOCKINFO_USER_DOCKED: DWORD = DOCKINFO_USER_SUPPLIED | DOCKINFO_DOCKED; STRUCT!{struct HW_PROFILE_INFOA { dwDockInfo: DWORD, szHwProfileGuid: [CHAR; HW_PROFILE_GUIDLEN], szHwProfileName: [CHAR; MAX_PROFILE_LEN], }} pub type LPHW_PROFILE_INFOA = *mut HW_PROFILE_INFOA; STRUCT!{struct HW_PROFILE_INFOW { dwDockInfo: DWORD, szHwProfileGuid: [WCHAR; HW_PROFILE_GUIDLEN], szHwProfileName: [WCHAR; MAX_PROFILE_LEN], }} pub type LPHW_PROFILE_INFOW = *mut HW_PROFILE_INFOW; extern "system" { pub fn GetCurrentHwProfileA( lpHwProfileInfo: LPHW_PROFILE_INFOA, ) -> BOOL; pub fn GetCurrentHwProfileW( lpHwProfileInfo: LPHW_PROFILE_INFOW, ) -> BOOL; pub fn VerifyVersionInfoA( lpVersionInformation: LPOSVERSIONINFOEXA, dwTypeMask: DWORD, dwlConditionMask: DWORDLONG, ) -> BOOL; pub fn VerifyVersionInfoW( lpVersionInformation: LPOSVERSIONINFOEXW, dwTypeMask: DWORD, dwlConditionMask: DWORDLONG, ) -> BOOL; } 
STRUCT!{struct SYSTEM_POWER_STATUS { ACLineStatus: BYTE, BatteryFlag: BYTE, BatteryLifePercent: BYTE, Reserved1: BYTE, BatteryLifeTime: DWORD, BatteryFullLifeTime: DWORD, }} pub type LPSYSTEM_POWER_STATUS = *mut SYSTEM_POWER_STATUS; extern "system" { pub fn GetSystemPowerStatus( lpSystemPowerStatus: LPSYSTEM_POWER_STATUS, ) -> BOOL; pub fn SetSystemPowerState( fSuspend: BOOL, fForce: BOOL, ) -> BOOL; pub fn MapUserPhysicalPagesScatter( VirtualAddresses: *mut PVOID, NumberOfPages: ULONG_PTR, PageArray: PULONG_PTR, ) -> BOOL; pub fn CreateJobObjectA( lpJobAttributes: LPSECURITY_ATTRIBUTES, lpName: LPCSTR, ) -> HANDLE; pub fn OpenJobObjectA( dwDesiredAccess: DWORD, bInheritHandle: BOOL, lpName: LPCSTR, ) -> HANDLE; pub fn CreateJobSet( NumJob: ULONG, UserJobSet: PJOB_SET_ARRAY, Flags: ULONG, ) -> BOOL; pub fn FindFirstVolumeA( lpszVolumeName: LPSTR, cchBufferLength: DWORD, ) -> HANDLE; pub fn FindNextVolumeA( hFindVolume: HANDLE, lpszVolumeName: LPSTR, cchBufferLength: DWORD, ) -> BOOL; pub fn FindFirstVolumeMountPointA( lpszRootPathName: LPCSTR, lpszVolumeMountPoint: LPSTR, cchBufferLength: DWORD, ) -> HANDLE; pub fn FindFirstVolumeMountPointW( lpszRootPathName: LPCWSTR, lpszVolumeMountPoint: LPWSTR, cchBufferLength: DWORD, ) -> HANDLE; pub fn FindNextVolumeMountPointA( hFindVolumeMountPoint: HANDLE, lpszVolumeMountPoint: LPSTR, cchBufferLength: DWORD, ) -> BOOL; pub fn FindNextVolumeMountPointW( hFindVolumeMountPoint: HANDLE, lpszVolumeMountPoint: LPWSTR, cchBufferLength: DWORD, ) -> BOOL; pub fn FindVolumeMountPointClose( hFindVolumeMountPoint: HANDLE, ) -> BOOL; pub fn SetVolumeMountPointA( lpszVolumeMountPoint: LPCSTR, lpszVolumeName: LPCSTR, ) -> BOOL; pub fn SetVolumeMountPointW( lpszVolumeMountPoint: LPCWSTR, lpszVolumeName: LPCWSTR, ) -> BOOL; pub fn DeleteVolumeMountPointA( lpszVolumeMountPoint: LPCSTR, ) -> BOOL; pub fn GetVolumeNameForVolumeMountPointA( lpszVolumeMountPoint: LPCSTR, lpszVolumeName: LPSTR, cchBufferLength: DWORD, ) -> BOOL; pub fn GetVolumePathNameA( lpszFileName: LPCSTR, lpszVolumePathName: LPSTR, cchBufferLength: DWORD, ) -> BOOL; pub fn GetVolumePathNamesForVolumeNameA( lpszVolumeName: LPCSTR, lpszVolumePathNames: LPCH, cchBufferLength: DWORD, lpcchReturnLength: PDWORD, ) -> BOOL; } // ACTCTX_FLAG_* STRUCT!{struct ACTCTXA { cbSize: ULONG, dwFlags: DWORD, lpSource: LPCSTR, wProcessorArchitecture: USHORT, wLangId: LANGID, lpAssemblyDirectory: LPCSTR, lpResourceName: LPCSTR, lpApplicationName: LPCSTR, hModule: HMODULE, }} pub type PACTCTXA = *mut ACTCTXA; STRUCT!{struct ACTCTXW { cbSize: ULONG, dwFlags: DWORD, lpSource: LPCWSTR, wProcessorArchitecture: USHORT, wLangId: LANGID, lpAssemblyDirectory: LPCWSTR, lpResourceName: LPCWSTR, lpApplicationName: LPCWSTR, hModule: HMODULE, }} pub type PACTCTXW = *mut ACTCTXW; pub type PCACTCTXA = *const ACTCTXA; pub type PCACTCTXW = *const ACTCTXW; extern "system" { pub fn CreateActCtxA( pActCtx: PCACTCTXA, ) -> HANDLE; pub fn CreateActCtxW( pActCtx: PCACTCTXW, ) -> HANDLE; pub fn AddRefActCtx( hActCtx: HANDLE, ); pub fn ReleaseActCtx( hActCtx: HANDLE, ); pub fn ZombifyActCtx( hActCtx: HANDLE, ) -> BOOL; pub fn ActivateActCtx( hActCtx: HANDLE, lpCookie: *mut ULONG_PTR, ) -> BOOL; pub fn DeactivateActCtx( dwFlags: DWORD, ulCookie: ULONG_PTR, ) -> BOOL; pub fn GetCurrentActCtx( lphActCtx: *mut HANDLE, ) -> BOOL; } STRUCT!{struct ACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA { lpInformation: PVOID, lpSectionBase: PVOID, ulSectionLength: ULONG, lpSectionGlobalDataBase: PVOID, ulSectionGlobalDataLength: ULONG, }} pub type 
PACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA = *mut ACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA; pub type PCACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA = *const ACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA; STRUCT!{struct ACTCTX_SECTION_KEYED_DATA { cbSize: ULONG, ulDataFormatVersion: ULONG, lpData: PVOID, ulLength: ULONG, lpSectionGlobalData: PVOID, ulSectionGlobalDataLength: ULONG, lpSectionBase: PVOID, ulSectionTotalLength: ULONG, hActCtx: HANDLE, ulAssemblyRosterIndex: ULONG, ulFlags: ULONG, AssemblyMetadata: ACTCTX_SECTION_KEYED_DATA_ASSEMBLY_METADATA, }} pub type PACTCTX_SECTION_KEYED_DATA = *mut ACTCTX_SECTION_KEYED_DATA; pub type PCACTCTX_SECTION_KEYED_DATA = *const ACTCTX_SECTION_KEYED_DATA; extern "system" { pub fn FindActCtxSectionStringA( dwFlags: DWORD, lpExtensionGuid: *const GUID, ulSectionId: ULONG, lpStringToFind: LPCSTR, ReturnedData: PACTCTX_SECTION_KEYED_DATA, ) -> BOOL; pub fn FindActCtxSectionStringW( dwFlags: DWORD, lpExtensionGuid: *const GUID, ulSectionId: ULONG, lpStringToFind: LPCWSTR, ReturnedData: PACTCTX_SECTION_KEYED_DATA, ) -> BOOL; pub fn FindActCtxSectionGuid( dwFlags: DWORD, lpExtensionGuid: *const GUID, ulSectionId: ULONG, lpGuidToFind: *const GUID, ReturnedData: PACTCTX_SECTION_KEYED_DATA, ) -> BOOL; pub fn QueryActCtxW( dwFlags: DWORD, hActCtx: HANDLE, pvSubInstance: PVOID, ulInfoClass: ULONG, pvBuffer: PVOID, cbBuffer: SIZE_T, pcbWrittenOrRequired: *mut SIZE_T, ) -> BOOL; pub fn WTSGetActiveConsoleSessionId() -> DWORD; // pub fn WTSGetServiceSessionId(); // pub fn WTSIsServerContainer(); pub fn GetActiveProcessorGroupCount() -> WORD; pub fn GetMaximumProcessorGroupCount() -> WORD; pub fn GetActiveProcessorCount( GroupNumber: WORD, ) -> DWORD; pub fn GetMaximumProcessorCount( GroupNumber: WORD, ) -> DWORD; pub fn GetNumaProcessorNode( Processor: UCHAR, NodeNumber: PUCHAR, ) -> BOOL; pub fn GetNumaNodeNumberFromHandle( hFile: HANDLE, NodeNumber: PUSHORT, ) -> BOOL; pub fn GetNumaProcessorNodeEx( Processor: PPROCESSOR_NUMBER, NodeNumber: PUSHORT, ) -> BOOL; pub fn GetNumaNodeProcessorMask( Node: UCHAR, ProcessorMask: PULONGLONG, ) -> BOOL; pub fn GetNumaAvailableMemoryNode( Node: UCHAR, AvailableBytes: PULONGLONG, ) -> BOOL; pub fn GetNumaAvailableMemoryNodeEx( Node: USHORT, AvailableBytes: PULONGLONG, ) -> BOOL; pub fn GetNumaProximityNode( ProximityId: ULONG, NodeNumber: PUCHAR, ) -> BOOL; } FN!{stdcall APPLICATION_RECOVERY_CALLBACK( pvParameter: PVOID, ) -> DWORD} // RESTART_* // RECOVERY_* extern "system" { pub fn RegisterApplicationRecoveryCallback( pRecoveyCallback: APPLICATION_RECOVERY_CALLBACK, pvParameter: PVOID, dwPingInterval: DWORD, dwFlags: DWORD, ) -> HRESULT; pub fn UnregisterApplicationRecoveryCallback() -> HRESULT; pub fn RegisterApplicationRestart( pwzCommandline: PCWSTR, dwFlags: DWORD, ) -> HRESULT; pub fn UnregisterApplicationRestart() -> HRESULT; pub fn GetApplicationRecoveryCallback( hProcess: HANDLE, pRecoveryCallback: *mut APPLICATION_RECOVERY_CALLBACK, ppvParameter: *mut PVOID, pdwPingInterval: PDWORD, pdwFlags: PDWORD, ) -> HRESULT; pub fn GetApplicationRestartSettings( hProcess: HANDLE, pwzCommandline: PWSTR, pcchSize: PDWORD, pdwFlags: PDWORD, ) -> HRESULT; pub fn ApplicationRecoveryInProgress( pbCancelled: PBOOL, ) -> HRESULT; pub fn ApplicationRecoveryFinished( bSuccess: BOOL, ); } // FILE_BASIC_INFO, etc. 
extern "system" { pub fn GetFileInformationByHandleEx( hFile: HANDLE, FileInformationClass: FILE_INFO_BY_HANDLE_CLASS, lpFileInformation: LPVOID, dwBufferSize: DWORD, ) -> BOOL; } ENUM!{enum FILE_ID_TYPE { FileIdType, ObjectIdType, ExtendedFileIdType, MaximumFileIdType, }} UNION!{union FILE_ID_DESCRIPTOR_u { [u64; 2], FileId FileId_mut: LARGE_INTEGER, ObjectId ObjectId_mut: GUID, ExtendedFileId ExtendedFileId_mut: FILE_ID_128, }} STRUCT!{struct FILE_ID_DESCRIPTOR { dwSize: DWORD, Type: FILE_ID_TYPE, u: FILE_ID_DESCRIPTOR_u, }} pub type LPFILE_ID_DESCRIPTOR = *mut FILE_ID_DESCRIPTOR; extern "system" { pub fn OpenFileById( hVolumeHint: HANDLE, lpFileId: LPFILE_ID_DESCRIPTOR, dwDesiredAccess: DWORD, dwShareMode: DWORD, lpSecurityAttributes: LPSECURITY_ATTRIBUTES, dwFlagsAndAttributes: DWORD, ) -> HANDLE; pub fn CreateSymbolicLinkA( lpSymlinkFileName: LPCSTR, lpTargetFileName: LPCSTR, dwFlags: DWORD, ) -> BOOLEAN; pub fn CreateSymbolicLinkW( lpSymlinkFileName: LPCWSTR, lpTargetFileName: LPCWSTR, dwFlags: DWORD, ) -> BOOLEAN; pub fn QueryActCtxSettingsW( dwFlags: DWORD, hActCtx: HANDLE, settingsNameSpace: PCWSTR, settingName: PCWSTR, pvBuffer: PWSTR, dwBuffer: SIZE_T, pdwWrittenOrRequired: *mut SIZE_T, ) -> BOOL; pub fn CreateSymbolicLinkTransactedA( lpSymlinkFileName: LPCSTR, lpTargetFileName: LPCSTR, dwFlags: DWORD, hTransaction: HANDLE, ) -> BOOLEAN; pub fn CreateSymbolicLinkTransactedW( lpSymlinkFileName: LPCWSTR, lpTargetFileName: LPCWSTR, dwFlags: DWORD, hTransaction: HANDLE, ) -> BOOLEAN; pub fn ReplacePartitionUnit( TargetPartition: PWSTR, SparePartition: PWSTR, Flags: ULONG, ) -> BOOL; pub fn AddSecureMemoryCacheCallback( pfnCallBack: PSECURE_MEMORY_CACHE_CALLBACK, ) -> BOOL; pub fn RemoveSecureMemoryCacheCallback( pfnCallBack: PSECURE_MEMORY_CACHE_CALLBACK, ) -> BOOL; pub fn CopyContext( Destination: PCONTEXT, ContextFlags: DWORD, Source: PCONTEXT, ) -> BOOL; pub fn InitializeContext( Buffer: PVOID, ContextFlags: DWORD, Context: *mut PCONTEXT, ContextLength: PDWORD, ) -> BOOL; pub fn GetEnabledXStateFeatures() -> DWORD64; pub fn GetXStateFeaturesMask( Context: PCONTEXT, FeatureMask: PDWORD64, ) -> BOOL; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn LocateXStateFeature( Context: PCONTEXT, FeatureId: DWORD, Length: PDWORD, ) -> PVOID; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn SetXStateFeaturesMask( Context: PCONTEXT, FeatureMask: DWORD64, ) -> BOOL; pub fn EnableThreadProfiling( ThreadHandle: HANDLE, Flags: DWORD, HardwareCounters: DWORD64, PerformanceDataHandle: *mut HANDLE, ) -> BOOL; pub fn DisableThreadProfiling( PerformanceDataHandle: HANDLE, ) -> DWORD; pub fn QueryThreadProfiling( ThreadHandle: HANDLE, Enabled: PBOOLEAN, ) -> DWORD; pub fn ReadThreadProfilingData( PerformanceDataHandle: HANDLE, Flags: DWORD, PerformanceData: PPERFORMANCE_DATA, ) -> DWORD; // intrinsic InterlockedIncrement // intrinsic InterlockedDecrement // intrinsic InterlockedExchange // intrinsic InterlockedExchangeAdd // intrinsic InterlockedExchangeSubtract // intrinsic InterlockedCompareExchange // intrinsic InterlockedAnd // intrinsic InterlockedOr // intrinsic InterlockedXor }
29.948337
98
0.656226
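A minimal sketch of driving the processor-group bindings declared in the record above; it assumes those extern declarations are in scope and that the binary links against kernel32, as the winapi crate normally arranges. The function name is illustrative.

fn print_processor_groups() {
    unsafe {
        // WORD-sized group count; each group holds up to 64 logical processors.
        let groups = GetActiveProcessorGroupCount();
        for group in 0..groups {
            let active = GetActiveProcessorCount(group);
            let maximum = GetMaximumProcessorCount(group);
            println!("group {}: {} of {} processors active", group, active, maximum);
        }
    }
}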
8fecc722d1b24e9304ccc51c1ce57aba5dfadb71
575
pub fn run() {
    // Create an immutable variable with `let`.
    let name = "Rust";
    let number = 42; // bindings are immutable by default

    // `mut` makes the binding mutable, so it can be reassigned.
    let mut num = 42;
    num = 42 - 2;

    // Constants require an explicit type annotation; `i32` is a 32-bit integer.
    const ID: i32 = 001;

    // Destructure multiple variables at once.
    let (var1, var2) = ("Alpha", 3.14);

    // Print the variables.
    println!("Program with {} and {} or {} or ID: {}", name, number, num, ID);
    println!("The {} is {}", var1, var2);
}
31.944444
87
0.526957
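A self-contained companion sketch of the same binding rules, showing that rebinding with `let` (shadowing) is different from mutating through `mut`; the names are illustrative.

fn main() {
    let x = 5; // immutable binding
    let x = x + 1; // shadowing: a brand-new binding replaces the old one
    let mut y = 10; // `mut` allows in-place mutation of the same binding
    y -= 2;
    const MAX: i32 = 100; // constants always need an explicit type
    println!("x = {}, y = {}, MAX = {}", x, y, MAX);
}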
75827bd3acde1e23c365c526ef70138decc0f5dd
562
use std::env;

use colored::*;

pub fn get_char() -> colored::ColoredString {
    let user_char = env::var("PROMPT_CHAR").unwrap_or("$".into());
    let root_char = env::var("PROMPT_CHAR_ROOT").unwrap_or("#".into());
    let user_char_color = env::var("PROMPT_CHAR_COLOR").unwrap_or("green".into());
    let root_char_color = env::var("PROMPT_CHAR_ROOT_COLOR").unwrap_or("red".into());

    // The effective user id tells us whether the shell is running as root.
    let euid = unsafe { libc::geteuid() };
    match euid {
        0 => root_char.color(root_char_color),
        _ => user_char.color(user_char_color),
    }
}
33.058824
85
0.653025
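A hypothetical caller for the `get_char` function above; it assumes the same `colored` and `libc` dependencies and simply overrides the prompt character through the environment variables the function reads.

fn main() {
    // These variable values are illustrative.
    std::env::set_var("PROMPT_CHAR", ">");
    std::env::set_var("PROMPT_CHAR_COLOR", "cyan");
    print!("{} ", get_char());
}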
d798d5d170f5212840c074333dcdb731cc1ce25f
2,973
// wengwengweng use std::fs; use std::path::Path; use std::path::PathBuf; type Result<T> = ::std::result::Result<T, Error>; use crate::Error; pub fn exists(path: impl AsRef<Path>) -> bool { return path.as_ref().exists(); } pub fn assert_exists(path: impl AsRef<Path>) -> Result<()> { let path = path.as_ref(); if !exists(path) { return Err(Error::IO(format!("{} not found", path.display()))) } else { return Ok(()); } } pub fn is_dir(path: impl AsRef<Path>) -> bool { return path.as_ref().is_dir(); } pub fn is_file(path: impl AsRef<Path>) -> bool { return path.as_ref().is_file(); } pub fn mkdir(path: impl AsRef<Path>) -> Result<()> { let path = path.as_ref(); return fs::create_dir_all(path) .map_err(|_| Error::IO(format!("failed to create directory {}", path.display()))); } pub fn copy(p1: impl AsRef<Path>, p2: impl AsRef<Path>) -> Result<()> { let p1 = p1.as_ref(); let p2 = p2.as_ref(); fs::copy(p1, p2) .map_err(|_| Error::IO(format!("failed to copy {} to {}", p1.display(), p2.display())))?; return Ok(()); } pub fn copy_dir(p1: impl AsRef<Path>, p2: impl AsRef<Path>) -> Result<()> { let p1 = p1.as_ref(); let p2 = p2.as_ref(); let mut options = fs_extra::dir::CopyOptions::new(); options.overwrite = true; options.copy_inside = true; fs_extra::dir::copy(p1, p2, &options) .map_err(|_| Error::IO(format!("failed to copy {} to {}", p1.display(), p2.display())))?; return Ok(()); } pub fn write(path: impl AsRef<Path>, content: impl AsRef<[u8]>) -> Result<()> { let path = path.as_ref(); return fs::write(path, content) .map_err(|_| Error::IO(format!("failed to write file {}", path.display()))); } pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> { let path = path.as_ref(); return fs::read(&path) .map_err(|_| Error::IO(format!("failed to read file {}", path.display()))); } pub fn basename(path: impl AsRef<Path>) -> Result<String> { let path = path .as_ref(); return Ok( path .file_stem() .ok_or(Error::IO(format!("failed to get basename: {}", path.display())))? .to_str() .ok_or(Error::IO(format!("failed to get basename: {}", path.display())))? .to_owned() ); } pub fn base(path: impl AsRef<Path>) -> Result<PathBuf> { let path = path.as_ref(); return path .iter() .last() .map(PathBuf::from) .ok_or(Error::IO(format!("failed to get base: {}", path.display()))) ; } pub fn extname(path: impl AsRef<Path>) -> Result<String> { let path = path.as_ref(); return Ok(path .extension() .ok_or(Error::IO(format!("failed to get extname: {}", path.display())))? .to_os_string() .into_string().map_err(|_| Error::IO(format!("failed to get extname: {}", path.display())))? ); } pub fn assert_ext(path: impl AsRef<Path>, ext: &str) -> Result<()> { let path = path.as_ref(); if extname(path)? != String::from(ext) { return Err(Error::IO(format!("invalid file {}, expected a .{}", path.display(), ext))) } else { return Ok(()); } }
21.70073
94
0.616549
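A short usage sketch for the filesystem helpers above, reusing the module's own `Result` alias; the paths are illustrative.

fn demo() -> Result<()> {
    mkdir("out")?;
    write("out/hello.txt", "hello")?;
    let bytes = read("out/hello.txt")?;
    assert_eq!(bytes, b"hello".to_vec());
    assert_eq!(basename("out/hello.txt")?, "hello");
    assert_eq!(extname("out/hello.txt")?, "txt");
    assert_ext("out/hello.txt", "txt")
}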
75c2c1b4e8e1c055b44d6c0bc8f20f132e5ce666
798
/*
 * Copyright © 2020-today Peter M. Stahl [email protected]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use include_dir::{include_dir, Dir};

pub const TSWANA_MODELS_DIRECTORY: Dir = include_dir!("models");
pub const TSWANA_TESTDATA_DIRECTORY: Dir = include_dir!("testdata");
36.272727
77
0.749373
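A sketch of reading an embedded file back out of one of the `Dir` constants above via include_dir's `get_file`/`contents` accessors; the file name is hypothetical and depends on what actually ships in the models directory.

fn model_size(name: &str) -> Option<usize> {
    // Returns None if no embedded file matches the given relative path.
    TSWANA_MODELS_DIRECTORY
        .get_file(name)
        .map(|file| file.contents().len())
}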
e6a3474c27a1a41c1ad09ffc6886e416d17164b8
10,791
//! //! This module provides syscall definitions and the necessary resources to parse incoming //! syscalls extern crate syscall; pub use self::syscall::{data, error, flag, io, number, ptrace_event, scheme}; pub use self::driver::*; pub use self::fs::*; pub use self::futex::futex; pub use self::privilege::*; pub use self::process::*; pub use self::time::*; pub use self::validate::*; use self::data::{SigAction, TimeSpec}; use self::error::{Error, Result, ENOSYS}; use self::flag::{CloneFlags, MapFlags, PhysmapFlags, WaitFlags}; use self::number::*; use crate::context::ContextId; use crate::interrupt::InterruptStack; use crate::scheme::{FileHandle, SchemeNamespace}; /// Debug pub mod debug; /// Driver syscalls pub mod driver; /// Filesystem syscalls pub mod fs; /// Fast userspace mutex pub mod futex; /// Privilege syscalls pub mod privilege; /// Process syscalls pub mod process; /// Time syscalls pub mod time; /// Validate input pub mod validate; /// This function is the syscall handler of the kernel, it is composed of an inner function that returns a `Result<usize>`. After the inner function runs, the syscall /// function calls [`Error::mux`] on it. pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: usize, stack: &mut InterruptStack) -> usize { #[inline(always)] fn inner(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: usize, stack: &mut InterruptStack) -> Result<usize> { //SYS_* is declared in kernel/syscall/src/number.rs match a & SYS_CLASS { SYS_CLASS_FILE => { let fd = FileHandle::from(b); match a & SYS_ARG { SYS_ARG_SLICE => file_op_slice(a, fd, validate_slice(c as *const u8, d)?), SYS_ARG_MSLICE => file_op_mut_slice(a, fd, validate_slice_mut(c as *mut u8, d)?), _ => match a { SYS_CLOSE => close(fd), SYS_DUP => dup(fd, validate_slice(c as *const u8, d)?).map(FileHandle::into), SYS_DUP2 => dup2(fd, FileHandle::from(c), validate_slice(d as *const u8, e)?).map(FileHandle::into), SYS_FCNTL => fcntl(fd, c, d), SYS_FEXEC => fexec(fd, validate_slice(c as *const [usize; 2], d)?, validate_slice(e as *const [usize; 2], f)?), SYS_FRENAME => frename(fd, validate_slice(c as *const u8, d)?), SYS_FUNMAP => funmap(b, c), SYS_FMAP_OLD => { { let contexts = crate::context::contexts(); let current = contexts.current().unwrap(); let current = current.read(); let name = current.name.lock(); println!("{:?} using deprecated fmap(...) call", core::str::from_utf8(&name)); } file_op(a, fd, c, d) }, SYS_FUNMAP_OLD => { { let contexts = crate::context::contexts(); let current = contexts.current().unwrap(); let current = current.read(); let name = current.name.lock(); println!("{:?} using deprecated funmap(...) call", core::str::from_utf8(&name)); } funmap_old(b) }, _ => file_op(a, fd, c, d) } } }, SYS_CLASS_PATH => match a { SYS_OPEN => open(validate_slice(b as *const u8, c)?, d).map(FileHandle::into), SYS_CHMOD => chmod(validate_slice(b as *const u8, c)?, d as u16), SYS_RMDIR => rmdir(validate_slice(b as *const u8, c)?), SYS_UNLINK => unlink(validate_slice(b as *const u8, c)?), _ => Err(Error::new(ENOSYS)) }, _ => match a { SYS_YIELD => sched_yield(), SYS_NANOSLEEP => nanosleep( validate_slice(b as *const TimeSpec, 1).map(|req| &req[0])?, if c == 0 { None } else { Some(validate_slice_mut(c as *mut TimeSpec, 1).map(|rem| &mut rem[0])?) 
} ), SYS_CLOCK_GETTIME => clock_gettime(b, validate_slice_mut(c as *mut TimeSpec, 1).map(|time| &mut time[0])?), SYS_FUTEX => futex(validate_slice_mut(b as *mut i32, 1).map(|uaddr| &mut uaddr[0])?, c, d as i32, e, f as *mut i32), SYS_GETPID => getpid().map(ContextId::into), SYS_GETPGID => getpgid(ContextId::from(b)).map(ContextId::into), SYS_GETPPID => getppid().map(ContextId::into), SYS_CLONE => { let b = CloneFlags::from_bits_truncate(b); let old_rsp = stack.iret.rsp; if b.contains(flag::CLONE_STACK) { stack.iret.rsp = c; } let ret = clone(b, bp).map(ContextId::into); stack.iret.rsp = old_rsp; ret }, SYS_EXIT => exit((b & 0xFF) << 8), SYS_KILL => kill(ContextId::from(b), c), SYS_WAITPID => waitpid(ContextId::from(b), c, WaitFlags::from_bits_truncate(d)).map(ContextId::into), SYS_CHDIR => chdir(validate_slice(b as *const u8, c)?), SYS_IOPL => iopl(b, stack), SYS_GETCWD => getcwd(validate_slice_mut(b as *mut u8, c)?), SYS_GETEGID => getegid(), SYS_GETENS => getens(), SYS_GETEUID => geteuid(), SYS_GETGID => getgid(), SYS_GETNS => getns(), SYS_GETUID => getuid(), SYS_MPROTECT => mprotect(b, c, MapFlags::from_bits_truncate(d)), SYS_MKNS => mkns(validate_slice(b as *const [usize; 2], c)?), SYS_SETPGID => setpgid(ContextId::from(b), ContextId::from(c)), SYS_SETREUID => setreuid(b as u32, c as u32), SYS_SETRENS => setrens(SchemeNamespace::from(b), SchemeNamespace::from(c)), SYS_SETREGID => setregid(b as u32, c as u32), SYS_SIGACTION => sigaction( b, if c == 0 { None } else { Some(validate_slice(c as *const SigAction, 1).map(|act| &act[0])?) }, if d == 0 { None } else { Some(validate_slice_mut(d as *mut SigAction, 1).map(|oldact| &mut oldact[0])?) }, e ), SYS_SIGPROCMASK => sigprocmask( b, if c == 0 { None } else { Some(validate_slice(c as *const [u64; 2], 1).map(|s| &s[0])?) }, if d == 0 { None } else { Some(validate_slice_mut(d as *mut [u64; 2], 1).map(|s| &mut s[0])?) } ), SYS_SIGRETURN => sigreturn(), SYS_PIPE2 => pipe2(validate_slice_mut(b as *mut usize, 2)?, c), SYS_PHYSALLOC => physalloc(b), SYS_PHYSALLOC3 => physalloc3(b, c, &mut validate_slice_mut(d as *mut usize, 1)?[0]), SYS_PHYSFREE => physfree(b, c), SYS_PHYSMAP => physmap(b, c, PhysmapFlags::from_bits_truncate(d)), SYS_PHYSUNMAP => physunmap(b), SYS_UMASK => umask(b), SYS_VIRTTOPHYS => virttophys(b), _ => Err(Error::new(ENOSYS)) } } } /* let debug = { let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); let name_raw = context.name.lock(); let name = unsafe { core::str::from_utf8_unchecked(&name_raw) }; if name == "file:/bin/cargo" || name == "file:/bin/rustc" { if a == SYS_CLOCK_GETTIME { false } else if (a == SYS_WRITE || a == SYS_FSYNC) && (b == 1 || b == 2) { false } else { true } } else { false } } else { false } }; if debug { let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); print!("{} ({}): ", unsafe { core::str::from_utf8_unchecked(&context.name.lock()) }, context.id.into()); } println!("{}", debug::format_call(a, b, c, d, e, f)); } */ // The next lines set the current syscall in the context struct, then once the inner() function // completes, we set the current syscall to none. 
// // When the code below falls out of scope it will release the lock // see the spin crate for details { let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let mut context = context_lock.write(); context.syscall = Some((a, b, c, d, e, f)); } } let result = inner(a, b, c, d, e, f, bp, stack); { let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let mut context = context_lock.write(); context.syscall = None; } } /* if debug { let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); print!("{} ({}): ", unsafe { core::str::from_utf8_unchecked(&context.name.lock()) }, context.id.into()); } print!("{} = ", debug::format_call(a, b, c, d, e, f)); match result { Ok(ref ok) => { println!("Ok({} ({:#X}))", ok, ok); }, Err(ref err) => { println!("Err({} ({:#X}))", err, err.errno); } } } */ // errormux turns Result<usize> into -errno Error::mux(result) }
40.567669
166
0.474006
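A minimal sketch of the "-errno" muxing convention the handler above refers to in its final comment: `Ok` values pass through unchanged, while errors come back as the two's-complement negation of their errno. The names here are illustrative, not the kernel's actual `Error::mux` implementation.

fn mux(result: Result<usize, usize>) -> usize {
    match result {
        Ok(value) => value,
        // -errno encoded in an unsigned word, e.g. ENOSYS (38) -> usize::MAX - 37
        Err(errno) => errno.wrapping_neg(),
    }
}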
e5402fe17871b8d5ff2ef06fe924749290bf03ba
16,532
use std::{mem, ops}; use num_traits::{AsPrimitive, Num, One, PrimInt, Unsigned, Zero}; use super::*; use crate::bit_type::BitType; use super::max_with_bits; /// An unsigned integer with N bits. #[derive(Default, Clone, Copy, Debug)] pub struct U<const N: usize>(<Underlying<N> as Type>::U) where Underlying<N>: Type; impl<const N: usize> U<N> where Underlying<N>: Type, { pub fn new(value: <Underlying<N> as Type>::U) -> Self { #[cfg(debug_assertions)] { assert!( value <= max_with_bits(N), "Value too large for {} bits, {:?} < {:?}", N, value, <Underlying<N> as Type>::U::one() << N, ); } U(value & max_with_bits(N)) } pub fn extract_underlying(self) -> <Underlying<N> as Type>::U { self.0 } } impl<const N: usize> ops::Add<U<N>> for U<N> where Underlying<N>: Type, { type Output = Self; fn add(self, rhs: U<N>) -> Self::Output { let value = self.0 + rhs.0; #[cfg(debug_assertions)] { if value > max_with_bits(N) { panic!("Attempted to add with overflow"); } } U(value & max_with_bits(N)) } } impl<const N: usize> ops::Sub<U<N>> for U<N> where Underlying<N>: Type, { type Output = Self; fn sub(self, rhs: U<N>) -> Self::Output { let value = self.0 - rhs.0; U(value & max_with_bits(N)) } } impl<const N: usize> ops::Mul<U<N>> for U<N> where Underlying<N>: Type, { type Output = Self; fn mul(self, rhs: U<N>) -> Self::Output { let value = self.0 * rhs.0; #[cfg(debug_assertions)] { if value > max_with_bits(N) { panic!("Attempted to multiply with overflow"); } } U(value & max_with_bits(N)) } } impl<const N: usize> ops::Div<U<N>> for U<N> where Underlying<N>: Type, { type Output = Self; fn div(self, rhs: U<N>) -> Self::Output { // can never overflow U(self.0 / rhs.0) } } impl<const N: usize> ops::Rem<U<N>> for U<N> where Underlying<N>: Type, { type Output = Self; fn rem(self, rhs: U<N>) -> Self::Output { // Can never overflow U(self.0 % rhs.0) } } impl<const N: usize> ops::BitAnd<U<N>> for U<N> where Underlying<N>: Type, { type Output = Self; fn bitand(self, rhs: U<N>) -> Self::Output { // Can never overflow U(self.0 & rhs.0) } } impl<const N: usize> ops::BitOr<U<N>> for U<N> where Underlying<N>: Type, { type Output = Self; fn bitor(self, rhs: U<N>) -> Self::Output { // Can never overflow U(self.0 | rhs.0) } } impl<const N: usize> ops::BitXor<U<N>> for U<N> where Underlying<N>: Type, { type Output = Self; fn bitxor(self, rhs: U<N>) -> Self::Output { // Can never overflow U(self.0 ^ rhs.0) } } impl<const N: usize> ops::Shl<usize> for U<N> where Underlying<N>: Type, { type Output = Self; fn shl(self, rhs: usize) -> Self::Output { #[cfg(debug_assertions)] { if rhs >= N { panic!("Attempted to shift left with overflow"); } } U((self.0 << rhs) & max_with_bits(N)) } } impl<const N: usize> ops::Shr<usize> for U<N> where Underlying<N>: Type, { type Output = Self; fn shr(self, rhs: usize) -> Self::Output { #[cfg(debug_assertions)] { if rhs >= N { panic!("Attempted to shift right with overflow"); } } U((self.0 & max_with_bits(N)) >> rhs) } } impl<const N: usize> ops::AddAssign<U<N>> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::AddAssign<<Underlying<N> as Type>::U>, { fn add_assign(&mut self, rhs: U<N>) { let value = self.0 + rhs.0; #[cfg(debug_assertions)] { if value > max_with_bits(N) { panic!("Attempted to add with overflow"); } } self.0 = value & max_with_bits(N); } } impl<const N: usize> ops::SubAssign<U<N>> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::SubAssign<<Underlying<N> as Type>::U>, { fn sub_assign(&mut self, rhs: U<N>) { let value = self.0 - rhs.0; self.0 = value & 
max_with_bits(N); } } impl<const N: usize> ops::MulAssign<U<N>> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::MulAssign<<Underlying<N> as Type>::U>, { fn mul_assign(&mut self, rhs: U<N>) { let value = self.0 * rhs.0; #[cfg(debug_assertions)] { if value > max_with_bits(N) { panic!("Attempted to multiply with overflow"); } } self.0 = value & max_with_bits(N); } } impl<const N: usize> ops::DivAssign<U<N>> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::DivAssign<<Underlying<N> as Type>::U>, { fn div_assign(&mut self, rhs: U<N>) { // Can never overflow self.0 /= rhs.0; } } impl<const N: usize> ops::RemAssign<U<N>> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::RemAssign<<Underlying<N> as Type>::U>, { fn rem_assign(&mut self, rhs: U<N>) { // Can never overflow self.0 %= rhs.0; } } impl<const N: usize> ops::BitAndAssign<U<N>> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::BitAndAssign<<Underlying<N> as Type>::U>, { fn bitand_assign(&mut self, rhs: U<N>) { // Can never overflow self.0 &= rhs.0; } } impl<const N: usize> ops::BitOrAssign<U<N>> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::BitOrAssign<<Underlying<N> as Type>::U>, { fn bitor_assign(&mut self, rhs: U<N>) { // Can never overflow self.0 |= rhs.0; } } impl<const N: usize> ops::BitXorAssign<U<N>> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::BitXorAssign<<Underlying<N> as Type>::U>, { fn bitxor_assign(&mut self, rhs: U<N>) { // Can never overflow self.0 ^= rhs.0; } } impl<const N: usize> ops::ShlAssign<usize> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::ShlAssign<usize>, <Underlying<N> as Type>::U: ops::BitAndAssign<<Underlying<N> as Type>::U>, { fn shl_assign(&mut self, rhs: usize) { #[cfg(debug_assertions)] { if rhs >= N { panic!("Attempted to shift left with overflow"); } } self.0 = (self.0 << rhs) & max_with_bits(N); } } impl<const N: usize> ops::ShrAssign<usize> for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: ops::ShrAssign<usize>, <Underlying<N> as Type>::U: ops::BitAndAssign<<Underlying<N> as Type>::U>, { fn shr_assign(&mut self, rhs: usize) { #[cfg(debug_assertions)] { if rhs >= N { panic!("Attempted to shift right with overflow"); } } self.0 = (self.0 & max_with_bits(N)) >> rhs; } } impl<const N: usize> ops::Not for U<N> where Underlying<N>: Type, { type Output = Self; fn not(self) -> Self::Output { U(!self.0 & max_with_bits(N)) } } impl<const N: usize> Zero for U<N> where Underlying<N>: Type, { fn zero() -> Self { U(<Underlying<N> as Type>::U::zero()) } fn is_zero(&self) -> bool { self.0.is_zero() } } impl<const N: usize> One for U<N> where Underlying<N>: Type, { fn one() -> Self { U(<Underlying<N> as Type>::U::one()) } } impl<const N: usize> PartialEq for U<N> where Underlying<N>: Type, { fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) } } impl<const N: usize> Eq for U<N> where Underlying<N>: Type {} impl<const N: usize> PartialOrd for U<N> where Underlying<N>: Type, { fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { // Should never be overflowed self.0.partial_cmp(&other.0) } } impl<const N: usize> Ord for U<N> where Underlying<N>: Type, { fn cmp(&self, other: &Self) -> std::cmp::Ordering { // Should never be overflowed self.0.cmp(&other.0) } } impl<const N: usize> Num for U<N> where Underlying<N>: Type, { type FromStrRadixErr = <<Underlying<N> as Type>::U as Num>::FromStrRadixErr; fn from_str_radix(str: &str, radix: u32) ->
Result<Self, Self::FromStrRadixErr> { <Underlying<N> as Type>::U::from_str_radix(str, radix).map(U) } } impl<const N: usize> Unsigned for U<N> where Underlying<N>: Type {} impl<const N: usize> fmt::Display for U<N> where Underlying<N>: Type, <Underlying<N> as Type>::U: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } impl<const A: usize, const B: usize> AsPrimitive<U<A>> for U<B> where Underlying<A>: Type, Underlying<B>: Type, <Underlying<B> as Type>::U: AsPrimitive<<Underlying<A> as Type>::U>, { fn as_(self) -> U<A> { U::new(self.0.as_()) } } impl<T: PrimInt + 'static, const B: usize> AsPrimitive<T> for U<B> where Underlying<B>: Type, <Underlying<B> as Type>::U: AsPrimitive<T>, { fn as_(self) -> T { self.0.as_() } } macro_rules! impl_bit_type { ($n:literal) => { impl BitType for U<$n> { const BITS: usize = $n; fn from_aligned(aligned: &Self, slice: &mut [u8], offset: usize) { if slice.len() == (Self::BITS + 7) / 8 { let mut num: <Underlying<{ Self::BITS }> as Type>::U = unsafe { mem::zeroed() }; let mut bits = !num; let num_slice = unsafe { mem::transmute::< &mut <Underlying<{ Self::BITS }> as Type>::U, &mut [u8; mem::size_of::<Self>()], >(&mut num) }; num_slice[0..mem::size_of::<Self>()].copy_from_slice(unsafe { mem::transmute::<&Self, &[u8; mem::size_of::<Self>()]>(aligned) }); num <<= mem::size_of::<Self>() * 8 - Self::BITS; bits <<= mem::size_of::<Self>() * 8 - Self::BITS; num >>= mem::size_of::<Self>() * 8 - Self::BITS - offset; bits >>= mem::size_of::<Self>() * 8 - Self::BITS - offset; let target_num = unsafe { mem::transmute::< &mut [u8], (&mut <Underlying<{ Self::BITS }> as Type>::U, usize), >(slice) .0 }; *target_num &= !bits; *target_num |= num; } else { let mut num: <<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U = unsafe { mem::zeroed() }; let mut bits = !num; let num_slice = unsafe { mem::transmute::< &mut <<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U, &mut [u8; mem::size_of::<<<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U>()], >(&mut num) }; num_slice[0..mem::size_of::<Self>()].copy_from_slice(unsafe { mem::transmute::<&Self, &[u8; mem::size_of::<Self>()]>(aligned) }); num <<= mem::size_of::<<<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U>() * 8 - Self::BITS; bits <<= mem::size_of::<<<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U>() * 8 - Self::BITS; num >>= mem::size_of::<<<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U>() * 8 - Self::BITS - offset; bits >>= mem::size_of::<<<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U>() * 8 - Self::BITS - offset; let target_num = unsafe { mem::transmute::< &mut [u8], ( &mut <<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U, usize, ), >(slice) .0 }; *target_num &= !bits; *target_num |= num; } } fn to_aligned(slice: &[u8], offset: usize) -> Self { if slice.len() == (Self::BITS + 7) / 8 { let mut num: <Underlying<{ Self::BITS }> as Type>::U = unsafe { mem::zeroed() }; let num_slice = unsafe { mem::transmute::< &mut <Underlying<{ Self::BITS }> as Type>::U, &mut [u8; mem::size_of::<<Underlying<{ Self::BITS }> as Type>::U>( )], >(&mut num) }; num_slice[0..slice.len()].copy_from_slice(slice); num <<= mem::size_of::<Self>() * 8 - <Self>::BITS - offset; num >>= mem::size_of::<Self>() * 8 - <Self>::BITS; unsafe { mem::transmute_copy(&num) } } else { let mut num: <<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U = unsafe { mem::zeroed() }; let num_slice = unsafe { mem::transmute::< &mut <<Underlying<{ Self::BITS 
}> as Type>::Higher as Type>::U, &mut [u8; mem::size_of::< <<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U, >()], >(&mut num) }; num_slice[0..slice.len()].copy_from_slice(slice); num <<= mem::size_of::<<<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U>() * 8 - <Self>::BITS - offset; num >>= mem::size_of::<<<Underlying<{ Self::BITS }> as Type>::Higher as Type>::U>() * 8 - <Self>::BITS; unsafe { mem::transmute_copy(&num) } } } } }; ($($n: literal), +$(,)?) => { $(impl_bit_type!{$n})+ }; } impl_bit_type! { 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, } impl BitType for U<1> { const BITS: usize = 1; fn from_aligned(aligned: &Self, slice: &mut [u8], offset: usize) { bool::from_aligned(&(aligned.0 & 1 == 1), slice, offset) } fn to_aligned(slice: &[u8], offset: usize) -> Self { U(bool::to_aligned(slice, offset) as u8) } } impl BitType for U<8> { const BITS: usize = 8; fn from_aligned(aligned: &Self, slice: &mut [u8], offset: usize) { u8::from_aligned(&aligned.0, slice, offset) } fn to_aligned(slice: &[u8], offset: usize) -> Self { U(u8::to_aligned(slice, offset)) } } impl BitType for U<16> { const BITS: usize = 16; fn from_aligned(aligned: &Self, slice: &mut [u8], offset: usize) { u16::from_aligned(&aligned.0, slice, offset) } fn to_aligned(slice: &[u8], offset: usize) -> Self { U(u16::to_aligned(slice, offset)) } } impl BitType for U<32> { const BITS: usize = 32; fn from_aligned(aligned: &Self, slice: &mut [u8], offset: usize) { u32::from_aligned(&aligned.0, slice, offset) } fn to_aligned(slice: &[u8], offset: usize) -> Self { U(u32::to_aligned(slice, offset)) } } impl BitType for U<64> { const BITS: usize = 64; fn from_aligned(aligned: &Self, slice: &mut [u8], offset: usize) { u64::from_aligned(&aligned.0, slice, offset) } fn to_aligned(slice: &[u8], offset: usize) -> Self { U(u64::to_aligned(slice, offset)) } }
28.801394
132
0.489717
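A hypothetical round trip through the `U<N>` arithmetic above, assuming `Underlying<12>` resolves to `u16` as the record's `Type` machinery implies; in debug builds the overflow checks shown earlier would panic instead of silently wrapping.

fn demo() {
    let a = U::<12>::new(0x0FE);
    let b = U::<12>::new(0x001);
    let sum = a + b; // masked to 12 bits after the debug overflow check
    assert_eq!(sum.extract_underlying(), 0x0FF);
    let shifted = sum << 4; // 0x0FF0 stays within the 12-bit mask as 0xFF0
    assert_eq!(shifted.extract_underlying(), 0xFF0);
}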
013761b2817b1927cec646fa29facc04fb9a7aec
4,674
use super::{grpc, BlockConfig}; use crate::blockcfg::{Block, HeaderHash}; use crate::blockchain::{self, Blockchain, Error as BlockchainError, PreCheckedHeader, Ref, Tip}; use crate::settings::start::network::Peer; use chain_core::property::HasHeader; use network_core::client::{BlockService, Client as _}; use network_core::error::Error as NetworkError; use network_grpc::client::Connection; use slog::Logger; use thiserror::Error; use tokio::prelude::*; use tokio::runtime::Runtime; use std::fmt::Debug; use std::io; use std::sync::Arc; #[derive(Error, Debug)] pub enum Error { #[error("runtime initialization failed")] RuntimeInit { source: io::Error }, #[error("failed to connect to bootstrap peer")] Connect { source: grpc::ConnectError }, #[error("connection broken")] ClientNotReady { source: NetworkError }, #[error("bootstrap pull request failed")] PullRequestFailed { source: NetworkError }, #[error("bootstrap pull stream failed")] PullStreamFailed { source: NetworkError }, #[error("block header check failed")] HeaderCheckFailed { source: BlockchainError }, #[error("received block {0} is already present")] BlockAlreadyPresent(HeaderHash), #[error("received block {0} is not connected to the block chain")] BlockMissingParent(HeaderHash), #[error("failed to apply block to the blockchain")] ApplyBlockFailed { source: BlockchainError }, #[error("failed to select the new tip")] ChainSelectionFailed { source: BlockchainError }, } pub fn bootstrap_from_peer( peer: Peer, blockchain: Blockchain, branch: Tip, logger: Logger, ) -> Result<Arc<Ref>, Error> { info!(logger, "connecting to bootstrap peer {}", peer.connection); let runtime = Runtime::new().map_err(|e| Error::RuntimeInit { source: e })?; let blockchain2 = blockchain.clone(); let logger2 = logger.clone(); let bootstrap = grpc::connect(peer.address(), None, runtime.executor()) .map_err(|e| Error::Connect { source: e }) .and_then(|client: Connection<BlockConfig>| { client .ready() .map_err(|e| Error::ClientNotReady { source: e }) }) .join(branch.get_ref()) .and_then(move |(mut client, tip)| { let tip_hash = tip.hash(); debug!(logger, "pulling blocks starting from {}", tip_hash); client .pull_blocks_to_tip(&[tip_hash]) .map_err(|e| Error::PullRequestFailed { source: e }) .and_then(move |stream| bootstrap_from_stream(blockchain, tip, stream, logger)) }) .and_then(move |tip| { blockchain::process_new_ref(logger2, blockchain2, branch, tip.clone()) .map_err(|e| Error::ChainSelectionFailed { source: e }) .map(|()| tip) }); runtime.block_on_all(bootstrap) } fn bootstrap_from_stream<S>( blockchain: Blockchain, tip: Arc<Ref>, stream: S, logger: Logger, ) -> impl Future<Item = Arc<Ref>, Error = Error> where S: Stream<Item = Block, Error = NetworkError>, S::Error: Debug, { let fold_logger = logger.clone(); stream .map_err(|e| Error::PullStreamFailed { source: e }) .fold(tip, move |_, block| { handle_block(blockchain.clone(), block, fold_logger.clone()) }) } fn handle_block( mut blockchain: Blockchain, block: Block, logger: Logger, ) -> impl Future<Item = Arc<Ref>, Error = Error> { let header = block.header(); trace!( logger, "received block from the bootstrap node: {:#?}", header ); let mut end_blockchain = blockchain.clone(); blockchain .pre_check_header(header, true) .map_err(|e| Error::HeaderCheckFailed { source: e }) .and_then(|pre_checked| match pre_checked { PreCheckedHeader::AlreadyPresent { header, .. } => { Err(Error::BlockAlreadyPresent(header.hash())) } PreCheckedHeader::MissingParent { header, .. 
} => { Err(Error::BlockMissingParent(header.hash())) } PreCheckedHeader::HeaderWithCache { header, parent_ref } => Ok((header, parent_ref)), }) .and_then(move |(header, parent_ref)| { blockchain .post_check_header(header, parent_ref) .map_err(|e| Error::HeaderCheckFailed { source: e }) }) .and_then(move |post_checked| { end_blockchain .apply_and_store_block(post_checked, block) .map_err(|e| Error::ApplyBlockFailed { source: e }) }) }
35.142857
97
0.616602
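A self-contained sketch of the thiserror pattern used throughout the error enum above: a human-readable message on the variant, with the underlying failure preserved in a field named `source` so error chains stay intact. The error type and function here are illustrative.

use std::io;
use thiserror::Error;

#[derive(Error, Debug)]
enum FetchError {
    #[error("failed to connect to bootstrap peer")]
    Connect { source: io::Error }, // `source` field is picked up automatically
}

fn connect() -> Result<(), FetchError> {
    let io_err = io::Error::new(io::ErrorKind::ConnectionRefused, "refused");
    Err(FetchError::Connect { source: io_err })
}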
e695b7290dc7a2fc9639bfccbf6a882f87da11d3
4,499
//! Implementation of the `#[simd_test]` macro //! //! This macro expands to a `#[test]` function which tests the local machine //! for the appropriate cfg before calling the inner test function. #![feature(proc_macro)] extern crate proc_macro; extern crate proc_macro2; #[macro_use] extern crate quote; use std::env; use proc_macro2::{Ident, Literal, Span, TokenStream, TokenTree}; fn string(s: &str) -> TokenTree { Literal::string(s).into() } #[proc_macro_attribute] pub fn simd_test( attr: proc_macro::TokenStream, item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { let tokens = TokenStream::from(attr).into_iter().collect::<Vec<_>>(); if tokens.len() != 3 { panic!("expected #[simd_test(enable = \"feature\")]"); } match &tokens[0] { TokenTree::Ident(tt) if tt.to_string() == "enable" => {} _ => panic!("expected #[simd_test(enable = \"feature\")]"), } match &tokens[1] { TokenTree::Punct(tt) if tt.as_char() == '=' => {} _ => panic!("expected #[simd_test(enable = \"feature\")]"), } let enable_feature = match &tokens[2] { TokenTree::Literal(tt) => tt.to_string(), _ => panic!("expected #[simd_test(enable = \"feature\")]"), }; let enable_feature = enable_feature .trim_left_matches('"') .trim_right_matches('"'); let target_features: Vec<String> = enable_feature .replace('+', "") .split(',') .map(|v| String::from(v)) .collect(); let enable_feature = string(enable_feature); let item = TokenStream::from(item); let name = find_name(item.clone()); let name: TokenStream = name .to_string() .parse() .expect(&format!("failed to parse name: {}", name.to_string())); let target = env::var("TARGET") .expect("TARGET environment variable should be set for rustc"); let mut force_test = false; let macro_test = match target .split('-') .next() .expect(&format!("target triple contained no \"-\": {}", target)) { "i686" | "x86_64" | "i586" => "is_x86_feature_detected", "arm" | "armv7" => "is_arm_feature_detected", "aarch64" => "is_aarch64_feature_detected", "powerpc" | "powerpcle" => "is_powerpc_feature_detected", "powerpc64" | "powerpc64le" => "is_powerpc64_feature_detected", "mips" | "mipsel" => { // FIXME: // On MIPS CI run-time feature detection always returns false due // to this qemu bug: https://bugs.launchpad.net/qemu/+bug/1754372 // // This is a workaround to force the MIPS tests to always run on // CI. force_test = true; "is_mips_feature_detected" } "mips64" | "mips64el" => { // FIXME: see above force_test = true; "is_mips64_feature_detected" } t => panic!("unknown target: {}", t), }; let macro_test = Ident::new(macro_test, Span::call_site()); let mut cfg_target_features = TokenStream::new(); use quote::ToTokens; for feature in target_features { let q = quote_spanned! { proc_macro2::Span::call_site() => #macro_test!(#feature) && }; q.to_tokens(&mut cfg_target_features); } let q = quote!{ true }; q.to_tokens(&mut cfg_target_features); let test_norun = std::env::var("STDSIMD_TEST_NORUN").is_ok(); let maybe_ignore = if !test_norun { TokenStream::new() } else { (quote! { #[ignore] }).into() }; let ret: TokenStream = quote_spanned! 
{ proc_macro2::Span::call_site() => #[allow(non_snake_case)] #[test] #maybe_ignore fn #name() { if #force_test | (#cfg_target_features) { return unsafe { #name() }; } else { ::stdsimd_test::assert_skip_test_ok(stringify!(#name)); } #[target_feature(enable = #enable_feature)] #item } }.into(); ret.into() } fn find_name(item: TokenStream) -> Ident { let mut tokens = item.into_iter(); while let Some(tok) = tokens.next() { if let TokenTree::Ident(word) = tok { if word == "fn" { break; } } } match tokens.next() { Some(TokenTree::Ident(word)) => word, _ => panic!("failed to find function name"), } }
31.243056
77
0.562569
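A sketch of the runtime gate the macro above expands to on x86 targets, using the standard library's feature-detection macro directly; the feature string is illustrative.

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn run_if_supported() {
    if is_x86_feature_detected!("sse2") {
        println!("sse2 detected; the generated test body would run here");
    } else {
        println!("sse2 missing; the test records itself as skipped");
    }
}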
0ef0c4f9cba0798daf5a6f9ee9ede27230be809b
683
use num_complex::Complex32;
use std::{f32::consts::FRAC_1_SQRT_2, sync::Arc};

pub mod ferrite;
pub mod mpstthree;
pub mod rumpsteak;
pub mod rustfft;
pub mod sesh;

// Combine two equally sized buffers element-wise with `f`.
fn zip_with(
    x: Arc<[Complex32]>,
    y: Arc<[Complex32]>,
    f: impl Fn(Complex32, Complex32) -> Complex32,
) -> Arc<[Complex32]> {
    x.iter().zip(y.iter()).map(|(&x, &y)| f(x, y)).collect()
}

// Rotate 45 degrees clockwise: the normalized sum of the input and its
// 90-degree rotation.
fn rotate_45(input: Complex32) -> Complex32 {
    (rotate_90(input) + input) * Complex32::from(FRAC_1_SQRT_2)
}

// Rotate 90 degrees clockwise (multiply by -i).
fn rotate_90(input: Complex32) -> Complex32 {
    Complex32::new(input.im, -input.re)
}

// Rotate 135 degrees clockwise.
fn rotate_135(input: Complex32) -> Complex32 {
    (rotate_90(input) - input) * Complex32::from(FRAC_1_SQRT_2)
}
23.551724
63
0.66325
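A quick numeric check of the rotation helpers above on the unit input 1 + 0i; since `rotate_90` multiplies by -i, the expected outputs are as commented.

fn main() {
    let x = Complex32::new(1.0, 0.0);
    assert_eq!(rotate_90(x), Complex32::new(0.0, -1.0)); // clockwise quarter turn
    let r = rotate_45(x); // (1 - i) / sqrt(2)
    assert!((r.re - FRAC_1_SQRT_2).abs() < 1e-6);
    assert!((r.im + FRAC_1_SQRT_2).abs() < 1e-6);
}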
f77a5ef276236f810a84fef50e2110879a2b94df
18,504
use crate::packet::CompressedLossList; use crate::protocol::encryption::Cipher; use crate::protocol::sender::encapsulate::Encapsulate; use crate::{ConnectionSettings, DataPacket, SeqNumber}; use bytes::Bytes; use std::cmp::max; use std::collections::{BTreeSet, VecDeque}; use std::time::{Duration, Instant}; #[derive(Debug)] pub struct SendBuffer { encapsulate: Encapsulate, encrypt: Cipher, latency_window: Duration, buffer: VecDeque<DataPacket>, next_send: Option<SeqNumber>, // 1) Sender's Loss List: The sender's loss list is used to store the // sequence numbers of the lost packets fed back by the receiver // through NAK packets or inserted in a timeout event. The numbers // are stored in increasing order. lost_list: BTreeSet<SeqNumber>, } impl SendBuffer { pub fn new(settings: &ConnectionSettings) -> Self { Self { encapsulate: Encapsulate::new(settings), encrypt: Cipher::new(settings.crypto_manager.clone()), buffer: VecDeque::new(), next_send: None, lost_list: BTreeSet::new(), latency_window: max( settings.send_tsbpd_latency + settings.send_tsbpd_latency / 4, // 125% of TSBPD Duration::from_secs(1), ), } } pub fn push_data(&mut self, data: (Instant, Bytes)) -> u64 { let encapsulate = &mut self.encapsulate; let buffer = &mut self.buffer; let encrypt = &mut self.encrypt; encapsulate.encapsulate(data, |packet| { let (packet, _) = encrypt.encrypt(packet); buffer.push_back(packet); }) } pub fn is_flushed(&self) -> bool { self.lost_list.is_empty() && self.buffer.is_empty() } pub fn pop_next_lost_packet(&mut self) -> Option<DataPacket> { let next_lost = self.pop_lost_list()?; let front = self.front_packet()?; let offset = next_lost - front; let mut packet = self.buffer.get(offset as usize)?.clone(); packet.retransmitted = true; Some(packet) } pub fn has_packets_to_send(&self) -> bool { self.peek_next_packet().is_some() || !self.lost_list.is_empty() } pub fn number_of_unacked_packets(&mut self) -> u32 { self.buffer.len() as u32 } pub fn pop_next_packet(&mut self) -> Option<DataPacket> { let packet = self.peek_next_packet()?.clone(); self.next_send = Some(packet.seq_number + 1); Some(packet) } pub fn pop_next_16n_packet(&mut self) -> Option<DataPacket> { match self.peek_next_packet().map(|p| p.seq_number % 16) { Some(0) => self.pop_next_packet(), _ => None, } } pub fn flush_on_close(&mut self, should_drain: bool) -> Option<DataPacket> { if should_drain && self.buffer.len() == 1 { self.next_send = None; self.buffer.pop_front() } else { None } } pub fn update_largest_acked_seq_number(&mut self, ack_number: SeqNumber) -> Option<(u32, u32)> { let first = self.front_packet()?; let next = self.next_send?; if ack_number < first || ack_number > next { return None; } let mut recovered_count = 0; let mut received_count = 0; while self.peek_next_lost(ack_number).is_some() { let _ = self.pop_lost_list(); recovered_count += 1; } while self .front_packet() .filter(|f| *f < ack_number - 1) .is_some() { let _ = self.buffer.pop_front(); received_count += 1; } Some((received_count, recovered_count)) } pub fn add_to_loss_list( &mut self, nak: CompressedLossList, ) -> impl Iterator<Item = (Loss, SeqNumber, SeqNumber)> + '_ { LossIterator { loss_list: nak.into_iter_decompressed(), first: None, buffer: self, } } pub fn drop_too_late_packets(&mut self, now: Instant) -> Option<(SeqNumber, SeqNumber)> { let latency_window = self.latency_window; let ts_now = self.encapsulate.timestamp_from(now); let front = self .buffer .front() .filter(|p| ts_now > p.timestamp + latency_window)?; let first = front.seq_number; let mut last 
= first; let mut message = front.message_number; for next in self.buffer.iter().skip(1) { if ts_now > next.timestamp + latency_window { message = next.message_number; last = next.seq_number; } else if next.message_number == message { last = next.seq_number; } else { break; } } let count = last - first + 1; let _ = self.buffer.drain(0..count as usize).count(); self.next_send = self .next_send .filter(|next| *next > last) .or(Some(last + 1)); Some((first, last)) } fn front_packet(&self) -> Option<SeqNumber> { self.buffer.front().map(|p| p.seq_number) } fn peek_next_packet(&self) -> Option<&DataPacket> { let first = self.front_packet()?; let next_send = self.next_send.unwrap_or(first); let index = next_send - first; self.buffer.get(index as usize) } fn pop_lost_list(&mut self) -> Option<SeqNumber> { let next = self.lost_list.iter().copied().next()?; let _ = self.lost_list.remove(&next); Some(next) } fn peek_next_lost(&self, seq_num: SeqNumber) -> Option<SeqNumber> { self.lost_list .iter() .filter(|first| *(*first) < seq_num) .copied() .next() } } #[derive(Clone, Debug, PartialEq)] pub enum Loss { Added, Dropped, Ignored, } pub struct LossIterator<'a, I: Iterator<Item = SeqNumber>> { buffer: &'a mut SendBuffer, loss_list: I, first: Option<(Loss, SeqNumber)>, } impl<'a, I> LossIterator<'a, I> where I: Iterator<Item = SeqNumber>, { fn next_loss(&mut self) -> Option<(Loss, SeqNumber)> { use Loss::*; let front = self.buffer.front_packet(); let next_send = self.buffer.next_send; self.loss_list.next().map(|next| match (front, next_send) { (_, Some(next_send)) if next >= next_send => (Ignored, next), (_, None) => (Dropped, next), (Some(front), _) if next < front => (Dropped, next), (None, _) => (Dropped, next), (Some(_), Some(_)) => { self.buffer.lost_list.insert(next); (Added, next) } }) } } impl<'a, I> Iterator for LossIterator<'a, I> where I: Iterator<Item = SeqNumber>, { type Item = (Loss, SeqNumber, SeqNumber); fn next(&mut self) -> Option<Self::Item> { let (first_type, first) = self.first.clone().or_else(|| self.next_loss())?; let mut last = first; loop { match self.next_loss() { Some((next_type, next)) if next_type == first_type && next == last + 1 => { last = next; continue; } Some((next_type, next)) => { self.first = Some((next_type, next)); return Some((first_type, first, last)); } None => { self.first = None; return Some((first_type, first, last)); } } } } } #[cfg(test)] mod test { use super::*; use crate::packet::{DataEncryption, PacketLocation}; use crate::protocol::TimeStamp; use crate::*; use bytes::Bytes; use std::iter::FromIterator; use std::time::{Duration, Instant}; const MILLIS: Duration = Duration::from_millis(1); const TSBPD: Duration = Duration::from_secs(2); fn new_settings(start: Instant) -> ConnectionSettings { ConnectionSettings { remote: ([127, 0, 0, 1], 2223).into(), remote_sockid: SocketId(2), local_sockid: SocketId(2), socket_start_time: start, rtt: Duration::default(), init_seq_num: SeqNumber::new_truncate(0), max_packet_size: 1316, max_flow_size: 8192, send_tsbpd_latency: TSBPD, recv_tsbpd_latency: TSBPD, crypto_manager: None, stream_id: None, bandwidth: LiveBandwidthMode::default(), } } #[test] fn not_ready_empty() { let start = Instant::now(); let settings = new_settings(start); let mut buffer = SendBuffer::new(&settings); let data = Bytes::new(); buffer.push_data((start, data)); assert!(!buffer.is_flushed()); assert_eq!( buffer.pop_next_packet(), Some(DataPacket { seq_number: SeqNumber(0), message_loc: PacketLocation::ONLY, in_order_delivery: false, encryption: 
DataEncryption::None, retransmitted: false, message_number: MsgNumber(0), timestamp: TimeStamp::MIN, dest_sockid: SocketId(2), payload: Bytes::new() }) ); assert!(!buffer.is_flushed()); } #[test] fn pop_next_packet() { let start = Instant::now(); let mut buffer = SendBuffer::new(&new_settings(start)); for n in 0..=16u32 { let now = start + n * MILLIS; buffer.push_data((now, Bytes::new())); assert!(buffer.has_packets_to_send()); assert!(!buffer.is_flushed()); } for n in 0..=16 { let next_packet = buffer.pop_next_packet().map(|p| p.seq_number.as_raw()); let next_packet_16n = buffer.pop_next_16n_packet().map(|p| p.seq_number.as_raw()); if n < 15 { assert_eq!(next_packet, Some(n)); assert_eq!(next_packet_16n, None); } else if n < 16 { assert_eq!(next_packet, Some(n)); assert_eq!(next_packet_16n, Some(n + 1)); } else { assert_eq!(next_packet, None); assert_eq!(next_packet_16n, None); } } assert!(!buffer.has_packets_to_send()); assert!(!buffer.is_flushed()); } #[test] fn pop_next_lost_packet() { let start = Instant::now(); let mut buffer = SendBuffer::new(&new_settings(start)); for n in 0..=13 { let now = start + n * MILLIS; buffer.push_data((now, Bytes::new())); } for _ in 0..=11 { assert_ne!(buffer.pop_next_packet(), None); } assert_eq!(buffer.pop_next_lost_packet(), None); assert!( buffer .add_to_loss_list(CompressedLossList::from_loss_list( vec![SeqNumber(11), SeqNumber(13)].into_iter(), )) .count() > 0 ); assert!( buffer .add_to_loss_list(CompressedLossList::from_loss_list( vec![SeqNumber(7), SeqNumber(12)].into_iter(), )) .count() > 0 ); // the spec suggests the loss list should be ordered smallest to largest let next = buffer .pop_next_lost_packet() .map(|p| (p.seq_number.as_raw(), p.retransmitted)); assert_eq!(next, Some((7, true))); let next = buffer .pop_next_lost_packet() .map(|p| (p.seq_number.as_raw(), p.retransmitted)); assert_eq!(next, Some((11, true))); assert_eq!(buffer.pop_next_lost_packet(), None); assert!(buffer.has_packets_to_send()); assert!(!buffer.is_flushed()); } #[test] fn on_ack() { let start = Instant::now(); let mut buffer = SendBuffer::new(&new_settings(start)); assert!(buffer.is_flushed()); for n in 0..=3 { let now = start + n * MILLIS; buffer.push_data((now, Bytes::new())); } for _ in 0..=2 { assert_ne!(buffer.pop_next_packet(), None); } assert_eq!(buffer.number_of_unacked_packets(), 4); // mark two packets received, one packet is kept around for retransmit on flush // keeping this original behavior intact otherwise integration tests fail assert_eq!( buffer.update_largest_acked_seq_number(SeqNumber(3)), Some((2, 0)) ); assert_eq!(buffer.number_of_unacked_packets(), 2); assert!(!buffer.is_flushed()); assert!(buffer.has_packets_to_send()); // NAK for packets from the past should be ignored assert!( buffer .add_to_loss_list(CompressedLossList::from_loss_list( vec![SeqNumber(1)].into_iter(), )) .count() > 0 ); assert_eq!(buffer.pop_next_lost_packet(), None); assert_eq!(buffer.number_of_unacked_packets(), 2); assert!(!buffer.is_flushed()); assert!(buffer.has_packets_to_send()); // ACK for unsent packets should be ignored assert_eq!(buffer.update_largest_acked_seq_number(SeqNumber(4)), None); assert_eq!(buffer.number_of_unacked_packets(), 2); assert!(!buffer.is_flushed()); assert!(buffer.has_packets_to_send()); assert_ne!(buffer.pop_next_packet(), None); assert_eq!(buffer.pop_next_packet(), None); assert_eq!(buffer.number_of_unacked_packets(), 2); assert!(!buffer.is_flushed()); assert!(!buffer.has_packets_to_send()); assert_eq!( 
buffer.update_largest_acked_seq_number(SeqNumber(4)), Some((1, 0)) ); assert_eq!(buffer.number_of_unacked_packets(), 1); assert!(!buffer.is_flushed()); assert!(!buffer.has_packets_to_send()); } #[test] fn nak_then_ack() { let start = Instant::now(); let mut buffer = SendBuffer::new(&new_settings(start)); for n in 0..=2 { let now = start + n * MILLIS; buffer.push_data((now, Bytes::new())); assert_ne!(buffer.pop_next_packet(), None); } assert!( buffer .add_to_loss_list(CompressedLossList::from_loss_list( vec![SeqNumber(1)].into_iter(), )) .count() > 0 ); // two packets received, one recovered assert_eq!( buffer.update_largest_acked_seq_number(SeqNumber(3)), Some((2, 1)) ); assert_eq!(buffer.pop_next_lost_packet(), None); assert_eq!(buffer.number_of_unacked_packets(), 1); assert!(!buffer.has_packets_to_send()); assert!(!buffer.is_flushed()); } #[test] fn drop_too_late_packets_queued() { let start = Instant::now(); let mut buffer = SendBuffer::new(&new_settings(start)); for n in 0..=2 { let now = start + n * MILLIS; buffer.push_data((now, Bytes::from_iter([0u8; 2048]))); } // only drop the too late packets, leave the rest queued let now = start + TSBPD + TSBPD / 4 + 2 * MILLIS; assert_eq!( buffer.drop_too_late_packets(now), Some((SeqNumber(0), SeqNumber(3))) ); assert_eq!(buffer.drop_too_late_packets(now), None); assert!(!buffer.is_flushed()) } #[test] fn drop_too_late_packets_sent() { let start = Instant::now(); let mut buffer = SendBuffer::new(&new_settings(start)); for n in 0..=2 { let now = start + n * MILLIS; buffer.push_data((now, Bytes::from_iter([0u8; 2048]))); } // simulate sending packets from the first two messages assert_ne!(buffer.pop_next_packet(), None); assert_ne!(buffer.pop_next_packet(), None); assert_ne!(buffer.pop_next_packet(), None); // only drop the too late packets, leave the rest queued let now = start + TSBPD + TSBPD / 4 + 2 * MILLIS; assert_eq!( buffer.drop_too_late_packets(now), Some((SeqNumber(0), SeqNumber(3))) ); assert_eq!(buffer.drop_too_late_packets(now), None); assert!(!buffer.is_flushed()) } #[test] fn drop_too_late_packets_lost() { let start = Instant::now(); let mut buffer = SendBuffer::new(&new_settings(start)); for n in 0..=2 { let now = start + n * MILLIS; buffer.push_data((now, Bytes::from_iter([0u8; 2048]))); } // simulate sending packets from the first two messages assert_ne!(buffer.pop_next_packet(), None); assert_ne!(buffer.pop_next_packet(), None); assert_ne!(buffer.pop_next_packet(), None); use Loss::*; assert_eq!( buffer .add_to_loss_list(CompressedLossList::from_loss_list( vec![SeqNumber(1), SeqNumber(2), SeqNumber(3), SeqNumber(5)].into_iter(), )) .collect::<Vec<_>>(), vec![ (Added, SeqNumber(1), SeqNumber(2)), (Ignored, SeqNumber(3), SeqNumber(3)), (Ignored, SeqNumber(5), SeqNumber(5)), ] ); // only drop the too late packets, leave the rest queued let now = start + TSBPD + TSBPD / 4 + 2 * MILLIS; assert_eq!( buffer.drop_too_late_packets(now), Some((SeqNumber(0), SeqNumber(3))) ); assert_eq!(buffer.drop_too_late_packets(now), None); assert!(!buffer.is_flushed()); assert_eq!( buffer .add_to_loss_list(CompressedLossList::from_loss_list( vec![SeqNumber(1), SeqNumber(2), SeqNumber(3), SeqNumber(5)].into_iter(), )) .collect::<Vec<_>>(), vec![ (Dropped, SeqNumber(1), SeqNumber(3)), (Ignored, SeqNumber(5), SeqNumber(5)), ] ); } }
32.18087
100
0.543612
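A distilled sketch of the drop-window rule that `SendBuffer::new` above computes: packets older than 125% of the send TSBPD latency, with a one-second floor, become candidates for `drop_too_late_packets`.

use std::cmp::max;
use std::time::Duration;

fn latency_window(send_tsbpd_latency: Duration) -> Duration {
    max(
        send_tsbpd_latency + send_tsbpd_latency / 4, // 125% of TSBPD
        Duration::from_secs(1),                      // floor
    )
}

fn main() {
    assert_eq!(latency_window(Duration::from_secs(2)), Duration::from_millis(2500));
    assert_eq!(latency_window(Duration::from_millis(100)), Duration::from_secs(1));
}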
08992ba44b040a962ac4c59e681bbad697da3969
10,034
use std::convert::{TryFrom, TryInto}; use image::RgbaImage; use crate::{swizzle::swizzle, CubeLut3d}; #[cfg(test)] use indoc::indoc; /// A 3D RGBA LUT with unswizzled data in row major order. /// Values are written to data using a nested ZYX loops with X being the innermost loop. #[derive(Debug, PartialEq)] pub struct Lut3dLinear { size: u32, data: Vec<u8>, } impl Lut3dLinear { /// The dimension of the LUT for each axis. A LUT with size 16 will have 16x16x16 RGBA values. pub fn size(&self) -> u32 { self.size } pub fn new(size: u32, data: Vec<u8>) -> Lut3dLinear { Lut3dLinear { size, data } } } impl AsRef<[u8]> for Lut3dLinear { fn as_ref(&self) -> &[u8] { &self.data } } impl From<Lut3dSwizzled> for Lut3dLinear { /// Deswizzle the data in value to create a `Lut3dLinear` of identical size. fn from(value: Lut3dSwizzled) -> Self { let mut data = vec![0u8; value.data.len()]; swizzle(&value.data, &mut data, true); Lut3dLinear { size: value.size, data, } } } impl From<CubeLut3d> for Lut3dLinear { fn from(value: CubeLut3d) -> Self { let mut data = Vec::new(); // TODO: How to handle out of range values? let to_u8 = |f: f32| (f * 255f32).min(255f32).round() as u8; for &(r, g, b) in value.data() { // Always use 255u8 for alpha to match in game nutexb LUTs. data.push(to_u8(r)); data.push(to_u8(g)); data.push(to_u8(b)); data.push(255u8); } Lut3dLinear { size: value.size() as u32, data, } } } impl TryFrom<RgbaImage> for Lut3dLinear { type Error = &'static str; /// Tries to convert an image with slices in z arranged horizontally along the top of the image. /// For example, a 16x16x16 LUT image must have dimensions at least 256x16 pixels. fn try_from(value: RgbaImage) -> Result<Self, Self::Error> { (&value).try_into() } } impl TryFrom<&RgbaImage> for Lut3dLinear { type Error = &'static str; /// Tries to convert an image with slices in z arranged horizontally along the top of the image. /// For example, a 16x16x16 LUT image must have dimensions at least 256x16 pixels. fn try_from(value: &RgbaImage) -> Result<Self, Self::Error> { if value.width() != value.height() * value.height() { Err("Invalid dimensions. Expected width to equal height * height.") } else { let data = value.as_flat_samples().samples.to_vec(); let lut = Lut3dLinear { size: value.height(), data, }; Ok(lut) } } } impl TryFrom<Lut3dLinear> for RgbaImage { type Error = &'static str; fn try_from(value: Lut3dLinear) -> Result<Self, Self::Error> { RgbaImage::from_raw(value.size * value.size, value.size, value.data) .ok_or("Error creating RgbaImage.") } } impl TryFrom<&Lut3dLinear> for RgbaImage { type Error = &'static str; fn try_from(value: &Lut3dLinear) -> Result<Self, Self::Error> { RgbaImage::from_raw(value.size * value.size, value.size, value.data.clone()) .ok_or("Error creating RgbaImage.") } } /// A 3D RGBA LUT with swizzled data. #[derive(Debug, PartialEq)] pub struct Lut3dSwizzled { size: u32, data: Vec<u8>, } // TODO: Wrap this into another trait to store size, data by ref, etc? impl Lut3dSwizzled { /// The dimension of the LUT for each axis. A LUT with size 16 will have 16x16x16 RGBA values. pub fn size(&self) -> u32 { self.size } pub fn new(size: u32, data: Vec<u8>) -> Lut3dSwizzled { Lut3dSwizzled { size, data } } } impl AsRef<[u8]> for Lut3dSwizzled { fn as_ref(&self) -> &[u8] { &self.data } } impl From<&Lut3dLinear> for Lut3dSwizzled { /// Swizzle the data in value to create a `Lut3dLinear` of identical size. 
fn from(value: &Lut3dLinear) -> Self { let mut data = vec![0u8; value.data.len()]; swizzle(&value.data, &mut data, false); Lut3dSwizzled { size: value.size, data, } } } impl From<Lut3dLinear> for Lut3dSwizzled { /// Swizzle the data in value to create a `Lut3dLinear` of identical size. fn from(value: Lut3dLinear) -> Self { (&value).into() } } #[cfg(test)] mod tests { use super::*; #[test] fn cube_to_linear() { let text = indoc! {r#" # comment LUT_3D_SIZE 2 # comment 0 0 0 1 0 0 0 .75 0 1 .75 0 0 .25 1 1 .25 1 0 1 1 1 1 1 "#}; let cube = CubeLut3d::from_text(text).unwrap(); let linear = Lut3dLinear::from(cube); assert_eq!(2, linear.size); itertools::assert_equal( &linear.data, &[ 0u8, 0u8, 0u8, 255u8, 255u8, 0u8, 0u8, 255u8, 0u8, 191u8, 0u8, 255u8, 255u8, 191u8, 0u8, 255u8, 0u8, 64u8, 255u8, 255u8, 255u8, 64u8, 255u8, 255u8, 0u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, 255u8, ], ) } #[test] fn linear_to_rgba() { let data = crate::create_default_lut().to_vec(); let linear = Lut3dLinear { size: 16u32, data }; let img = RgbaImage::try_from(linear).unwrap(); assert_eq!(256u32, img.width()); assert_eq!(16u32, img.height()); // Make sure the pixel values were copied correctly. let data = crate::create_default_lut().to_vec(); itertools::assert_equal(&data, img.as_flat_samples().samples.into_iter()); } #[test] fn linear_ref_to_rgba() { let data = crate::create_default_lut().to_vec(); let linear = Lut3dLinear { size: 16u32, data }; let img = RgbaImage::try_from(&linear).unwrap(); assert_eq!(256u32, img.width()); assert_eq!(16u32, img.height()); // Make sure the pixel values were copied correctly. let data = crate::create_default_lut().to_vec(); itertools::assert_equal(&data, img.as_flat_samples().samples.into_iter()); } #[test] fn rgba_ref_to_linear() { let data = crate::create_default_lut().to_vec(); let img = RgbaImage::from_raw(256, 16, data).unwrap(); let linear = Lut3dLinear::try_from(&img).unwrap(); assert_eq!(16u32, linear.size); assert_eq!(crate::image_size(16, 16, 16, 4), linear.data.len()); // Make sure the pixel values were copied correctly. let data = crate::create_default_lut().to_vec(); itertools::assert_equal(&data, &linear.data); } #[test] fn rgba_to_linear() { let data = crate::create_default_lut().to_vec(); let img = RgbaImage::from_raw(256, 16, data).unwrap(); let linear = Lut3dLinear::try_from(img).unwrap(); assert_eq!(16u32, linear.size); assert_eq!(crate::image_size(16, 16, 16, 4), linear.data.len()); // Make sure the pixel values were copied correctly. let data = crate::create_default_lut().to_vec(); itertools::assert_equal(&data, &linear.data); } #[test] fn rgba_to_linear_invalid_dimensions() { // The width should be height^2. let data = crate::create_default_lut().to_vec(); let img = RgbaImage::from_raw(128, 32, data).unwrap(); let linear = Lut3dLinear::try_from(&img); assert_eq!( linear, Err("Invalid dimensions. Expected width to equal height * height.") ); } #[test] fn linear_ref_to_swizzled() { // Test that the data is correctly swizzled when converting. 
let data = crate::create_default_lut().to_vec(); let linear = Lut3dLinear { size: 16u32, data }; let swizzled: Lut3dSwizzled = (&linear).into(); assert_eq!(16u32, swizzled.size); // Black swizzled address: 0 (0000 0000 0000 0000) assert_eq!(&[0u8, 0u8, 0u8, 255u8], &swizzled.data[0..4]); // Red swizzled address: 300 (0000 0001 0010 1100) assert_eq!(&[255u8, 0u8, 0u8, 255u8], &swizzled.data[300..304]); // Green swizzled address: 8400 (0010 0000 1101 0000) assert_eq!(&[0u8, 255u8, 0u8, 255u8], &swizzled.data[8400..8404]); // Blue swizzled address: 7680 (0001 1110 0000 0000) assert_eq!(&[0u8, 0u8, 255u8, 255u8], &swizzled.data[7680..7684]); } #[test] fn linear_to_swizzled() { // Test that the data is correctly swizzled when converting. let data = crate::create_default_lut().to_vec(); let linear = Lut3dLinear { size: 16u32, data }; let swizzled: Lut3dSwizzled = linear.into(); assert_eq!(16u32, swizzled.size); // Black swizzled address: 0 (0000 0000 0000 0000) assert_eq!(&[0u8, 0u8, 0u8, 255u8], &swizzled.data[0..4]); // Red swizzled address: 300 (0000 0001 0010 1100) assert_eq!(&[255u8, 0u8, 0u8, 255u8], &swizzled.data[300..304]); // Green swizzled address: 8400 (0010 0000 1101 0000) assert_eq!(&[0u8, 255u8, 0u8, 255u8], &swizzled.data[8400..8404]); // Blue swizzled address: 7680 (0001 1110 0000 0000) assert_eq!(&[0u8, 0u8, 255u8, 255u8], &swizzled.data[7680..7684]); } #[test] fn swizzled_to_linear() { // Test that the data is correctly deswizzled when converting. let data = crate::create_default_lut(); let mut swizzled_data = vec![0u8; crate::image_size(16, 16, 16, 4)]; swizzle(&data, &mut swizzled_data, false); let swizzled = Lut3dSwizzled { size: 16u32, data: swizzled_data.to_vec(), }; let linear: Lut3dLinear = swizzled.into(); assert_eq!(16u32, linear.size); itertools::assert_equal(data.iter(), &linear.data); } }
30.591463
100
0.586406
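The float-to-byte quantization that `From<CubeLut3d>` above applies per channel, pulled out as a standalone sketch; alpha is forced to 255 separately, as the conversion notes.

fn to_u8(f: f32) -> u8 {
    // Scale to 0..=255, clamp the top, then round to the nearest byte.
    (f * 255f32).min(255f32).round() as u8
}

fn main() {
    assert_eq!(to_u8(0.0), 0);
    assert_eq!(to_u8(0.75), 191); // matches the cube_to_linear test data above
    assert_eq!(to_u8(1.5), 255); // out-of-range inputs clamp to 255
}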
89a8ec87002dc29de8926c8d7ae675272a593a57
21,271
//! Reading and writing of Bifrost simulation data in native format. mod mesh; mod param; use super::{ super::{ utils::{self, AtomicOutputPath}, Endianness, Verbose, }, fdt, ParameterValue, SnapshotFormat, SnapshotParameters, SnapshotReader3, FALLBACK_SNAP_NUM, PRIMARY_VARIABLE_NAMES_HD, PRIMARY_VARIABLE_NAMES_MHD, }; use crate::{ field::ScalarField3, geometry::{ Dim3::{X, Y, Z}, In3D, }, grid::{ CoordLocation::{self, Center, LowerEdge}, Grid3, }, }; use ndarray::prelude::*; use std::{ collections::HashMap, io::{self, Write}, mem, path::{Path, PathBuf}, str, sync::Arc, }; pub use mesh::{create_grid_from_mesh_file, parse_mesh_file, write_mesh_file_from_grid}; pub use param::NativeSnapshotParameters; /// Configuration parameters for native snapshot reader. #[derive(Clone, Debug)] pub struct NativeSnapshotReaderConfig { /// Path to the parameter (.idl) file. param_file_path: PathBuf, /// Order of bytes in the binary data files. endianness: Endianness, /// Whether to print status messages while reading fields. verbose: Verbose, } /// Reader for the native output files associated with Bifrost 3D simulation snapshots. #[derive(Clone, Debug)] pub struct NativeSnapshotReader3<G> { config: NativeSnapshotReaderConfig, parameters: NativeSnapshotParameters, snap_path: PathBuf, aux_path: PathBuf, grid: Arc<G>, primary_variable_names: Vec<&'static str>, auxiliary_variable_names: Vec<String>, variable_descriptors: HashMap<String, VariableDescriptor>, } impl<G: Grid3<fdt>> NativeSnapshotReader3<G> { /// Creates a reader for a 3D Bifrost snapshot. pub fn new(config: NativeSnapshotReaderConfig) -> io::Result<Self> { let parameters = NativeSnapshotParameters::new(&config.param_file_path, config.verbose())?; let mesh_path = parameters.determine_mesh_path()?; let is_periodic = parameters.determine_grid_periodicity()?; let grid = create_grid_from_mesh_file(&mesh_path, is_periodic, config.verbose())?; Self::new_from_parameters_and_grid(config, parameters, grid) } /// Creates a reader for a 3D Bifrost snapshot. pub fn new_from_parameters_and_grid( config: NativeSnapshotReaderConfig, parameters: NativeSnapshotParameters, grid: G, ) -> io::Result<Self> { let is_mhd = parameters.determine_if_mhd()?; let primary_variable_names = if is_mhd { PRIMARY_VARIABLE_NAMES_MHD.to_vec() } else { PRIMARY_VARIABLE_NAMES_HD.to_vec() }; let auxiliary_variable_names = parameters.determine_aux_names()?; let mut variable_descriptors = HashMap::new(); Self::insert_primary_variable_descriptors(is_mhd, &mut variable_descriptors); Self::insert_auxiliary_variable_descriptors( &auxiliary_variable_names, &mut variable_descriptors, )?; let (snap_path, aux_path) = parameters.determine_snap_path()?; Ok(Self { config, parameters, snap_path, aux_path, grid: Arc::new(grid), primary_variable_names, auxiliary_variable_names, variable_descriptors, }) } /// Returns the path of the parameter (.idl) file. pub fn parameter_file_path(&self) -> &Path { self.config.param_file_path.as_path() } /// Returns the path of the primary variable (.snap) file. pub fn primary_variable_file_path(&self) -> &Path { self.snap_path.as_path() } /// Returns the path of the auxiliary variable (.aux) file. 
pub fn auxiliary_variable_file_path(&self) -> &Path { self.aux_path.as_path() } fn insert_primary_variable_descriptors( is_mhd: bool, variable_descriptors: &mut HashMap<String, VariableDescriptor>, ) { let is_primary = true; variable_descriptors.insert( "r".to_string(), VariableDescriptor { is_primary, locations: In3D::new(Center, Center, Center), index: 0, }, ); variable_descriptors.insert( "px".to_string(), VariableDescriptor { is_primary, locations: In3D::new(LowerEdge, Center, Center), index: 1, }, ); variable_descriptors.insert( "py".to_string(), VariableDescriptor { is_primary, locations: In3D::new(Center, LowerEdge, Center), index: 2, }, ); variable_descriptors.insert( "pz".to_string(), VariableDescriptor { is_primary, locations: In3D::new(Center, Center, LowerEdge), index: 3, }, ); variable_descriptors.insert( "e".to_string(), VariableDescriptor { is_primary, locations: In3D::new(Center, Center, Center), index: 4, }, ); if is_mhd { variable_descriptors.insert( "bx".to_string(), VariableDescriptor { is_primary, locations: In3D::new(LowerEdge, Center, Center), index: 5, }, ); variable_descriptors.insert( "by".to_string(), VariableDescriptor { is_primary, locations: In3D::new(Center, LowerEdge, Center), index: 6, }, ); variable_descriptors.insert( "bz".to_string(), VariableDescriptor { is_primary, locations: In3D::new(Center, Center, LowerEdge), index: 7, }, ); } } fn insert_auxiliary_variable_descriptors( aux_variable_names: &[String], variable_descriptors: &mut HashMap<String, VariableDescriptor>, ) -> io::Result<()> { let is_primary = false; for (index, name) in aux_variable_names.iter().enumerate() { let ends_with_x = name.ends_with('x'); let ends_with_y = name.ends_with('y'); let ends_with_z = name.ends_with('z'); let locations = if (ends_with_x || ends_with_y || ends_with_z) && (name.starts_with('e') || name.starts_with('i')) { In3D::new( if ends_with_x { Center } else { LowerEdge }, if ends_with_y { Center } else { LowerEdge }, if ends_with_z { Center } else { LowerEdge }, ) } else { In3D::new( if ends_with_x { LowerEdge } else { Center }, if ends_with_y { LowerEdge } else { Center }, if ends_with_z { LowerEdge } else { Center }, ) }; variable_descriptors.insert( name.to_string(), VariableDescriptor { is_primary, locations, index, }, ); } Ok(()) } fn get_variable_descriptor(&self, name: &str) -> io::Result<&VariableDescriptor> { match self.variable_descriptors.get(name) { Some(variable) => Ok(variable), None => Err(io::Error::new( io::ErrorKind::NotFound, format!("Variable {} not found", name), )), } } } impl<G: Grid3<fdt>> SnapshotReader3<G> for NativeSnapshotReader3<G> { type Parameters = NativeSnapshotParameters; const FORMAT: SnapshotFormat = SnapshotFormat::Native; fn path(&self) -> &Path { self.config.param_file_path() } fn verbose(&self) -> Verbose { self.config.verbose() } fn grid(&self) -> &G { self.grid.as_ref() } fn arc_with_grid(&self) -> Arc<G> { Arc::clone(&self.grid) } fn parameters(&self) -> &Self::Parameters { &self.parameters } fn endianness(&self) -> Endianness { self.config.endianness } fn primary_variable_names(&self) -> Vec<&str> { self.primary_variable_names.clone() } fn auxiliary_variable_names(&self) -> Vec<&str> { self.auxiliary_variable_names .iter() .map(|s| s.as_str()) .collect() } fn obtain_snap_name_and_num(&self) -> (String, Option<u32>) { super::extract_name_and_num_from_snapshot_path(self.config.param_file_path()) } fn reread(&mut self) -> io::Result<()> { let Self { parameters, snap_path, aux_path, grid, primary_variable_names, 
auxiliary_variable_names, variable_descriptors, .. } = Self::new(self.config.clone())?; self.parameters = parameters; self.snap_path = snap_path; self.aux_path = aux_path; self.grid = grid; self.primary_variable_names = primary_variable_names; self.auxiliary_variable_names = auxiliary_variable_names; self.variable_descriptors = variable_descriptors; Ok(()) } fn read_scalar_field(&self, variable_name: &str) -> io::Result<ScalarField3<fdt, G>> { let variable_descriptor = self.get_variable_descriptor(variable_name)?; let file_path = if variable_descriptor.is_primary { &self.snap_path } else { &self.aux_path }; if self.config.verbose.is_yes() { println!( "Reading {} from {}", variable_name, file_path.file_name().unwrap().to_string_lossy() ); } let shape = self.grid.shape(); let number_of_values = shape[X] * shape[Y] * shape[Z]; let byte_offset = number_of_values * variable_descriptor.index * mem::size_of::<fdt>(); let buffer = utils::read_from_binary_file( file_path, number_of_values, byte_offset, self.config.endianness, )?; let values = Array::from_shape_vec((shape[X], shape[Y], shape[Z]).f(), buffer).unwrap(); Ok(ScalarField3::new( variable_name.to_string(), Arc::clone(&self.grid), variable_descriptor.locations.clone(), values, )) } } impl NativeSnapshotReaderConfig { /// Creates a new set of snapshot reader configuration parameters. pub fn new<P: AsRef<Path>>( param_file_path: P, endianness: Endianness, verbose: Verbose, ) -> Self { NativeSnapshotReaderConfig { param_file_path: param_file_path.as_ref().to_path_buf(), endianness, verbose, } } pub fn verbose(&self) -> Verbose { self.verbose } pub fn param_file_path(&self) -> &Path { self.param_file_path.as_path() } } /// Writes modified data associated with the given snapshot to native snapshot files at the given path. pub fn write_modified_snapshot<P, GIN, RIN, GOUT, FP>( reader: &RIN, new_grid: Option<Arc<GOUT>>, quantity_names: &[&str], mut modified_parameters: HashMap<&str, ParameterValue>, field_producer: FP, output_param_path: P, is_scratch: bool, write_mesh_file: bool, automatic_overwrite: bool, protected_file_types: &[&str], verbose: Verbose, ) -> io::Result<()> where P: AsRef<Path>, GIN: Grid3<fdt>, RIN: SnapshotReader3<GIN>, GOUT: Grid3<fdt>, FP: Fn(&str) -> io::Result<ScalarField3<fdt, GOUT>>, { let output_param_path = output_param_path.as_ref(); let (snap_name, snap_num) = super::extract_name_and_num_from_snapshot_path(output_param_path); let snap_num = snap_num.unwrap_or(if is_scratch { 1 } else { FALLBACK_SNAP_NUM }); modified_parameters.insert( "snapname", ParameterValue::Str(format!("\"{}\"", snap_name)), ); modified_parameters.insert( "isnap", ParameterValue::Str(format!("{}{}", if is_scratch { "-" } else { "" }, snap_num)), ); modified_parameters.insert( "meshfile", ParameterValue::Str(format!("\"{}.mesh\"", snap_name)), ); let (included_primary_variable_names, included_auxiliary_variable_names) = reader.classify_variable_names(quantity_names); modified_parameters.insert( "aux", ParameterValue::Str(format!( "\"{}\"", included_auxiliary_variable_names.join(" ") )), ); let has_primary = !included_primary_variable_names.is_empty(); let has_auxiliary = !included_auxiliary_variable_names.is_empty(); let atomic_param_path = AtomicOutputPath::new(output_param_path)?; let mut atomic_mesh_path = if write_mesh_file { Some(AtomicOutputPath::new( atomic_param_path .target_path() .with_file_name(format!("{}.mesh", snap_name)), )?) 
} else { None }; let atomic_snap_path = AtomicOutputPath::new(if is_scratch { atomic_param_path .target_path() .with_file_name(format!("{}.snap.scr", snap_name)) } else { atomic_param_path.target_path().with_extension("snap") })?; let atomic_aux_path = AtomicOutputPath::new(if is_scratch { atomic_param_path .target_path() .with_file_name(format!("{}.aux.scr", snap_name)) } else { atomic_param_path.target_path().with_extension("aux") })?; let write_param_file = !atomic_param_path.write_should_be_skipped(automatic_overwrite, protected_file_types); let write_mesh_file = if let Some(atomic_mesh_path) = &atomic_mesh_path { !atomic_mesh_path.write_should_be_skipped(automatic_overwrite, protected_file_types) } else { false }; let write_snap_file = has_primary && !atomic_snap_path.write_should_be_skipped(automatic_overwrite, protected_file_types); let write_aux_file = has_auxiliary && !atomic_aux_path.write_should_be_skipped(automatic_overwrite, protected_file_types); let output_param_file_name = atomic_param_path .target_path() .file_name() .unwrap() .to_string_lossy(); let output_snap_file_name = atomic_snap_path .target_path() .file_name() .unwrap() .to_string_lossy(); let output_aux_file_name = atomic_aux_path .target_path() .file_name() .unwrap() .to_string_lossy(); macro_rules! perform_writing { ($grid:expr) => {{ if write_param_file { let mut new_parameters = reader.parameters().clone(); new_parameters.modify_values(modified_parameters); if verbose.is_yes() { println!("Writing parameters to {}", output_param_file_name); } utils::write_text_file( &new_parameters.native_text_representation(), atomic_param_path.temporary_path(), )?; } if write_mesh_file { let atomic_mesh_path = atomic_mesh_path.as_ref().unwrap(); if verbose.is_yes() { println!( "Writing grid to {}", atomic_mesh_path .target_path() .file_name() .unwrap() .to_string_lossy() ); } mesh::write_mesh_file_from_grid($grid, atomic_mesh_path.temporary_path())?; } if write_snap_file { write_3d_snapfile( atomic_snap_path.temporary_path(), &included_primary_variable_names, &|name| { field_producer(name).map(|field| { if verbose.is_yes() { println!("Writing {} to {}", name, output_snap_file_name); } field.into_values() }) }, reader.endianness(), )?; } if write_aux_file { write_3d_snapfile( atomic_aux_path.temporary_path(), &included_auxiliary_variable_names, &|name| { field_producer(name).map(|field| { if verbose.is_yes() { println!("Writing {} to {}", name, output_aux_file_name); } field.into_values() }) }, reader.endianness(), )?; } if write_param_file { atomic_param_path.perform_replace()?; } if write_mesh_file { atomic_mesh_path.take().unwrap().perform_replace()?; } if write_snap_file { atomic_snap_path.perform_replace()?; } if write_aux_file { atomic_aux_path.perform_replace()?; } }}; } if let Some(new_grid) = new_grid { let shape = new_grid.shape(); let average_grid_cell_extents = new_grid.average_grid_cell_extents(); modified_parameters.insert("mx", ParameterValue::Int(shape[X] as i64)); modified_parameters.insert("my", ParameterValue::Int(shape[Y] as i64)); modified_parameters.insert("mz", ParameterValue::Int(shape[Z] as i64)); modified_parameters.insert("dx", ParameterValue::Float(average_grid_cell_extents[X])); modified_parameters.insert("dy", ParameterValue::Float(average_grid_cell_extents[Y])); modified_parameters.insert("dz", ParameterValue::Float(average_grid_cell_extents[Z])); perform_writing!(new_grid.as_ref()); } else { perform_writing!(reader.grid()); }; Ok(()) } /// Writes arrays of variable values sequentially into a 
binary file. /// /// # Parameters /// /// - `output_file_path`: Path where the output file should be written. /// - `variable_names`: Names of the variables to write. /// - `variable_value_producer`: Closure producing an array of values given the variable name. /// - `endianness`: Endianness of the output data. /// /// # Returns /// /// A `Result` which is either: /// /// - `Ok`: Writing was completed successfully. /// - `Err`: Contains an error encountered while trying to create or write to the file. /// /// # Type parameters /// /// - `P`: A type that can be treated as a reference to a `Path`. /// - `N`: A type that can be treated as a reference to a `str`. /// - `V`: A function type taking a reference to a string slice and returning a reference to a 3D array. fn write_3d_snapfile<P, N, V>( output_file_path: P, variable_names: &[N], variable_value_producer: &V, endianness: Endianness, ) -> io::Result<()> where P: AsRef<Path>, N: AsRef<str>, V: Fn(&str) -> io::Result<Array3<fdt>>, { let output_file_path = output_file_path.as_ref(); let number_of_variables = variable_names.len(); assert!( number_of_variables > 0, "Number of variables must larger than zero." ); let name = variable_names[0].as_ref(); let variable_values = variable_value_producer(name)?; let array_length = variable_values.len(); let float_size = mem::size_of::<fdt>(); let byte_buffer_size = array_length * float_size; let mut byte_buffer = vec![0_u8; byte_buffer_size]; let mut file = utils::create_file_and_required_directories(output_file_path)?; file.set_len(byte_buffer_size as u64)?; utils::write_into_byte_buffer( variable_values .as_slice_memory_order() .expect("Values array not contiguous"), &mut byte_buffer, 0, endianness, ); file.write_all(&byte_buffer)?; for name in variable_names.iter().skip(1) { let name = name.as_ref(); let variable_values = variable_value_producer(name)?; assert_eq!( variable_values.len(), array_length, "All variable arrays must have the same length." ); utils::write_into_byte_buffer( variable_values .as_slice_memory_order() .expect("Values array not contiguous"), &mut byte_buffer, 0, endianness, ); file.write_all(&byte_buffer)?; } Ok(()) } #[derive(Clone, Debug)] struct VariableDescriptor { is_primary: bool, locations: In3D<CoordLocation>, index: usize, }
32.375951
104
0.569367
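The `write_3d_snapfile` routine above streams one variable at a time through a reusable byte buffer with an explicit byte order. Here is a stripped-down sketch of the same idea using only the standard library; the real code goes through the crate's `utils` helpers and a generic `Endianness` value, whereas this sketch hard-codes little-endian, and `write_variables_le` is an illustrative name.

```rust
use std::fs::File;
use std::io::{self, Write};

// Write each variable's f32 values back to back into one binary file,
// serializing through a reusable little-endian byte buffer.
fn write_variables_le(path: &str, variables: &[Vec<f32>]) -> io::Result<()> {
    let mut file = File::create(path)?;
    let mut buffer = Vec::new();
    for values in variables {
        buffer.clear();
        for v in values {
            buffer.extend_from_slice(&v.to_le_bytes()); // explicit byte order
        }
        file.write_all(&buffer)?; // variables are laid out sequentially
    }
    Ok(())
}

fn main() -> io::Result<()> {
    write_variables_le("snapshot.bin", &[vec![1.0, 2.0], vec![3.0, 4.0]])
}
```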
716ff992c00de51d8fad7c27092deed8ea1c2718
386
use crate::NewResult;
use serde::{de::DeserializeOwned, Serialize};

pub trait Serializable<T = Self>: Serialize + Sized + DeserializeOwned {
    fn to_json(&self) -> NewResult<String> {
        let s = serde_json::to_string_pretty(self)?;
        Ok(s)
    }

    fn from_json(s: &str) -> NewResult<Self> {
        let result: Self = serde_json::from_str(s)?;
        Ok(result)
    }
}
29.692308
72
0.621762
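A usage sketch for a blanket JSON trait like the one above: derive serde's traits on a type and opt in with an empty impl, letting the default methods handle the round trip. `NewResult` is crate-specific, so this sketch substitutes `serde_json::Result` to stay self-contained; it assumes the `serde` (with the `derive` feature) and `serde_json` crates.

```rust
use serde::{de::DeserializeOwned, Deserialize, Serialize};

trait Serializable: Serialize + Sized + DeserializeOwned {
    fn to_json(&self) -> serde_json::Result<String> {
        serde_json::to_string_pretty(self)
    }
    fn from_json(s: &str) -> serde_json::Result<Self> {
        serde_json::from_str(s)
    }
}

#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Config {
    name: String,
    retries: u32,
}

// The default methods do all the work; the impl body stays empty.
impl Serializable for Config {}

fn main() -> serde_json::Result<()> {
    let config = Config { name: "demo".into(), retries: 3 };
    let json = config.to_json()?;
    assert_eq!(Config::from_json(&json)?, config);
    Ok(())
}
```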
cc8b3c7512d252c0dbd053d7650cd09c9441fa26
6,177
#![deny(missing_docs)] use vector_common::EventDataEq; use super::{Event, EventMetadata, LogEvent, Metric, TraceEvent}; /// A wrapper for references to inner event types, where reconstituting /// a full `Event` from a `LogEvent` or `Metric` might be inconvenient. #[derive(Clone, Copy, Debug, PartialEq)] pub enum EventRef<'a> { /// Reference to a `LogEvent` Log(&'a LogEvent), /// Reference to a `Metric` Metric(&'a Metric), /// Reference to a `TraceEvent` Trace(&'a TraceEvent), } impl<'a> EventRef<'a> { /// Extract the `LogEvent` reference in this. /// /// # Panics /// /// This will panic if this is not a `LogEvent` reference. pub fn as_log(self) -> &'a LogEvent { match self { Self::Log(log) => log, _ => panic!("Failed type coercion, {:?} is not a log reference", self), } } /// Convert this reference into a new `LogEvent` by cloning. /// /// # Panics /// /// This will panic if this is not a `LogEvent` reference. pub fn into_log(self) -> LogEvent { match self { Self::Log(log) => log.clone(), _ => panic!("Failed type coercion, {:?} is not a log reference", self), } } /// Extract the `Metric` reference in this. /// /// # Panics /// /// This will panic if this is not a `Metric` reference. pub fn as_metric(self) -> &'a Metric { match self { Self::Metric(metric) => metric, _ => panic!("Failed type coercion, {:?} is not a metric reference", self), } } /// Convert this reference into a new `Metric` by cloning. /// /// # Panics /// /// This will panic if this is not a `Metric` reference. pub fn into_metric(self) -> Metric { match self { Self::Metric(metric) => metric.clone(), _ => panic!("Failed type coercion, {:?} is not a metric reference", self), } } } impl<'a> From<&'a Event> for EventRef<'a> { fn from(event: &'a Event) -> Self { match event { Event::Log(log) => EventRef::Log(log), Event::Metric(metric) => EventRef::Metric(metric), Event::Trace(trace) => EventRef::Trace(trace), } } } impl<'a> From<&'a LogEvent> for EventRef<'a> { fn from(log: &'a LogEvent) -> Self { Self::Log(log) } } impl<'a> From<&'a Metric> for EventRef<'a> { fn from(metric: &'a Metric) -> Self { Self::Metric(metric) } } impl<'a> From<&'a TraceEvent> for EventRef<'a> { fn from(trace: &'a TraceEvent) -> Self { Self::Trace(trace) } } impl<'a> EventDataEq<Event> for EventRef<'a> { fn event_data_eq(&self, that: &Event) -> bool { match (self, that) { (Self::Log(a), Event::Log(b)) => a.event_data_eq(b), (Self::Metric(a), Event::Metric(b)) => a.event_data_eq(b), (Self::Trace(a), Event::Trace(b)) => a.event_data_eq(b), _ => false, } } } /// A wrapper for mutable references to inner event types, where reconstituting /// a full `Event` from a `LogEvent` or `Metric` might be inconvenient. #[derive(Debug)] pub enum EventMutRef<'a> { /// Reference to a `LogEvent` Log(&'a mut LogEvent), /// Reference to a `Metric` Metric(&'a mut Metric), /// Reference to a `TraceEvent` Trace(&'a mut TraceEvent), } impl<'a> EventMutRef<'a> { /// Extract the `LogEvent` reference in this. /// /// # Panics /// /// This will panic if this is not a `LogEvent` reference. pub fn as_log(self) -> &'a LogEvent { match self { Self::Log(log) => log, _ => panic!("Failed type coercion, {:?} is not a log reference", self), } } /// Convert this reference into a new `LogEvent` by cloning. /// /// # Panics /// /// This will panic if this is not a `LogEvent` reference. pub fn into_log(self) -> LogEvent { match self { Self::Log(log) => log.clone(), _ => panic!("Failed type coercion, {:?} is not a log reference", self), } } /// Extract the `Metric` reference in this. 
/// /// # Panics /// /// This will panic if this is not a `Metric` reference. pub fn as_metric(self) -> &'a Metric { match self { Self::Metric(metric) => metric, _ => panic!("Failed type coercion, {:?} is not a metric reference", self), } } /// Convert this reference into a new `Metric` by cloning. /// /// # Panics /// /// This will panic if this is not a `Metric` reference. pub fn into_metric(self) -> Metric { match self { Self::Metric(metric) => metric.clone(), _ => panic!("Failed type coercion, {:?} is not a metric reference", self), } } /// Access the metadata in this reference. pub fn metadata(&self) -> &EventMetadata { match self { Self::Log(event) => event.metadata(), Self::Metric(event) => event.metadata(), Self::Trace(event) => event.metadata(), } } /// Access the metadata mutably in this reference. pub fn metadata_mut(&mut self) -> &mut EventMetadata { match self { Self::Log(event) => event.metadata_mut(), Self::Metric(event) => event.metadata_mut(), Self::Trace(event) => event.metadata_mut(), } } } impl<'a> From<&'a mut Event> for EventMutRef<'a> { fn from(event: &'a mut Event) -> Self { match event { Event::Log(event) => event.into(), Event::Metric(event) => event.into(), Event::Trace(event) => event.into(), } } } impl<'a> From<&'a mut LogEvent> for EventMutRef<'a> { fn from(log: &'a mut LogEvent) -> Self { Self::Log(log) } } impl<'a> From<&'a mut Metric> for EventMutRef<'a> { fn from(metric: &'a mut Metric) -> Self { Self::Metric(metric) } } impl<'a> From<&'a mut TraceEvent> for EventMutRef<'a> { fn from(trace: &'a mut TraceEvent) -> Self { Self::Trace(trace) } }
28.730233
86
0.544601
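The core idea in `EventRef` is an enum of borrowed variants: callers can hand over "some kind of event" without cloning it or rebuilding the owning `Event`. A reduced sketch of that pattern follows, with stand-in `LogEvent` and `Metric` types rather than the real ones.

```rust
#[derive(Debug)]
struct LogEvent(String);

#[derive(Debug)]
struct Metric(f64);

// An enum of references: cheap to copy, never owns the event data.
#[derive(Debug, Clone, Copy)]
enum EventRef<'a> {
    Log(&'a LogEvent),
    Metric(&'a Metric),
}

impl<'a> From<&'a LogEvent> for EventRef<'a> {
    fn from(log: &'a LogEvent) -> Self {
        EventRef::Log(log)
    }
}

impl<'a> EventRef<'a> {
    /// Panics if this is not a `Log` reference, mirroring `as_log` above.
    fn as_log(self) -> &'a LogEvent {
        match self {
            EventRef::Log(log) => log,
            other => panic!("{:?} is not a log reference", other),
        }
    }
}

fn main() {
    let log = LogEvent("hello".into());
    let metric = Metric(0.5);
    let events = [EventRef::from(&log), EventRef::Metric(&metric)];
    println!("{:?}", events[0].as_log());
    println!("{:?}", events[1]);
}
```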
dbcb24339998f990d33d2007d1a7f3fa0bdc60df
3,838
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::CPUIRQSEL4 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `EV`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum EVR { #[doc = "Event from AON_RTC, controlled by the AON_RTC:CTL.COMB_EV_MASK setting"] AON_RTC_COMB, #[doc = r" Reserved"] _Reserved(u8), } impl EVR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { EVR::AON_RTC_COMB => 7, EVR::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> EVR { match value { 7 => EVR::AON_RTC_COMB, i => EVR::_Reserved(i), } } #[doc = "Checks if the value of the field is `AON_RTC_COMB`"] #[inline] pub fn is_aon_rtc_comb(&self) -> bool { *self == EVR::AON_RTC_COMB } } #[doc = "Values that can be written to the field `EV`"] pub enum EVW { #[doc = "Event from AON_RTC, controlled by the AON_RTC:CTL.COMB_EV_MASK setting"] AON_RTC_COMB, } impl EVW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self { EVW::AON_RTC_COMB => 7, } } } #[doc = r" Proxy"] pub struct _EVW<'a> { w: &'a mut W, } impl<'a> _EVW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: EVW) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = "Event from AON_RTC, controlled by the AON_RTC:CTL.COMB_EV_MASK setting"] #[inline] pub fn aon_rtc_comb(self) -> &'a mut W { self.variant(EVW::AON_RTC_COMB) } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 127; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:6 - 6:0\\] Read only selection value"] #[inline] pub fn ev(&self) -> EVR { EVR::_from({ const MASK: u8 = 127; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 7 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:6 - 6:0\\] Read only selection value"] #[inline] pub fn ev(&mut self) -> _EVW { _EVW { w: self } } }
25.417219
85
0.515373
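Underneath the generated proxies, reading and writing the `EV` field is a masked shift on the raw register word. A plain-function sketch of the same arithmetic (`MASK` is 127, i.e. `0x7f`, because the field spans bits 6:0; the function names here are illustrative):

```rust
const MASK: u32 = 0x7f;
const OFFSET: u32 = 0;

// Read-modify-write: clear the field, then OR in the masked new value.
fn write_ev(register: u32, value: u8) -> u32 {
    let mut bits = register;
    bits &= !(MASK << OFFSET);
    bits |= ((value as u32) & MASK) << OFFSET;
    bits
}

// Shift the field down and mask off everything else.
fn read_ev(register: u32) -> u8 {
    ((register >> OFFSET) & MASK) as u8
}

fn main() {
    let reg = write_ev(0, 7); // 7 = AON_RTC_COMB
    assert_eq!(read_ev(reg), 7);
}
```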
3892d6d75b69cba2c5eecfcf8b39a96217c170a0
37,988
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license. // Do not add any dependency to modules.rs! // modules.rs is complex and should remain decoupled from isolate.rs to keep the // Isolate struct from becoming too bloating for users who do not need // asynchronous module loading. use rusty_v8 as v8; use crate::any_error::ErrBox; use crate::bindings; use crate::js_errors::CoreJSError; use crate::js_errors::V8Exception; use crate::ops::*; use crate::shared_queue::SharedQueue; use crate::shared_queue::RECOMMENDED_SIZE; use futures::future::FutureExt; use futures::future::TryFutureExt; use futures::stream::select; use futures::stream::FuturesUnordered; use futures::stream::StreamExt; use futures::task::AtomicWaker; use futures::Future; use libc::c_void; use std::collections::HashMap; use std::convert::From; use std::error::Error; use std::fmt; use std::ops::{Deref, DerefMut}; use std::option::Option; use std::pin::Pin; use std::sync::{Arc, Mutex, Once}; use std::task::Context; use std::task::Poll; /// A ZeroCopyBuf encapsulates a slice that's been borrowed from a JavaScript /// ArrayBuffer object. JavaScript objects can normally be garbage collected, /// but the existence of a ZeroCopyBuf inhibits this until it is dropped. It /// behaves much like an Arc<[u8]>, although a ZeroCopyBuf currently can't be /// cloned. pub struct ZeroCopyBuf { backing_store: v8::SharedRef<v8::BackingStore>, byte_offset: usize, byte_length: usize, } unsafe impl Send for ZeroCopyBuf {} impl ZeroCopyBuf { pub fn new(view: v8::Local<v8::ArrayBufferView>) -> Self { let backing_store = view.buffer().unwrap().get_backing_store(); let byte_offset = view.byte_offset(); let byte_length = view.byte_length(); Self { backing_store, byte_offset, byte_length, } } } impl Deref for ZeroCopyBuf { type Target = [u8]; fn deref(&self) -> &[u8] { let buf = unsafe { &**self.backing_store.get() }; &buf[self.byte_offset..self.byte_offset + self.byte_length] } } impl DerefMut for ZeroCopyBuf { fn deref_mut(&mut self) -> &mut [u8] { let buf = unsafe { &mut **self.backing_store.get() }; &mut buf[self.byte_offset..self.byte_offset + self.byte_length] } } impl AsRef<[u8]> for ZeroCopyBuf { fn as_ref(&self) -> &[u8] { &*self } } impl AsMut<[u8]> for ZeroCopyBuf { fn as_mut(&mut self) -> &mut [u8] { &mut *self } } pub enum SnapshotConfig { Borrowed(v8::StartupData<'static>), Owned(v8::OwnedStartupData), } impl From<&'static [u8]> for SnapshotConfig { fn from(sd: &'static [u8]) -> Self { Self::Borrowed(v8::StartupData::new(sd)) } } impl From<v8::OwnedStartupData> for SnapshotConfig { fn from(sd: v8::OwnedStartupData) -> Self { Self::Owned(sd) } } impl Deref for SnapshotConfig { type Target = v8::StartupData<'static>; fn deref(&self) -> &Self::Target { match self { Self::Borrowed(sd) => sd, Self::Owned(sd) => &*sd, } } } /// Stores a script used to initalize a Isolate pub struct Script<'a> { pub source: &'a str, pub filename: &'a str, } // TODO(ry) It's ugly that we have both Script and OwnedScript. Ideally we // wouldn't expose such twiddly complexity. struct OwnedScript { pub source: String, pub filename: String, } impl From<Script<'_>> for OwnedScript { fn from(s: Script) -> OwnedScript { OwnedScript { source: s.source.to_string(), filename: s.filename.to_string(), } } } /// Represents data used to initialize isolate at startup /// either a binary snapshot or a javascript source file /// in the form of the StartupScript struct. 
pub enum StartupData<'a> { Script(Script<'a>), Snapshot(&'static [u8]), OwnedSnapshot(v8::OwnedStartupData), None, } type JSErrorCreateFn = dyn Fn(V8Exception) -> ErrBox; type IsolateErrorHandleFn = dyn FnMut(ErrBox) -> Result<(), ErrBox>; /// A single execution context of JavaScript. Corresponds roughly to the "Web /// Worker" concept in the DOM. An Isolate is a Future that can be used with /// Tokio. The Isolate future complete when there is an error or when all /// pending ops have completed. /// /// Ops are created in JavaScript by calling Deno.core.dispatch(), and in Rust /// by implementing dispatcher function that takes control buffer and optional zero copy buffer /// as arguments. An async Op corresponds exactly to a Promise in JavaScript. #[allow(unused)] pub struct Isolate { pub(crate) v8_isolate: Option<v8::OwnedIsolate>, snapshot_creator: Option<v8::SnapshotCreator>, has_snapshotted: bool, snapshot: Option<SnapshotConfig>, pub(crate) last_exception: Option<String>, pub(crate) global_context: v8::Global<v8::Context>, pub(crate) shared_ab: v8::Global<v8::SharedArrayBuffer>, pub(crate) js_recv_cb: v8::Global<v8::Function>, pub(crate) pending_promise_exceptions: HashMap<i32, v8::Global<v8::Value>>, shared_isolate_handle: Arc<Mutex<Option<*mut v8::Isolate>>>, js_error_create: Arc<JSErrorCreateFn>, needs_init: bool, pub(crate) shared: SharedQueue, pending_ops: FuturesUnordered<PendingOpFuture>, pending_unref_ops: FuturesUnordered<PendingOpFuture>, have_unpolled_ops: bool, startup_script: Option<OwnedScript>, pub op_registry: Arc<OpRegistry>, waker: AtomicWaker, error_handler: Option<Box<IsolateErrorHandleFn>>, } // TODO(ry) this shouldn't be necessary, v8::OwnedIsolate should impl Send. unsafe impl Send for Isolate {} impl Drop for Isolate { fn drop(&mut self) { // remove shared_libdeno_isolate reference *self.shared_isolate_handle.lock().unwrap() = None; // TODO Too much boiler plate. // <Boilerplate> let isolate = self.v8_isolate.take().unwrap(); // Clear persistent handles we own. { let mut locker = v8::Locker::new(&isolate); let mut hs = v8::HandleScope::new(locker.enter()); let scope = hs.enter(); // </Boilerplate> self.global_context.reset(scope); self.shared_ab.reset(scope); self.js_recv_cb.reset(scope); for (_key, handle) in self.pending_promise_exceptions.iter_mut() { handle.reset(scope); } } if let Some(creator) = self.snapshot_creator.take() { // TODO(ry) V8 has a strange assert which prevents a SnapshotCreator from // being deallocated if it hasn't created a snapshot yet. // https://github.com/v8/v8/blob/73212783fbd534fac76cc4b66aac899c13f71fc8/src/api.cc#L603 // If that assert is removed, this if guard could be removed. // WARNING: There may be false positive LSAN errors here. std::mem::forget(isolate); if self.has_snapshotted { drop(creator); } } else { drop(isolate); } } } static DENO_INIT: Once = Once::new(); #[allow(clippy::missing_safety_doc)] pub unsafe fn v8_init() { let platform = v8::new_default_platform(); v8::V8::initialize_platform(platform); v8::V8::initialize(); // TODO(ry) This makes WASM compile synchronously. Eventually we should // remove this to make it work asynchronously too. But that requires getting // PumpMessageLoop and RunMicrotasks setup correctly. 
// See https://github.com/denoland/deno/issues/2544 let argv = vec![ "".to_string(), "--no-wasm-async-compilation".to_string(), "--harmony-top-level-await".to_string(), ]; v8::V8::set_flags_from_command_line(argv); } impl Isolate { /// startup_data defines the snapshot or script used at startup to initialize /// the isolate. pub fn new(startup_data: StartupData, will_snapshot: bool) -> Box<Self> { DENO_INIT.call_once(|| { unsafe { v8_init() }; }); let mut load_snapshot: Option<SnapshotConfig> = None; let mut startup_script: Option<OwnedScript> = None; // Separate into Option values for each startup type match startup_data { StartupData::Script(d) => { startup_script = Some(d.into()); } StartupData::Snapshot(d) => { load_snapshot = Some(d.into()); } StartupData::OwnedSnapshot(d) => { load_snapshot = Some(d.into()); } StartupData::None => {} }; let mut global_context = v8::Global::<v8::Context>::new(); let (mut isolate, maybe_snapshot_creator) = if will_snapshot { // TODO(ry) Support loading snapshots before snapshotting. assert!(load_snapshot.is_none()); let mut creator = v8::SnapshotCreator::new(Some(&bindings::EXTERNAL_REFERENCES)); let isolate = unsafe { creator.get_owned_isolate() }; let isolate = Isolate::setup_isolate(isolate); let mut locker = v8::Locker::new(&isolate); let scope = locker.enter(); let mut hs = v8::HandleScope::new(scope); let scope = hs.enter(); let context = bindings::initialize_context(scope); global_context.set(scope, context); creator.set_default_context(context); (isolate, Some(creator)) } else { let mut params = v8::Isolate::create_params(); params.set_array_buffer_allocator(v8::new_default_allocator()); params.set_external_references(&bindings::EXTERNAL_REFERENCES); if let Some(ref mut snapshot) = load_snapshot { params.set_snapshot_blob(snapshot); } let isolate = v8::Isolate::new(params); let isolate = Isolate::setup_isolate(isolate); let mut locker = v8::Locker::new(&isolate); let scope = locker.enter(); let mut hs = v8::HandleScope::new(scope); let scope = hs.enter(); let context = match load_snapshot { Some(_) => v8::Context::new(scope), None => { // If no snapshot is provided, we initialize the context with empty // main source code and source maps. 
bindings::initialize_context(scope) } }; global_context.set(scope, context); (isolate, None) }; let shared = SharedQueue::new(RECOMMENDED_SIZE); let needs_init = true; let core_isolate = Self { v8_isolate: None, last_exception: None, global_context, pending_promise_exceptions: HashMap::new(), shared_ab: v8::Global::<v8::SharedArrayBuffer>::new(), js_recv_cb: v8::Global::<v8::Function>::new(), snapshot_creator: maybe_snapshot_creator, snapshot: load_snapshot, has_snapshotted: false, shared_isolate_handle: Arc::new(Mutex::new(None)), js_error_create: Arc::new(CoreJSError::from_v8_exception), shared, needs_init, pending_ops: FuturesUnordered::new(), pending_unref_ops: FuturesUnordered::new(), have_unpolled_ops: false, startup_script, op_registry: Arc::new(OpRegistry::new()), waker: AtomicWaker::new(), error_handler: None, }; let mut boxed_isolate = Box::new(core_isolate); { let core_isolate_ptr: *mut Self = Box::into_raw(boxed_isolate); unsafe { isolate.set_data(0, core_isolate_ptr as *mut c_void) }; boxed_isolate = unsafe { Box::from_raw(core_isolate_ptr) }; let shared_handle_ptr = &mut *isolate; *boxed_isolate.shared_isolate_handle.lock().unwrap() = Some(shared_handle_ptr); boxed_isolate.v8_isolate = Some(isolate); } boxed_isolate } pub fn setup_isolate(mut isolate: v8::OwnedIsolate) -> v8::OwnedIsolate { isolate.set_capture_stack_trace_for_uncaught_exceptions(true, 10); isolate.set_promise_reject_callback(bindings::promise_reject_callback); isolate.add_message_listener(bindings::message_callback); isolate } pub fn exception_to_err_result<'a, T>( &mut self, scope: &mut (impl v8::ToLocal<'a> + v8::InContext), exception: v8::Local<v8::Value>, ) -> Result<T, ErrBox> { self.handle_exception(scope, exception); self.check_last_exception().map(|_| unreachable!()) } pub fn handle_exception<'a>( &mut self, scope: &mut (impl v8::ToLocal<'a> + v8::InContext), exception: v8::Local<v8::Value>, ) { // Use a HandleScope because the functions below create a lot of // local handles (in particular, `encode_message_as_json()` does). let mut hs = v8::HandleScope::new(scope); let scope = hs.enter(); let is_terminating_exception = scope.isolate().is_execution_terminating(); let mut exception = exception; if is_terminating_exception { // TerminateExecution was called. Cancel exception termination so that the // exception can be created.. scope.isolate().cancel_terminate_execution(); // Maybe make a new exception object. if exception.is_null_or_undefined() { let exception_str = v8::String::new(scope, "execution terminated").unwrap(); exception = v8::Exception::error(scope, exception_str); } } let message = v8::Exception::create_message(scope, exception); let json_str = self.encode_message_as_json(scope, message); self.last_exception = Some(json_str); if is_terminating_exception { // Re-enable exception termination. scope.isolate().terminate_execution(); } } pub fn encode_message_as_json<'a>( &mut self, scope: &mut (impl v8::ToLocal<'a> + v8::InContext), message: v8::Local<v8::Message>, ) -> String { let context = scope.isolate().get_current_context(); let json_obj = bindings::encode_message_as_object(scope, message); let json_string = v8::json::stringify(context, json_obj.into()).unwrap(); json_string.to_rust_string_lossy(scope) } /// Defines the how Deno.core.dispatch() acts. /// Called whenever Deno.core.dispatch() is called in JavaScript. zero_copy_buf /// corresponds to the second argument of Deno.core.dispatch(). /// /// Requires runtime to explicitly ask for op ids before using any of the ops. 
pub fn register_op<F>(&self, name: &str, op: F) -> OpId where F: Fn(&[u8], Option<ZeroCopyBuf>) -> CoreOp + 'static, { self.op_registry.register(name, op) } /// Allows a callback to be set whenever a V8 exception is made. This allows /// the caller to wrap the V8Exception into an error. By default this callback /// is set to CoreJSError::from_v8_exception. pub fn set_js_error_create<F>(&mut self, f: F) where F: Fn(V8Exception) -> ErrBox + 'static, { self.js_error_create = Arc::new(f); } /// Get a thread safe handle on the isolate. pub fn shared_isolate_handle(&mut self) -> IsolateHandle { IsolateHandle { shared_isolate: self.shared_isolate_handle.clone(), } } /// Executes a bit of built-in JavaScript to provide Deno.sharedQueue. pub(crate) fn shared_init(&mut self) { if self.needs_init { self.needs_init = false; js_check( self.execute("shared_queue.js", include_str!("shared_queue.js")), ); // Maybe execute the startup script. if let Some(s) = self.startup_script.take() { self.execute(&s.filename, &s.source).unwrap() } } } pub fn dispatch_op<'s>( &mut self, scope: &mut (impl v8::ToLocal<'s> + v8::InContext), op_id: OpId, control_buf: &[u8], zero_copy_buf: Option<ZeroCopyBuf>, ) -> Option<(OpId, Box<[u8]>)> { let maybe_op = self.op_registry.call(op_id, control_buf, zero_copy_buf); let op = match maybe_op { Some(op) => op, None => { let message = v8::String::new(scope, &format!("Unknown op id: {}", op_id)).unwrap(); let exception = v8::Exception::type_error(scope, message); scope.isolate().throw_exception(exception); return None; } }; debug_assert_eq!(self.shared.size(), 0); match op { Op::Sync(buf) => { // For sync messages, we always return the response via Deno.core.send's // return value. Sync messages ignore the op_id. let op_id = 0; Some((op_id, buf)) } Op::Async(fut) => { let fut2 = fut.map_ok(move |buf| (op_id, buf)); self.pending_ops.push(fut2.boxed_local()); self.have_unpolled_ops = true; None } Op::AsyncUnref(fut) => { let fut2 = fut.map_ok(move |buf| (op_id, buf)); self.pending_unref_ops.push(fut2.boxed_local()); self.have_unpolled_ops = true; None } } } /// Executes traditional JavaScript code (traditional = not ES modules) /// /// ErrBox can be downcast to a type that exposes additional information about /// the V8 exception. By default this type is CoreJSError, however it may be a /// different type if Isolate::set_js_error_create() has been used. 
pub fn execute( &mut self, js_filename: &str, js_source: &str, ) -> Result<(), ErrBox> { self.shared_init(); let isolate = self.v8_isolate.as_ref().unwrap(); let mut locker = v8::Locker::new(isolate); assert!(!self.global_context.is_empty()); let mut hs = v8::HandleScope::new(locker.enter()); let scope = hs.enter(); let context = self.global_context.get(scope).unwrap(); let mut cs = v8::ContextScope::new(scope, context); let scope = cs.enter(); let source = v8::String::new(scope, js_source).unwrap(); let name = v8::String::new(scope, js_filename).unwrap(); let origin = bindings::script_origin(scope, name); let mut try_catch = v8::TryCatch::new(scope); let tc = try_catch.enter(); let mut script = v8::Script::compile(scope, context, source, Some(&origin)).unwrap(); match script.run(scope, context) { Some(_) => Ok(()), None => { assert!(tc.has_caught()); let exception = tc.exception().unwrap(); self.exception_to_err_result(scope, exception) } } } pub(crate) fn check_last_exception(&mut self) -> Result<(), ErrBox> { match self.last_exception.take() { None => Ok(()), Some(json_str) => { let v8_exception = V8Exception::from_json(&json_str).unwrap(); let js_error = (self.js_error_create)(v8_exception); Err(js_error) } } } pub(crate) fn attach_handle_to_error( &mut self, scope: &mut impl v8::InIsolate, err: ErrBox, handle: v8::Local<v8::Value>, ) -> ErrBox { ErrWithV8Handle::new(scope, err, handle).into() } fn check_promise_exceptions<'s>( &mut self, scope: &mut (impl v8::ToLocal<'s> + v8::InContext), ) -> Result<(), ErrBox> { if let Some(&key) = self.pending_promise_exceptions.keys().next() { let mut handle = self.pending_promise_exceptions.remove(&key).unwrap(); let exception = handle.get(scope).expect("empty error handle"); handle.reset(scope); self.exception_to_err_result(scope, exception) } else { Ok(()) } } fn async_op_response<'s>( &mut self, scope: &mut (impl v8::ToLocal<'s> + v8::InContext), maybe_buf: Option<(OpId, Box<[u8]>)>, ) -> Result<(), ErrBox> { let context = scope.isolate().get_current_context(); let global: v8::Local<v8::Value> = context.global(scope).into(); let js_recv_cb = self .js_recv_cb .get(scope) .expect("Deno.core.recv has not been called."); // TODO(piscisaureus): properly integrate TryCatch in the scope chain. let mut try_catch = v8::TryCatch::new(scope); let tc = try_catch.enter(); match maybe_buf { Some((op_id, buf)) => { let op_id: v8::Local<v8::Value> = v8::Integer::new(scope, op_id as i32).into(); let ui8: v8::Local<v8::Value> = bindings::boxed_slice_to_uint8array(scope, buf).into(); js_recv_cb.call(scope, context, global, &[op_id, ui8]) } None => js_recv_cb.call(scope, context, global, &[]), }; match tc.exception() { None => Ok(()), Some(exception) => self.exception_to_err_result(scope, exception), } } /// Takes a snapshot. The isolate should have been created with will_snapshot /// set to true. /// /// ErrBox can be downcast to a type that exposes additional information about /// the V8 exception. By default this type is CoreJSError, however it may be a /// different type if Isolate::set_js_error_create() has been used. 
pub fn snapshot(&mut self) -> Result<v8::OwnedStartupData, ErrBox> { assert!(self.snapshot_creator.is_some()); let isolate = self.v8_isolate.as_ref().unwrap(); let mut locker = v8::Locker::new(isolate); let mut hs = v8::HandleScope::new(locker.enter()); let scope = hs.enter(); self.global_context.reset(scope); let snapshot_creator = self.snapshot_creator.as_mut().unwrap(); let snapshot = snapshot_creator .create_blob(v8::FunctionCodeHandling::Keep) .unwrap(); self.has_snapshotted = true; self.check_last_exception().map(|_| snapshot) } } impl Future for Isolate { type Output = Result<(), ErrBox>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { let inner = self.get_mut(); inner.waker.register(cx.waker()); inner.shared_init(); let mut locker = v8::Locker::new(&*inner.v8_isolate.as_mut().unwrap()); let mut hs = v8::HandleScope::new(locker.enter()); let scope = hs.enter(); let context = inner.global_context.get(scope).unwrap(); let mut cs = v8::ContextScope::new(scope, context); let scope = cs.enter(); inner.check_promise_exceptions(scope)?; let mut overflow_response: Option<(OpId, Buf)> = None; loop { // Now handle actual ops. inner.have_unpolled_ops = false; #[allow(clippy::match_wild_err_arm)] match select(&mut inner.pending_ops, &mut inner.pending_unref_ops) .poll_next_unpin(cx) { Poll::Ready(Some(Err(_))) => panic!("unexpected op error"), Poll::Ready(None) => break, Poll::Pending => break, Poll::Ready(Some(Ok((op_id, buf)))) => { let successful_push = inner.shared.push(op_id, &buf); if !successful_push { // If we couldn't push the response to the shared queue, because // there wasn't enough size, we will return the buffer via the // legacy route, using the argument of deno_respond. overflow_response = Some((op_id, buf)); break; } } } } if inner.shared.size() > 0 { inner.async_op_response(scope, None)?; // The other side should have shifted off all the messages. assert_eq!(inner.shared.size(), 0); } if overflow_response.is_some() { let (op_id, buf) = overflow_response.take().unwrap(); inner.async_op_response(scope, Some((op_id, buf)))?; } inner.check_promise_exceptions(scope)?; // We're idle if pending_ops is empty. if inner.pending_ops.is_empty() { Poll::Ready(Ok(())) } else { if inner.have_unpolled_ops { inner.waker.wake(); } Poll::Pending } } } /// IsolateHandle is a thread safe handle on an Isolate. It exposed thread safe V8 functions. #[derive(Clone)] pub struct IsolateHandle { shared_isolate: Arc<Mutex<Option<*mut v8::Isolate>>>, } unsafe impl Send for IsolateHandle {} impl IsolateHandle { /// Terminate the execution of any currently running javascript. /// After terminating execution it is probably not wise to continue using /// the isolate. 
pub fn terminate_execution(&self) { if let Some(isolate) = *self.shared_isolate.lock().unwrap() { let isolate = unsafe { &mut *isolate }; isolate.terminate_execution(); } } } pub fn js_check<T>(r: Result<T, ErrBox>) -> T { if let Err(e) = r { panic!(e.to_string()); } r.unwrap() } #[cfg(test)] pub mod tests { use super::*; use futures::future::lazy; use std::ops::FnOnce; use std::sync::atomic::{AtomicUsize, Ordering}; pub fn run_in_task<F>(f: F) where F: FnOnce(&mut Context) + Send + 'static, { futures::executor::block_on(lazy(move |cx| f(cx))); } fn poll_until_ready<F>(future: &mut F, max_poll_count: usize) -> F::Output where F: Future + Unpin, { let mut cx = Context::from_waker(futures::task::noop_waker_ref()); for _ in 0..max_poll_count { match future.poll_unpin(&mut cx) { Poll::Pending => continue, Poll::Ready(val) => return val, } } panic!( "Isolate still not ready after polling {} times.", max_poll_count ) } pub enum Mode { Async, AsyncUnref, OverflowReqSync, OverflowResSync, OverflowReqAsync, OverflowResAsync, } pub fn setup(mode: Mode) -> (Box<Isolate>, Arc<AtomicUsize>) { let dispatch_count = Arc::new(AtomicUsize::new(0)); let dispatch_count_ = dispatch_count.clone(); let mut isolate = Isolate::new(StartupData::None, false); let dispatcher = move |control: &[u8], _zero_copy: Option<ZeroCopyBuf>| -> CoreOp { dispatch_count_.fetch_add(1, Ordering::Relaxed); match mode { Mode::Async => { assert_eq!(control.len(), 1); assert_eq!(control[0], 42); let buf = vec![43u8].into_boxed_slice(); Op::Async(futures::future::ok(buf).boxed()) } Mode::AsyncUnref => { assert_eq!(control.len(), 1); assert_eq!(control[0], 42); let fut = async { // This future never finish. futures::future::pending::<()>().await; let buf = vec![43u8].into_boxed_slice(); Ok(buf) }; Op::AsyncUnref(fut.boxed()) } Mode::OverflowReqSync => { assert_eq!(control.len(), 100 * 1024 * 1024); let buf = vec![43u8].into_boxed_slice(); Op::Sync(buf) } Mode::OverflowResSync => { assert_eq!(control.len(), 1); assert_eq!(control[0], 42); let mut vec = Vec::<u8>::new(); vec.resize(100 * 1024 * 1024, 0); vec[0] = 99; let buf = vec.into_boxed_slice(); Op::Sync(buf) } Mode::OverflowReqAsync => { assert_eq!(control.len(), 100 * 1024 * 1024); let buf = vec![43u8].into_boxed_slice(); Op::Async(futures::future::ok(buf).boxed()) } Mode::OverflowResAsync => { assert_eq!(control.len(), 1); assert_eq!(control[0], 42); let mut vec = Vec::<u8>::new(); vec.resize(100 * 1024 * 1024, 0); vec[0] = 4; let buf = vec.into_boxed_slice(); Op::Async(futures::future::ok(buf).boxed()) } } }; isolate.register_op("test", dispatcher); js_check(isolate.execute( "setup.js", r#" function assert(cond) { if (!cond) { throw Error("assert"); } } "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 0); (isolate, dispatch_count) } #[test] fn test_dispatch() { let (mut isolate, dispatch_count) = setup(Mode::Async); js_check(isolate.execute( "filename.js", r#" let control = new Uint8Array([42]); Deno.core.send(1, control); async function main() { Deno.core.send(1, control); } main(); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 2); } #[test] fn test_poll_async_delayed_ops() { run_in_task(|cx| { let (mut isolate, dispatch_count) = setup(Mode::Async); js_check(isolate.execute( "setup2.js", r#" let nrecv = 0; Deno.core.setAsyncHandler(1, (buf) => { nrecv++; }); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 0); js_check(isolate.execute( "check1.js", r#" assert(nrecv == 0); let control = new Uint8Array([42]); Deno.core.send(1, control); assert(nrecv 
== 0); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); assert!(match isolate.poll_unpin(cx) { Poll::Ready(Ok(_)) => true, _ => false, }); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); js_check(isolate.execute( "check2.js", r#" assert(nrecv == 1); Deno.core.send(1, control); assert(nrecv == 1); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 2); assert!(match isolate.poll_unpin(cx) { Poll::Ready(Ok(_)) => true, _ => false, }); js_check(isolate.execute("check3.js", "assert(nrecv == 2)")); assert_eq!(dispatch_count.load(Ordering::Relaxed), 2); // We are idle, so the next poll should be the last. assert!(match isolate.poll_unpin(cx) { Poll::Ready(Ok(_)) => true, _ => false, }); }); } #[test] fn test_poll_async_optional_ops() { run_in_task(|cx| { let (mut isolate, dispatch_count) = setup(Mode::AsyncUnref); js_check(isolate.execute( "check1.js", r#" Deno.core.setAsyncHandler(1, (buf) => { // This handler will never be called assert(false); }); let control = new Uint8Array([42]); Deno.core.send(1, control); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); // The above op never finish, but isolate can finish // because the op is an unreffed async op. assert!(match isolate.poll_unpin(cx) { Poll::Ready(Ok(_)) => true, _ => false, }); }) } #[test] fn terminate_execution() { let (tx, rx) = std::sync::mpsc::channel::<bool>(); let tx_clone = tx.clone(); let (mut isolate, _dispatch_count) = setup(Mode::Async); let shared = isolate.shared_isolate_handle(); let t1 = std::thread::spawn(move || { // allow deno to boot and run std::thread::sleep(std::time::Duration::from_millis(100)); // terminate execution shared.terminate_execution(); // allow shutdown std::thread::sleep(std::time::Duration::from_millis(200)); // unless reported otherwise the test should fail after this point tx_clone.send(false).ok(); }); let t2 = std::thread::spawn(move || { // Rn an infinite loop, which should be terminated. match isolate.execute("infinite_loop.js", "for(;;) {}") { Ok(_) => panic!("execution should be terminated"), Err(e) => { assert_eq!(e.to_string(), "Uncaught Error: execution terminated") } }; // `execute()` returned, which means `terminate_execution()` worked. tx.send(true).ok(); // Make sure the isolate unusable again. isolate .execute("simple.js", "1 + 1") .expect("execution should be possible again"); }); rx.recv().expect("execution should be terminated"); t1.join().unwrap(); t2.join().unwrap(); } #[test] fn dangling_shared_isolate() { let shared = { // isolate is dropped at the end of this block let (mut isolate, _dispatch_count) = setup(Mode::Async); isolate.shared_isolate_handle() }; // this should not SEGFAULT shared.terminate_execution(); } #[test] fn overflow_req_sync() { let (mut isolate, dispatch_count) = setup(Mode::OverflowReqSync); js_check(isolate.execute( "overflow_req_sync.js", r#" let asyncRecv = 0; Deno.core.setAsyncHandler(1, (buf) => { asyncRecv++ }); // Large message that will overflow the shared space. let control = new Uint8Array(100 * 1024 * 1024); let response = Deno.core.dispatch(1, control); assert(response instanceof Uint8Array); assert(response.length == 1); assert(response[0] == 43); assert(asyncRecv == 0); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); } #[test] fn overflow_res_sync() { // TODO(ry) This test is quite slow due to memcpy-ing 100MB into JS. We // should optimize this. 
let (mut isolate, dispatch_count) = setup(Mode::OverflowResSync); js_check(isolate.execute( "overflow_res_sync.js", r#" let asyncRecv = 0; Deno.core.setAsyncHandler(1, (buf) => { asyncRecv++ }); // Large message that will overflow the shared space. let control = new Uint8Array([42]); let response = Deno.core.dispatch(1, control); assert(response instanceof Uint8Array); assert(response.length == 100 * 1024 * 1024); assert(response[0] == 99); assert(asyncRecv == 0); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); } #[test] fn overflow_req_async() { run_in_task(|cx| { let (mut isolate, dispatch_count) = setup(Mode::OverflowReqAsync); js_check(isolate.execute( "overflow_req_async.js", r#" let asyncRecv = 0; Deno.core.setAsyncHandler(1, (buf) => { assert(buf.byteLength === 1); assert(buf[0] === 43); asyncRecv++; }); // Large message that will overflow the shared space. let control = new Uint8Array(100 * 1024 * 1024); let response = Deno.core.dispatch(1, control); // Async messages always have null response. assert(response == null); assert(asyncRecv == 0); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); assert!(match isolate.poll_unpin(cx) { Poll::Ready(Ok(_)) => true, _ => false, }); js_check(isolate.execute("check.js", "assert(asyncRecv == 1);")); }); } #[test] fn overflow_res_async() { run_in_task(|_cx| { // TODO(ry) This test is quite slow due to memcpy-ing 100MB into JS. We // should optimize this. let (mut isolate, dispatch_count) = setup(Mode::OverflowResAsync); js_check(isolate.execute( "overflow_res_async.js", r#" let asyncRecv = 0; Deno.core.setAsyncHandler(1, (buf) => { assert(buf.byteLength === 100 * 1024 * 1024); assert(buf[0] === 4); asyncRecv++; }); // Large message that will overflow the shared space. let control = new Uint8Array([42]); let response = Deno.core.dispatch(1, control); assert(response == null); assert(asyncRecv == 0); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 1); poll_until_ready(&mut isolate, 3).unwrap(); js_check(isolate.execute("check.js", "assert(asyncRecv == 1);")); }); } #[test] fn overflow_res_multiple_dispatch_async() { // TODO(ry) This test is quite slow due to memcpy-ing 100MB into JS. We // should optimize this. run_in_task(|_cx| { let (mut isolate, dispatch_count) = setup(Mode::OverflowResAsync); js_check(isolate.execute( "overflow_res_multiple_dispatch_async.js", r#" let asyncRecv = 0; Deno.core.setAsyncHandler(1, (buf) => { assert(buf.byteLength === 100 * 1024 * 1024); assert(buf[0] === 4); asyncRecv++; }); // Large message that will overflow the shared space. 
let control = new Uint8Array([42]); let response = Deno.core.dispatch(1, control); assert(response == null); assert(asyncRecv == 0); // Dispatch another message to verify that pending ops // are done even if shared space overflows Deno.core.dispatch(1, control); "#, )); assert_eq!(dispatch_count.load(Ordering::Relaxed), 2); poll_until_ready(&mut isolate, 3).unwrap(); js_check(isolate.execute("check.js", "assert(asyncRecv == 2);")); }); } #[test] fn test_pre_dispatch() { run_in_task(|mut cx| { let (mut isolate, _dispatch_count) = setup(Mode::OverflowResAsync); js_check(isolate.execute( "bad_op_id.js", r#" let thrown; try { Deno.core.dispatch(100, []); } catch (e) { thrown = e; } assert(String(thrown) === "TypeError: Unknown op id: 100"); "#, )); if let Poll::Ready(Err(_)) = isolate.poll_unpin(&mut cx) { unreachable!(); } }); } #[test] fn test_js() { run_in_task(|mut cx| { let (mut isolate, _dispatch_count) = setup(Mode::Async); js_check( isolate.execute( "shared_queue_test.js", include_str!("shared_queue_test.js"), ), ); if let Poll::Ready(Err(_)) = isolate.poll_unpin(&mut cx) { unreachable!(); } }); } #[test] fn will_snapshot() { let snapshot = { let mut isolate = Isolate::new(StartupData::None, true); js_check(isolate.execute("a.js", "a = 1 + 2")); let s = isolate.snapshot().unwrap(); drop(isolate); s }; let startup_data = StartupData::OwnedSnapshot(snapshot); let mut isolate2 = Isolate::new(startup_data, false); js_check(isolate2.execute("check.js", "if (a != 3) throw Error('x')")); } } // TODO(piscisaureus): rusty_v8 should implement the Error trait on // values of type v8::Global<T>. pub struct ErrWithV8Handle { err: ErrBox, handle: v8::Global<v8::Value>, } impl ErrWithV8Handle { pub fn new( scope: &mut impl v8::InIsolate, err: ErrBox, handle: v8::Local<v8::Value>, ) -> Self { let handle = v8::Global::new_from(scope, handle); Self { err, handle } } pub fn get_handle(&mut self) -> &mut v8::Global<v8::Value> { &mut self.handle } } unsafe impl Send for ErrWithV8Handle {} unsafe impl Sync for ErrWithV8Handle {} impl Error for ErrWithV8Handle {} impl fmt::Display for ErrWithV8Handle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.err.fmt(f) } } impl fmt::Debug for ErrWithV8Handle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.err.fmt(f) } }
31.112203
95
0.618827
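The `poll` implementation above depends on a specific wake-up discipline: register the task's waker before inspecting state, and self-wake when new ops arrived during the poll so the executor schedules another pass instead of stalling. A compact sketch of that discipline using the `futures` crate; `OpDriver` is an illustrative type, not part of the real codebase, and the op bookkeeping is reduced to a plain counter.

```rust
use futures::task::AtomicWaker;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::task::{Context, Poll};

struct OpDriver {
    waker: AtomicWaker,
    pending_ops: AtomicUsize,
}

impl Future for &OpDriver {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        // Register before checking state, so a wake that lands in
        // between is not lost.
        self.waker.register(cx.waker());
        let remaining = self.pending_ops.load(Ordering::SeqCst);
        if remaining == 0 {
            return Poll::Ready(());
        }
        // Pretend one op completed during this poll, then self-wake so
        // the executor polls again instead of waiting forever.
        self.pending_ops.store(remaining - 1, Ordering::SeqCst);
        self.waker.wake();
        Poll::Pending
    }
}

fn main() {
    let driver = OpDriver {
        waker: AtomicWaker::new(),
        pending_ops: AtomicUsize::new(3),
    };
    futures::executor::block_on(&driver); // completes after draining the ops
    assert_eq!(driver.pending_ops.load(Ordering::SeqCst), 0);
}
```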
ac31757d17d5b6d5660f1c191a24023cb02c005c
6,379
use std::cmp;

use crate::utils::{is_copy, is_self_ty, snippet, span_lint_and_sugg};
use if_chain::if_chain;
use matches::matches;
use rustc::session::config::Config as SessionConfig;
use rustc::ty;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::intravisit::FnKind;
use rustc_hir::*;
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::Span;
use rustc_target::abi::LayoutOf;
use rustc_target::spec::abi::Abi;

declare_clippy_lint! {
    /// **What it does:** Checks for functions taking arguments by reference, where
    /// the argument type is `Copy` and small enough to be more efficient to always
    /// pass by value.
    ///
    /// **Why is this bad?** In many calling conventions instances of structs will
    /// be passed through registers if they fit into two or fewer general purpose
    /// registers.
    ///
    /// **Known problems:** This lint is target register size dependent; it is
    /// limited to 32-bit to try and reduce portability problems between 32 and
    /// 64-bit, but if you are compiling for 8- or 16-bit targets then the limit
    /// will be different.
    ///
    /// The configuration option `trivial_copy_size_limit` can be set to override
    /// this limit for a project.
    ///
    /// This lint attempts to allow passing arguments by reference if a reference
    /// to that argument is returned. This is implemented by comparing the lifetime
    /// of the argument and return value for equality. However, this can cause
    /// false positives in cases involving multiple lifetimes that are bounded by
    /// each other.
    ///
    /// **Example:**
    ///
    /// ```rust
    /// // Bad
    /// fn foo(v: &u32) {}
    /// ```
    ///
    /// ```rust
    /// // Better
    /// fn foo(v: u32) {}
    /// ```
    pub TRIVIALLY_COPY_PASS_BY_REF,
    perf,
    "functions taking small copyable arguments by reference"
}

#[derive(Copy, Clone)]
pub struct TriviallyCopyPassByRef {
    limit: u64,
}

impl<'a, 'tcx> TriviallyCopyPassByRef {
    pub fn new(limit: Option<u64>, target: &SessionConfig) -> Self {
        let limit = limit.unwrap_or_else(|| {
            let bit_width = u64::from(target.ptr_width);
            // Cap the calculated bit width at 32-bits to reduce
            // portability problems between 32 and 64-bit targets
            let bit_width = cmp::min(bit_width, 32);
            #[allow(clippy::integer_division)]
            let byte_width = bit_width / 8;
            // Use a limit of 2 times the register byte width
            byte_width * 2
        });
        Self { limit }
    }

    fn check_poly_fn(&mut self, cx: &LateContext<'_, 'tcx>, hir_id: HirId, decl: &FnDecl<'_>, span: Option<Span>) {
        let fn_def_id = cx.tcx.hir().local_def_id(hir_id);

        let fn_sig = cx.tcx.fn_sig(fn_def_id);
        let fn_sig = cx.tcx.erase_late_bound_regions(&fn_sig);

        // Use lifetimes to determine if we're returning a reference to the
        // argument. In that case we can't switch to pass-by-value as the
        // argument will not live long enough.
        let output_lts = match fn_sig.output().kind {
            ty::Ref(output_lt, _, _) => vec![output_lt],
            ty::Adt(_, substs) => substs.regions().collect(),
            _ => vec![],
        };

        for (input, &ty) in decl.inputs.iter().zip(fn_sig.inputs()) {
            // All spans generated from a proc-macro invocation are the same...
            match span {
                Some(s) if s == input.span => return,
                _ => (),
            }

            if_chain! {
                if let ty::Ref(input_lt, ty, Mutability::Not) = ty.kind;
                if !output_lts.contains(&input_lt);
                if is_copy(cx, ty);
                if let Some(size) = cx.layout_of(ty).ok().map(|l| l.size.bytes());
                if size <= self.limit;
                if let hir::TyKind::Rptr(_, MutTy { ty: ref decl_ty, .. }) = input.kind;
                then {
                    let value_type = if is_self_ty(decl_ty) {
                        "self".into()
                    } else {
                        snippet(cx, decl_ty.span, "_").into()
                    };
                    span_lint_and_sugg(
                        cx,
                        TRIVIALLY_COPY_PASS_BY_REF,
                        input.span,
                        &format!("this argument ({} byte) is passed by reference, but would be more efficient if passed by value (limit: {} byte)", size, self.limit),
                        "consider passing by value instead",
                        value_type,
                        Applicability::Unspecified,
                    );
                }
            }
        }
    }
}

impl_lint_pass!(TriviallyCopyPassByRef => [TRIVIALLY_COPY_PASS_BY_REF]);

impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TriviallyCopyPassByRef {
    fn check_trait_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::TraitItem<'_>) {
        if item.span.from_expansion() {
            return;
        }
        if let hir::TraitItemKind::Method(method_sig, _) = &item.kind {
            self.check_poly_fn(cx, item.hir_id, &*method_sig.decl, None);
        }
    }

    fn check_fn(
        &mut self,
        cx: &LateContext<'a, 'tcx>,
        kind: FnKind<'tcx>,
        decl: &'tcx FnDecl<'_>,
        _body: &'tcx Body<'_>,
        span: Span,
        hir_id: HirId,
    ) {
        if span.from_expansion() {
            return;
        }

        match kind {
            FnKind::ItemFn(.., header, _, attrs) => {
                if header.abi != Abi::Rust {
                    return;
                }
                for a in attrs {
                    if a.meta_item_list().is_some() && a.check_name(sym!(proc_macro_derive)) {
                        return;
                    }
                }
            },
            FnKind::Method(..) => (),
            _ => return,
        }

        // Exclude non-inherent impls
        if let Some(Node::Item(item)) = cx.tcx.hir().find(cx.tcx.hir().get_parent_node(hir_id)) {
            if matches!(item.kind, ItemKind::Impl(_, _, _, _, Some(_), _, _) | ItemKind::Trait(..)) {
                return;
            }
        }

        self.check_poly_fn(cx, hir_id, decl, Some(span));
    }
}
35.438889
166
0.545697
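A concrete view of what TRIVIALLY_COPY_PASS_BY_REF flags may help here. This is a minimal sketch (function names are hypothetical, and the byte limit assumes the default two-register budget after the 32-bit cap described above):

// Flagged: `u32` is `Copy` and far below the size limit, so the
// reference only adds indirection.
fn sum_by_ref(a: &u32, b: &u32) -> u32 {
    *a + *b
}

// Preferred: pass the small `Copy` values directly.
fn sum_by_value(a: u32, b: u32) -> u32 {
    a + b
}

// Not flagged: the return value borrows from the argument, so
// switching to pass-by-value would not compile.
fn first(v: &Vec<u32>) -> &u32 {
    &v[0]
}

fn main() {
    assert_eq!(sum_by_ref(&1, &2), sum_by_value(1, 2));
    assert_eq!(*first(&vec![7, 8]), 7);
}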
d67afa187b1574c3baa36e017d6b5d7466d36c67
287
struct Foo(i32, bool);

impl Foo {
    fn new(a: i32, b: bool) -> Foo {
        Foo(a, b)
    }

    fn test2() -> i32 {
        test_forward_decl()
    }
}

fn test_forward_decl() -> i32 {
    123
}

fn main() {
    let a;
    a = Foo::new(1, true);
    let b;
    b = Foo::test2();
}
11.958333
36
0.45993
037ecffd7188090e40dee77ac05bca23fea38965
12,074
use std::path::PathBuf;
use dprint_core::plugins::PluginInfo;

use crate::environment::Environment;
use crate::types::ErrBox;
use super::super::CompileFn;
use super::manifest::*;

pub struct Cache<'a, TEnvironment : Environment, TCompileFn: CompileFn> {
    environment: &'a TEnvironment,
    cache_manifest: CacheManifest,
    compile: &'a TCompileFn,
}

pub struct PluginCacheItem {
    pub file_path: PathBuf,
    pub info: PluginInfo,
}

impl<'a, TEnvironment, TCompileFn> Cache<'a, TEnvironment, TCompileFn> where TEnvironment : Environment, TCompileFn : CompileFn {
    pub fn new(environment: &'a TEnvironment, compile: &'a TCompileFn) -> Result<Self, ErrBox> {
        let cache_manifest = read_manifest(environment)?;
        Ok(Cache {
            environment,
            cache_manifest,
            compile,
        })
    }

    pub async fn get_plugin_cache_item(&mut self, url: &str) -> Result<PluginCacheItem, ErrBox> {
        let cache_dir = self.environment.get_cache_dir()?;
        if let Some(cache_entry) = self.get_url_cache_entry(url) {
            let file_path = cache_dir.join(&cache_entry.file_name);
            let info = match cache_entry.plugin_info.clone() {
                Some(info) => info,
                None => return err!("Expected to have plugin info stored in the cache."),
            };
            return Ok(PluginCacheItem {
                file_path,
                info,
            });
        }

        let file_bytes = self.environment.download_file(url).await?;
        let file_name = self.get_file_name_from_url_or_path(url, "compiled_wasm");
        let file_path = cache_dir.join(&file_name);

        self.environment.log("Compiling wasm module...");
        let compile_result = (self.compile)(&file_bytes)?;
        let url_cache_entry = UrlCacheEntry {
            url: String::from(url),
            file_name,
            created_time: self.environment.get_time_secs(),
            plugin_info: Some(compile_result.plugin_info.clone()),
        };

        self.environment.write_file_bytes(&file_path, &compile_result.bytes)?;

        self.cache_manifest.urls.push(url_cache_entry);
        self.save_manifest()?;

        Ok(PluginCacheItem {
            file_path,
            info: compile_result.plugin_info,
        })
    }

    pub fn forget_url(&mut self, url: &str) -> Result<(), ErrBox> {
        if let Some(index) = self.get_url_cache_entry_index(url) {
            if let Some(entry) = self.cache_manifest.urls.get(index) {
                let cache_dir = self.environment.get_cache_dir()?;
                let cache_file = cache_dir.join(&entry.file_name);
                match self.environment.remove_file(&cache_file) {
                    _ => {}, // do nothing on success or failure
                }
            }
            self.cache_manifest.urls.remove(index);
            self.save_manifest()?;
        }

        Ok(())
    }

    fn get_file_name_from_url_or_path(&self, text: &str, extension: &str) -> String {
        let text = text.trim_end_matches('/').trim_end_matches('\\');
        let last_slash = std::cmp::max(text.rfind('/').unwrap_or(0), text.rfind('\\').unwrap_or(0));
        if last_slash == 0 {
            self.get_unique_file_name("temp", extension)
        } else {
            let file_name = PathBuf::from(&text[last_slash + 1..]);
            let file_stem = file_name.file_stem().expect("Expected to find the file stem."); // no extension
            self.get_unique_file_name(file_stem.to_str().unwrap(), extension)
        }
    }

    fn get_unique_file_name(&self, prefix: &str, extension: &str) -> String {
        let mut index = 1;
        loop {
            let file_name_with_ext = if index == 1 {
                get_file_name_with_ext(prefix, extension)
            } else {
                get_file_name_with_ext(&format!("{}_{}", prefix, index), extension)
            };
            if self.get_file_name_cache_entry(&file_name_with_ext).is_some() {
                index += 1;
            } else {
                return file_name_with_ext;
            }
        }

        fn get_file_name_with_ext(file_name: &str, extension: &str) -> String {
            format!("{}.{}", file_name, extension)
        }
    }

    fn get_file_name_cache_entry<'b>(&'b self, file_name: &str) -> Option<&'b UrlCacheEntry> {
        self.cache_manifest.urls.iter().filter(|u| u.file_name == file_name).next()
    }

    fn get_url_cache_entry<'b>(&'b self, url: &str) -> Option<&'b UrlCacheEntry> {
        self.cache_manifest.urls.iter().filter(|u| u.url == url).next()
    }

    fn get_url_cache_entry_index(&self, url: &str) -> Option<usize> {
        self.cache_manifest.urls.iter().position(|u| u.url == url)
    }

    fn save_manifest(&self) -> Result<(), ErrBox> {
        write_manifest(&self.cache_manifest, self.environment)
    }
}

#[cfg(test)]
mod test {
    use dprint_core::plugins::PluginInfo;

    use crate::environment::TestEnvironment;
    use crate::plugins::types::CompilationResult;
    use crate::types::ErrBox;
    use super::*;

    #[tokio::test]
    async fn it_should_read_file_paths_from_manifest() -> Result<(), ErrBox> {
        let environment = TestEnvironment::new();
        environment.write_file(
            &environment.get_cache_dir().unwrap().join("cache-manifest.json"),
            r#"{
    "urls": [{
        "url": "https://plugins.dprint.dev/test.wasm",
        "fileName": "my-file.wasm",
        "createdTime": 123456,
        "pluginInfo": {
            "name": "test-plugin",
            "version": "0.1.0",
            "configKeys": ["test-plugin"],
            "fileExtensions": ["txt","dat"],
            "helpUrl": "test-url",
            "configSchemaUrl": "schema-url"
        }
    }]
}"#
        ).unwrap();

        let mut cache = Cache::new(&environment, &identity_compile).unwrap();
        let cache_item = cache.get_plugin_cache_item("https://plugins.dprint.dev/test.wasm").await?;

        assert_eq!(cache_item.file_path, environment.get_cache_dir().unwrap().join("my-file.wasm"));
        assert_eq!(cache_item.info, get_test_plugin_info());
        Ok(())
    }

    #[tokio::test]
    async fn it_should_download_file() -> Result<(), ErrBox> {
        let environment = TestEnvironment::new();
        environment.add_remote_file("https://plugins.dprint.dev/test.wasm", "t".as_bytes());

        let mut cache = Cache::new(&environment, &identity_compile).unwrap();
        let file_path = cache.get_plugin_cache_item("https://plugins.dprint.dev/test.wasm").await?.file_path;
        let expected_file_path = PathBuf::from("/cache").join("test.compiled_wasm");

        assert_eq!(file_path, expected_file_path);

        // should be the same when requesting it again
        let file_path = cache.get_plugin_cache_item("https://plugins.dprint.dev/test.wasm").await?.file_path;
        assert_eq!(file_path, expected_file_path);

        // should have saved the manifest
        assert_eq!(
            environment.read_file(&environment.get_cache_dir().unwrap().join("cache-manifest.json")).unwrap(),
            r#"{"urls":[{"url":"https://plugins.dprint.dev/test.wasm","fileName":"test.compiled_wasm","createdTime":123456,"pluginInfo":{"name":"test-plugin","version":"0.1.0","configKeys":["test-plugin"],"fileExtensions":["txt","dat"],"helpUrl":"test-url","configSchemaUrl":"schema-url"}}]}"#,
        );

        Ok(())
    }

    #[tokio::test]
    async fn it_should_handle_multiple_urls_with_same_file_name() -> Result<(), ErrBox> {
        let environment = TestEnvironment::new();
        environment.add_remote_file("https://plugins.dprint.dev/test.wasm", "t".as_bytes());
        environment.add_remote_file("https://plugins.dprint.dev/other/test.wasm", "t".as_bytes());

        let mut cache = Cache::new(&environment, &identity_compile).unwrap();
        let file_path = cache.get_plugin_cache_item("https://plugins.dprint.dev/test.wasm").await?.file_path;
        assert_eq!(file_path, PathBuf::from("/cache").join("test.compiled_wasm"));

        let file_path = cache.get_plugin_cache_item("https://plugins.dprint.dev/other/test.wasm").await?.file_path;
        assert_eq!(file_path, PathBuf::from("/cache").join("test_2.compiled_wasm"));

        Ok(())
    }

    #[tokio::test]
    async fn it_should_handle_urls_without_extension_or_no_slash() -> Result<(), ErrBox> {
        let environment = TestEnvironment::new();
        environment.add_remote_file("https://plugins.dprint.dev/test", "t".as_bytes());

        let mut cache = Cache::new(&environment, &identity_compile).unwrap();
        let file_path = cache.get_plugin_cache_item("https://plugins.dprint.dev/test").await?.file_path;
        assert_eq!(file_path, PathBuf::from("/cache").join("test.compiled_wasm"));

        Ok(())
    }

    #[tokio::test]
    async fn it_should_handle_urls_without_slash() -> Result<(), ErrBox> {
        let environment = TestEnvironment::new();
        environment.add_remote_file("testing", "t".as_bytes());

        let mut cache = Cache::new(&environment, &identity_compile).unwrap();
        let file_path = cache.get_plugin_cache_item("testing").await?.file_path;
        assert_eq!(file_path, PathBuf::from("/cache").join("temp.compiled_wasm"));

        Ok(())
    }

    #[tokio::test]
    async fn it_should_handle_with_backslash_for_some_reason() -> Result<(), ErrBox> {
        let environment = TestEnvironment::new();
        environment.add_remote_file("testing\\asdf", "t".as_bytes());

        let mut cache = Cache::new(&environment, &identity_compile).unwrap();
        let file_path = cache.get_plugin_cache_item("testing\\asdf").await?.file_path;
        assert_eq!(file_path, PathBuf::from("/cache").join("asdf.compiled_wasm"));

        Ok(())
    }

    #[test]
    fn it_should_delete_url_from_manifest_when_no_file() {
        let environment = TestEnvironment::new();
        environment.write_file(
            &environment.get_cache_dir().unwrap().join("cache-manifest.json"),
            r#"{
    "urls": [{
        "url": "https://plugins.dprint.dev/test.wasm",
        "fileName": "my-file.wasm",
        "createdTime": 123456
    }]
}"#
        ).unwrap();

        let mut cache = Cache::new(&environment, &identity_compile).unwrap();
        cache.forget_url("https://plugins.dprint.dev/test.wasm").unwrap();

        assert_eq!(
            environment.read_file(&environment.get_cache_dir().unwrap().join("cache-manifest.json")).unwrap(),
            r#"{"urls":[]}"#
        );
    }

    #[test]
    fn it_should_delete_url_from_manifest_when_file_exists() {
        let environment = TestEnvironment::new();
        environment.write_file(
            &environment.get_cache_dir().unwrap().join("cache-manifest.json"),
            r#"{"urls": [{
        "url": "https://plugins.dprint.dev/test.wasm",
        "fileName": "my-file.wasm",
        "createdTime": 123456
    }]
}"#
        ).unwrap();
        let wasm_file_path = environment.get_cache_dir().unwrap().join("my-file.wasm");
        environment.write_file_bytes(&wasm_file_path, "t".as_bytes()).unwrap();

        let mut cache = Cache::new(&environment, &identity_compile).unwrap();
        cache.forget_url("https://plugins.dprint.dev/test.wasm").unwrap();

        // should delete the file too
        assert_eq!(environment.read_file(&wasm_file_path).is_err(), true);

        assert_eq!(
            environment.read_file(&environment.get_cache_dir().unwrap().join("cache-manifest.json")).unwrap(),
            r#"{"urls":[]}"#
        );
    }

    fn identity_compile(bytes: &[u8]) -> Result<CompilationResult, ErrBox> {
        Ok(CompilationResult {
            bytes: bytes.to_vec(),
            plugin_info: get_test_plugin_info(),
        })
    }

    fn get_test_plugin_info() -> PluginInfo {
        PluginInfo {
            name: String::from("test-plugin"),
            version: String::from("0.1.0"),
            config_keys: vec![String::from("test-plugin")],
            file_extensions: vec![String::from("txt"), String::from("dat")],
            help_url: String::from("test-url"),
            config_schema_url: String::from("schema-url"),
        }
    }
}
40.653199
294
0.615952
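The collision handling in get_unique_file_name above is worth seeing in isolation. A standalone sketch of the same suffixing scheme (the helper below is hypothetical; the real code consults the cache manifest rather than a HashSet):

use std::collections::HashSet;

// Mirrors the suffixing strategy: `test.compiled_wasm`, then
// `test_2.compiled_wasm`, `test_3.compiled_wasm`, and so on.
fn unique_file_name(taken: &HashSet<String>, prefix: &str, ext: &str) -> String {
    let mut index = 1;
    loop {
        let candidate = if index == 1 {
            format!("{}.{}", prefix, ext)
        } else {
            format!("{}_{}.{}", prefix, index, ext)
        };
        if !taken.contains(&candidate) {
            return candidate;
        }
        index += 1;
    }
}

fn main() {
    let mut taken = HashSet::new();
    taken.insert(unique_file_name(&taken, "test", "compiled_wasm"));
    assert!(taken.contains("test.compiled_wasm"));
    assert_eq!(unique_file_name(&taken, "test", "compiled_wasm"), "test_2.compiled_wasm");
}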
6215fb0cd7db3917011453eb0df41799f6c20dd2
443
/// Note: this object does not actually exist in the spec.
///
/// We use it for managing attestations that have not been aggregated.
use super::{AttestationData, Signature};
use serde_derive::Serialize;

#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct FreeAttestation {
    pub data: AttestationData,
    pub signature: Signature,
    pub validator_index: u64,
}
31.642857
70
0.740406
b9c6f11dc9af768642dd264fc4ed60c7c1094a15
77
use adder;

#[test]
fn it_adds_two() {
    assert_eq!(4, adder::add_two(2));
}
12.833333
37
0.623377
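This integration test only exercises the crate's public API; the library side is not part of this file. A guess at the matching src/lib.rs, based on the test's expectation that add_two(2) == 4:

// Hypothetical src/lib.rs for the `adder` crate (assumed, not shown above).
pub fn add_two(a: i32) -> i32 {
    a + 2
}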
90cf193c0a142591ec9d42575bcd4e1e0cd50d17
30
pub mod search;
pub mod sort;
10
15
0.733333
e57141b4baaf946a60af2dd709452d6272610752
1,432
extern crate clap;
extern crate sempfind;

use clap::{App, Arg};
use std::process;

fn main() {
    let matches = App::new("Semperos Find")
        .version("0.1.0")
        .author("Daniel Gregoire <[email protected]>")
        .about("Find files and things by name")
        .arg(
            Arg::with_name("directory_name")
                .short("d")
                .long("directory")
                .value_name("DIRECTORY_NAME")
                .help("Directory's name (case-insensitive substring)")
        )
        .arg(
            Arg::with_name("file_name")
                .short("f")
                .long("file")
                .value_name("FILE_NAME")
                .help("File's name (case-insensitive substring)"),
        )
        .get_matches();

    if let Some(file_name) = matches.value_of("file_name") {
        if let Err(e) = sempfind::find_file(file_name) {
            eprintln!("[ERROR] Failed to find {}, details: {:?}", file_name, e);
            process::exit(1);
        } else {
            process::exit(0);
        }
    }

    if let Some(directory_name) = matches.value_of("directory_name") {
        if let Err(e) = sempfind::find_directory(directory_name) {
            eprintln!("[ERROR] Failed to find {}, details: {:?}", directory_name, e);
            process::exit(1);
        } else {
            process::exit(0);
        }
    }

    println!("{}", matches.usage());
}
30.468085
85
0.511872
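sempfind::find_file itself is not shown in this file, but the --file help text ("case-insensitive substring") suggests a recursive walk with a case-insensitive name match. A sketch under that assumption, using only the standard library (find_file here is a stand-in, not the crate's actual implementation):

use std::fs;
use std::io;
use std::path::Path;

// A guess at what `sempfind::find_file` does: walk the tree and print
// paths whose file name contains the needle, case-insensitively.
fn find_file(dir: &Path, needle: &str) -> io::Result<()> {
    for entry in fs::read_dir(dir)? {
        let path = entry?.path();
        if path.is_dir() {
            find_file(&path, needle)?;
        } else if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
            if name.to_lowercase().contains(&needle.to_lowercase()) {
                println!("{}", path.display());
            }
        }
    }
    Ok(())
}

fn main() -> io::Result<()> {
    find_file(Path::new("."), "readme")
}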
72d8023454e3405a6dcd13a93b387440121b6452
864
struct Solution;

impl Solution {
    fn get_lucky(s: String, k: i32) -> i32 {
        let mut x = "".to_string();
        for c in s.bytes() {
            x.push_str(&format!("{}", c - b'a' + 1));
        }
        for _ in 0..k {
            x = Solution::transform(x);
        }
        x.parse::<i32>().unwrap()
    }

    fn transform(s: String) -> String {
        let mut res = 0;
        for c in s.bytes() {
            res += (c - b'0') as i32;
        }
        format!("{}", res)
    }
}

#[test]
fn test() {
    let s = "iiii".to_string();
    let k = 1;
    let res = 36;
    assert_eq!(Solution::get_lucky(s, k), res);
    let s = "leetcode".to_string();
    let k = 2;
    let res = 6;
    assert_eq!(Solution::get_lucky(s, k), res);
    let s = "zbax".to_string();
    let k = 2;
    let res = 8;
    assert_eq!(Solution::get_lucky(s, k), res);
}
22.153846
53
0.46412
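A quick trace of the first test case above, checked in code: 'i' is letter 9, so "iiii" maps to the digit string "9999", and one transform sums its digits to 36:

// Trace of get_lucky("iiii", 1): letter-to-position mapping, then one
// digit-sum transform. 9 + 9 + 9 + 9 = 36.
fn main() {
    let digits: String = "iiii".bytes().map(|c| (c - b'a' + 1).to_string()).collect();
    assert_eq!(digits, "9999");
    let sum: u32 = digits.bytes().map(|c| (c - b'0') as u32).sum();
    assert_eq!(sum, 36);
}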
fb215c21e6f14cdd4cdb60e0dee1e0b294b81bb3
432
//!
//! The contract resource POST request.
//!

use serde::Deserialize;
use serde::Serialize;

use zksync_types::Address;

///
/// The contract resource POST response body.
///
#[derive(Debug, Serialize, Deserialize)]
pub struct Body {
    /// The contract address.
    pub address: Address,
}

impl Body {
    ///
    /// A shortcut constructor.
    ///
    pub fn new(address: Address) -> Self {
        Self { address }
    }
}
16
45
0.618056
effcabf775e8a668f283c3f79560305ed7adf422
1,694
use super::super::source::*;
use super::super::target::*;
use super::super::super::traits::*;

impl TimeControlPoint {
    ///
    /// Generates a serialized version of this time control point on the specified data target
    ///
    pub fn serialize<Tgt: AnimationDataTarget>(&self, data: &mut Tgt) {
        self.point.serialize(data);
        self.past.serialize(data);
        self.future.serialize(data);
    }

    ///
    /// Generates a serialized version of this time control point on the specified data target
    ///
    pub fn serialize_next<Tgt: AnimationDataTarget>(&self, last: &TimePoint, data: &mut Tgt) -> TimePoint {
        self.past.serialize_next(last, data);
        self.point.serialize_next(&self.past, data);
        self.future.serialize_next(&self.point, data);

        self.future.clone()
    }

    ///
    /// Deserializes a time control point from a data source
    ///
    pub fn deserialize<Src: AnimationDataSource>(data: &mut Src) -> TimeControlPoint {
        let point = TimePoint::deserialize(data);
        let past = TimePoint::deserialize(data);
        let future = TimePoint::deserialize(data);

        TimeControlPoint { point, past, future }
    }

    ///
    /// Deserializes a time control point from a data source, where the last point is known
    ///
    pub fn deserialize_next<Src: AnimationDataSource>(last: &TimePoint, data: &mut Src) -> (TimeControlPoint, TimePoint) {
        let past = TimePoint::deserialize_next(last, data);
        let point = TimePoint::deserialize_next(&past, data);
        let future = TimePoint::deserialize_next(&point, data);

        (TimeControlPoint { point, past, future }, future)
    }
}
35.291667
122
0.645218
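The serialize_next chaining above is a delta encoding: each point is written relative to the one just before it, so a stream of nearby points serializes as small offsets. A self-contained analogy with plain f32 values (the real TimePoint type and its wire format are not shown here, so this illustrates the idea only):

// Delta-encode (past, point, future) against the previously written
// value, mirroring the serialize_next call order above.
fn serialize_next(last: f32, cp: (f32, f32, f32), out: &mut Vec<f32>) -> f32 {
    let (past, point, future) = cp;
    out.push(past - last);
    out.push(point - past);
    out.push(future - point);
    future
}

fn main() {
    let mut out = Vec::new();
    let last = serialize_next(0.0, (1.0, 1.5, 2.0), &mut out);
    assert_eq!(out, vec![1.0, 0.5, 0.5]); // only small offsets are stored
    assert_eq!(last, 2.0); // carried forward as `last` for the next point
}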
e81b36d8a8828f921236838cfd381c7eb4d872e5
7,646
extern crate imgui_winit_support;

use imgui::*;
use imgui_wgpu::{Renderer, RendererConfig};
use pollster::block_on;
use std::time::Instant;
use winit::{
    dpi::LogicalSize,
    event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
    event_loop::{ControlFlow, EventLoop},
    window::Window,
};

fn main() {
    env_logger::init();

    // Set up window and GPU
    let event_loop = EventLoop::new();
    let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);

    let (window, size, surface) = {
        let version = env!("CARGO_PKG_VERSION");

        let window = Window::new(&event_loop).unwrap();
        window.set_inner_size(LogicalSize {
            width: 1280.0,
            height: 720.0,
        });
        window.set_title(&format!("imgui-wgpu {}", version));
        let size = window.inner_size();

        let surface = unsafe { instance.create_surface(&window) };

        (window, size, surface)
    };

    let hidpi_factor = window.scale_factor();

    let adapter = block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
        power_preference: wgpu::PowerPreference::HighPerformance,
        compatible_surface: Some(&surface),
    }))
    .unwrap();

    let (device, queue) =
        block_on(adapter.request_device(&wgpu::DeviceDescriptor::default(), None)).unwrap();

    // Set up swap chain
    let sc_desc = wgpu::SwapChainDescriptor {
        usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
        format: wgpu::TextureFormat::Bgra8UnormSrgb,
        width: size.width as u32,
        height: size.height as u32,
        present_mode: wgpu::PresentMode::Mailbox,
    };

    let mut swap_chain = device.create_swap_chain(&surface, &sc_desc);

    // Set up dear imgui
    let mut imgui = imgui::Context::create();
    let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
    platform.attach_window(
        imgui.io_mut(),
        &window,
        imgui_winit_support::HiDpiMode::Default,
    );
    imgui.set_ini_filename(None);

    let font_size = (13.0 * hidpi_factor) as f32;
    imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32;

    imgui.fonts().add_font(&[FontSource::DefaultFontData {
        config: Some(imgui::FontConfig {
            oversample_h: 1,
            pixel_snap_h: true,
            size_pixels: font_size,
            ..Default::default()
        }),
    }]);

    //
    // Set up dear imgui wgpu renderer
    //
    let clear_color = wgpu::Color {
        r: 0.1,
        g: 0.2,
        b: 0.3,
        a: 1.0,
    };

    let renderer_config = RendererConfig {
        texture_format: sc_desc.format,
        ..Default::default()
    };

    let mut renderer = Renderer::new(&mut imgui, &device, &queue, renderer_config);

    let mut last_frame = Instant::now();
    let mut demo_open = true;

    let mut last_cursor = None;

    // Event loop
    event_loop.run(move |event, _, control_flow| {
        *control_flow = if cfg!(feature = "metal-auto-capture") {
            ControlFlow::Exit
        } else {
            ControlFlow::Poll
        };
        match event {
            Event::WindowEvent {
                event: WindowEvent::Resized(_),
                ..
            } => {
                let size = window.inner_size();

                let sc_desc = wgpu::SwapChainDescriptor {
                    usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
                    format: wgpu::TextureFormat::Bgra8UnormSrgb,
                    width: size.width as u32,
                    height: size.height as u32,
                    present_mode: wgpu::PresentMode::Mailbox,
                };

                swap_chain = device.create_swap_chain(&surface, &sc_desc);
            }
            Event::WindowEvent {
                event:
                    WindowEvent::KeyboardInput {
                        input:
                            KeyboardInput {
                                virtual_keycode: Some(VirtualKeyCode::Escape),
                                state: ElementState::Pressed,
                                ..
                            },
                        ..
                    },
                ..
            }
            | Event::WindowEvent {
                event: WindowEvent::CloseRequested,
                ..
            } => {
                *control_flow = ControlFlow::Exit;
            }
            Event::MainEventsCleared => window.request_redraw(),
            Event::RedrawEventsCleared => {
                let delta_s = last_frame.elapsed();
                let now = Instant::now();
                imgui.io_mut().update_delta_time(now - last_frame);
                last_frame = now;

                let frame = match swap_chain.get_current_frame() {
                    Ok(frame) => frame,
                    Err(e) => {
                        eprintln!("dropped frame: {:?}", e);
                        return;
                    }
                };
                platform
                    .prepare_frame(imgui.io_mut(), &window)
                    .expect("Failed to prepare frame");
                let ui = imgui.frame();

                {
                    let window = imgui::Window::new(im_str!("Hello world"));
                    window
                        .size([300.0, 100.0], Condition::FirstUseEver)
                        .build(&ui, || {
                            ui.text(im_str!("Hello world!"));
                            ui.text(im_str!("This...is...imgui-rs on WGPU!"));
                            ui.separator();
                            let mouse_pos = ui.io().mouse_pos;
                            ui.text(im_str!(
                                "Mouse Position: ({:.1},{:.1})",
                                mouse_pos[0],
                                mouse_pos[1]
                            ));
                        });

                    let window = imgui::Window::new(im_str!("Hello too"));
                    window
                        .size([400.0, 200.0], Condition::FirstUseEver)
                        .position([400.0, 200.0], Condition::FirstUseEver)
                        .build(&ui, || {
                            ui.text(im_str!("Frametime: {:?}", delta_s));
                        });

                    ui.show_demo_window(&mut demo_open);
                }

                let mut encoder: wgpu::CommandEncoder =
                    device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });

                if last_cursor != Some(ui.mouse_cursor()) {
                    last_cursor = Some(ui.mouse_cursor());
                    platform.prepare_render(&ui, &window);
                }

                let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: None,
                    color_attachments: &[wgpu::RenderPassColorAttachment {
                        view: &frame.output.view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(clear_color),
                            store: true,
                        },
                    }],
                    depth_stencil_attachment: None,
                });

                renderer
                    .render(ui.render(), &queue, &device, &mut rpass)
                    .expect("Rendering failed");

                drop(rpass);

                queue.submit(Some(encoder.finish()));
            }
            _ => (),
        }

        platform.handle_event(imgui.io_mut(), &window, &event);
    });
}
33.388646
99
0.481951
387c47e9d31a0113f8df0d77e1b7c471e0a028ac
6,692
pub use crate::profile::Sender as ProfileSender;
use futures::future;
pub use linkerd2_app_core::proxy::{
    api_resolve::{Metadata, ProtocolHint},
    core::resolve::{Resolve, Update},
};
use linkerd2_app_core::{
    profiles::{self, Profile},
    Error,
};
use std::collections::HashMap;
use std::hash::Hash;
use std::net::SocketAddr;
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc, Mutex,
};
use std::task::{Context, Poll};
use tokio::sync::{mpsc, watch};

#[derive(Debug)]
pub struct Resolver<T, E> {
    state: Arc<State<T, E>>,
}

pub type Dst<T, E> = Resolver<T, DstReceiver<E>>;

pub type Profiles<T> = Resolver<T, Option<profiles::Receiver>>;

#[derive(Debug, Clone)]
pub struct DstSender<T>(mpsc::UnboundedSender<Result<Update<T>, Error>>);

#[derive(Debug, Clone)]
pub struct Handle<T, E>(Arc<State<T, E>>);

#[derive(Debug)]
struct State<T, E> {
    endpoints: Mutex<HashMap<T, E>>,
    // Keep unused_senders open if they're not going to be used.
    unused_senders: Mutex<Vec<Box<dyn std::any::Any + Send + Sync + 'static>>>,
    only: AtomicBool,
}

pub type DstReceiver<E> = mpsc::UnboundedReceiver<Result<Update<E>, Error>>;

impl<T, E> Default for Resolver<T, E>
where
    T: Hash + Eq,
{
    fn default() -> Self {
        Self {
            state: Arc::new(State {
                endpoints: Mutex::new(HashMap::new()),
                unused_senders: Mutex::new(Vec::new()),
                only: AtomicBool::new(true),
            }),
        }
    }
}

impl<T, E> Resolver<T, E>
where
    T: Hash + Eq,
{
    pub fn with_handle() -> (Self, Handle<T, E>) {
        let r = Self::default();
        let handle = r.handle();
        (r, handle)
    }

    pub fn handle(&self) -> Handle<T, E> {
        Handle(self.state.clone())
    }
}

impl<T, E> Clone for Resolver<T, E> {
    fn clone(&self) -> Self {
        Self {
            state: self.state.clone(),
        }
    }
}

// === destination resolver ===

impl<T, E> Dst<T, E>
where
    T: Hash + Eq,
{
    pub fn endpoint_tx(&self, t: impl Into<T>) -> DstSender<E> {
        let (tx, rx) = mpsc::unbounded_channel();
        self.state.endpoints.lock().unwrap().insert(t.into(), rx);
        DstSender(tx)
    }

    pub fn endpoint_exists(self, t: impl Into<T>, addr: SocketAddr, meta: E) -> Self {
        let mut tx = self.endpoint_tx(t);
        tx.add(vec![(addr, meta)]).unwrap();
        self
    }
}

impl<T, E> tower::Service<T> for Dst<T, E>
where
    T: Hash + Eq + std::fmt::Debug,
{
    type Response = DstReceiver<E>;
    type Future = futures::future::Ready<Result<Self::Response, Self::Error>>;
    type Error = Error;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, target: T) -> Self::Future {
        let span = tracing::trace_span!("mock_resolver", ?target);
        let _e = span.enter();

        let res = self
            .state
            .endpoints
            .lock()
            .unwrap()
            .remove(&target)
            .map(|x| {
                tracing::trace!("found endpoint for target");
                x
            })
            .unwrap_or_else(|| {
                tracing::debug!(?target, "no endpoint configured for");
                // An unknown endpoint was resolved!
                self.state
                    .only
                    .compare_and_swap(true, false, Ordering::Release);
                let (tx, rx) = mpsc::unbounded_channel();
                let _ = tx.send(Ok(Update::DoesNotExist));
                rx
            });

        future::ok(res)
    }
}

// === profile resolver ===

impl<T> Profiles<T>
where
    T: Hash + Eq,
{
    pub fn profile_tx(&self, addr: T) -> ProfileSender {
        let (tx, rx) = watch::channel(Profile::default());
        self.state.endpoints.lock().unwrap().insert(addr, Some(rx));
        tx
    }

    pub fn profile(self, addr: T, profile: Profile) -> Self {
        let (tx, rx) = watch::channel(profile);
        self.state.unused_senders.lock().unwrap().push(Box::new(tx));
        self.state.endpoints.lock().unwrap().insert(addr, Some(rx));
        self
    }

    pub fn no_profile(self, addr: T) -> Self {
        self.state.endpoints.lock().unwrap().insert(addr, None);
        self
    }
}

impl<T> tower::Service<T> for Profiles<T>
where
    T: Hash + Eq + std::fmt::Debug,
{
    type Error = Error;
    type Response = Option<profiles::Receiver>;
    type Future = futures::future::Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, addr: T) -> Self::Future {
        let span = tracing::trace_span!("mock_profile", ?addr);
        let _e = span.enter();

        let res = self
            .state
            .endpoints
            .lock()
            .unwrap()
            .remove(&addr)
            .map(|x| {
                tracing::trace!("found endpoint for addr");
                x
            })
            .unwrap_or_else(|| {
                tracing::debug!(?addr, "no endpoint configured for");
                // An unknown endpoint was resolved!
                self.state
                    .only
                    .compare_and_swap(true, false, Ordering::Release);
                None
            });

        future::ok(res)
    }
}

// === impl Sender ===

impl<E> DstSender<E> {
    pub fn update(&mut self, up: Update<E>) -> Result<(), ()> {
        self.0.send(Ok(up)).map_err(|_| ())
    }

    pub fn add(&mut self, addrs: impl IntoIterator<Item = (SocketAddr, E)>) -> Result<(), ()> {
        self.update(Update::Add(addrs.into_iter().collect()))
    }

    pub fn remove(&mut self, addrs: impl IntoIterator<Item = SocketAddr>) -> Result<(), ()> {
        self.update(Update::Remove(addrs.into_iter().collect()))
    }

    pub fn reset(&mut self, addrs: impl IntoIterator<Item = (SocketAddr, E)>) -> Result<(), ()> {
        self.update(Update::Reset(addrs.into_iter().collect()))
    }

    pub fn does_not_exist(&mut self) -> Result<(), ()> {
        self.update(Update::DoesNotExist)
    }

    pub fn err(&mut self, e: impl Into<Error>) -> Result<(), ()> {
        self.0.send(Err(e.into())).map_err(|_| ())
    }
}

// === impl Handle ===

impl<T, E> Handle<T, E> {
    /// Returns `true` if all configured endpoints were resolved exactly once.
    pub fn is_empty(&self) -> bool {
        self.0.endpoints.lock().unwrap().is_empty()
    }

    /// Returns `true` if only the configured endpoints were resolved.
    pub fn only_configured(&self) -> bool {
        self.0.only.load(Ordering::Acquire)
    }
}
27.203252
97
0.543335
218e9668df4ae60ce6ec08672aa6e2485f01344a
16,128
//! This implements the core logic of the compression scheme used to compactly
//! encode Unicode properties.
//!
//! We have two primary goals with the encoding: we want to be compact, because
//! these tables often end up in ~every Rust program (especially the
//! grapheme_extend table, used for str debugging), including those for embedded
//! targets (where space is important). We also want to be relatively fast,
//! though this is more of a nice to have rather than a key design constraint.
//! It is expected that libraries/applications which are performance-sensitive
//! to Unicode property lookups are extremely rare, and those that care may find
//! the tradeoff of the raw bitsets worth it. For most applications, a
//! relatively fast but much smaller (and as such less cache-impacting, etc.)
//! data set is likely preferable.
//!
//! We have two separate encoding schemes: a skiplist-like approach, and a
//! compressed bitset. The datasets we consider mostly use the skiplist (it's
//! smaller) but the lowercase and uppercase sets are sufficiently sparse for
//! the bitset to be worthwhile -- for those sets the bitset is a 2x size win.
//! Since the bitset is also faster, this seems an obvious choice. (As a
//! historical note, the bitset was also the prior implementation, so its
//! relative complexity had already been paid).
//!
//! ## The bitset
//!
//! The primary idea is that we 'flatten' the Unicode ranges into an enormous
//! bitset. To represent any arbitrary codepoint in a raw bitset, we would need
//! over 17 kilobytes of data per character set -- way too much for our
//! purposes.
//!
//! First, the raw bitset (one bit for every valid `char`, from 0 to 0x10FFFF,
//! not skipping the small 'gap') is associated into words (u64) and
//! deduplicated. On random data, this would be useless; on our data, this is
//! incredibly beneficial -- our data sets have (far) less than 256 unique
//! words.
//!
//! This gives us an array that maps `u8 -> word`; the current algorithm does
//! not handle the case of more than 256 unique words, but we are relatively far
//! from coming that close.
//!
//! With that scheme, we now have a single byte for every 64 codepoints.
//!
//! We further chunk these by some constant N (between 1 and 64 per group,
//! dynamically chosen for smallest size), and again deduplicate and store in an
//! array (u8 -> [u8; N]).
//!
//! The bytes of this array map into the words from the bitset above, but we
//! apply another trick here: some of these words are similar enough that they
//! can be represented by some function of another word. The particular
//! functions chosen are rotation, inversion, and shifting (right).
//!
//! ## The skiplist
//!
//! The skip list arose out of the desire for an even smaller encoding than the
//! bitset -- and was the answer to the question "what is the smallest
//! representation we can imagine?". However, it is not necessarily the
//! smallest, and if you have a better proposal, please do suggest it!
//!
//! This is a relatively straightforward encoding. First, we break up all the
//! ranges in the input data into offsets from each other, essentially a gap
//! encoding. In practice, most gaps are small -- less than u8::MAX -- so we
//! store those directly. We make use of the larger gaps (which are nicely
//! interspersed already) throughout the dataset to index this data set.
//!
//! In particular, each run of small gaps (terminating in a large gap) is
//! indexed in a separate dataset. That data set stores an index into the
//! primary offset list and a prefix sum of that offset list. These are packed
//! into a single u32 (11 bits for the offset, 21 bits for the prefix sum).
//!
//! Lookup proceeds via a binary search in the index and then a straightforward
//! linear scan (adding up the offsets) until we reach the needle, and then the
//! index of that offset is utilized as the answer to whether we're in the set
//! or not.

use std::collections::{BTreeMap, HashMap};
use std::ops::Range;
use ucd_parse::Codepoints;

mod case_mapping;
mod raw_emitter;
mod skiplist;
mod unicode_download;

use raw_emitter::{emit_codepoints, RawEmitter};

static PROPERTIES: &[&str] = &[
    "Alphabetic",
    "Lowercase",
    "Uppercase",
    "Cased",
    "Case_Ignorable",
    "Grapheme_Extend",
    "White_Space",
    "Cc",
    "N",
];

struct UnicodeData {
    ranges: Vec<(&'static str, Vec<Range<u32>>)>,
    to_upper: BTreeMap<u32, (u32, u32, u32)>,
    to_lower: BTreeMap<u32, (u32, u32, u32)>,
}

fn to_mapping(origin: u32, codepoints: Vec<ucd_parse::Codepoint>) -> Option<(u32, u32, u32)> {
    let mut a = None;
    let mut b = None;
    let mut c = None;

    for codepoint in codepoints {
        if origin == codepoint.value() {
            return None;
        }

        if a.is_none() {
            a = Some(codepoint.value());
        } else if b.is_none() {
            b = Some(codepoint.value());
        } else if c.is_none() {
            c = Some(codepoint.value());
        } else {
            panic!("more than 3 mapped codepoints")
        }
    }

    Some((a.unwrap(), b.unwrap_or(0), c.unwrap_or(0)))
}

static UNICODE_DIRECTORY: &str = "unicode-downloads";

fn load_data() -> UnicodeData {
    unicode_download::fetch_latest();

    let mut properties = HashMap::new();
    for row in ucd_parse::parse::<_, ucd_parse::CoreProperty>(&UNICODE_DIRECTORY).unwrap() {
        if let Some(name) = PROPERTIES.iter().find(|prop| **prop == row.property.as_str()) {
            properties.entry(*name).or_insert_with(Vec::new).push(row.codepoints);
        }
    }
    for row in ucd_parse::parse::<_, ucd_parse::Property>(&UNICODE_DIRECTORY).unwrap() {
        if let Some(name) = PROPERTIES.iter().find(|prop| **prop == row.property.as_str()) {
            properties.entry(*name).or_insert_with(Vec::new).push(row.codepoints);
        }
    }

    let mut to_lower = BTreeMap::new();
    let mut to_upper = BTreeMap::new();
    for row in ucd_parse::UnicodeDataExpander::new(
        ucd_parse::parse::<_, ucd_parse::UnicodeData>(&UNICODE_DIRECTORY).unwrap(),
    ) {
        let general_category = if ["Nd", "Nl", "No"].contains(&row.general_category.as_str()) {
            "N"
        } else {
            row.general_category.as_str()
        };
        if let Some(name) = PROPERTIES.iter().find(|prop| **prop == general_category) {
            properties
                .entry(*name)
                .or_insert_with(Vec::new)
                .push(Codepoints::Single(row.codepoint));
        }

        if let Some(mapped) = row.simple_lowercase_mapping {
            if mapped != row.codepoint {
                to_lower.insert(row.codepoint.value(), (mapped.value(), 0, 0));
            }
        }
        if let Some(mapped) = row.simple_uppercase_mapping {
            if mapped != row.codepoint {
                to_upper.insert(row.codepoint.value(), (mapped.value(), 0, 0));
            }
        }
    }

    for row in ucd_parse::parse::<_, ucd_parse::SpecialCaseMapping>(&UNICODE_DIRECTORY).unwrap() {
        if !row.conditions.is_empty() {
            // Skip conditional case mappings
            continue;
        }

        let key = row.codepoint.value();
        if let Some(lower) = to_mapping(key, row.lowercase) {
            to_lower.insert(key, lower);
        }
        if let Some(upper) = to_mapping(key, row.uppercase) {
            to_upper.insert(key, upper);
        }
    }

    let mut properties: HashMap<&'static str, Vec<Range<u32>>> = properties
        .into_iter()
        .map(|(k, v)| {
            (
                k,
                v.into_iter()
                    .flat_map(|codepoints| match codepoints {
                        Codepoints::Single(c) => c
                            .scalar()
                            .map(|ch| (ch as u32..ch as u32 + 1))
                            .into_iter()
                            .collect::<Vec<_>>(),
                        Codepoints::Range(c) => c
                            .into_iter()
                            .flat_map(|c| c.scalar().map(|ch| (ch as u32..ch as u32 + 1)))
                            .collect::<Vec<_>>(),
                    })
                    .collect::<Vec<Range<u32>>>(),
            )
        })
        .collect();

    for ranges in properties.values_mut() {
        merge_ranges(ranges);
    }

    let mut properties = properties.into_iter().collect::<Vec<_>>();
    properties.sort_by_key(|p| p.0);

    UnicodeData { ranges: properties, to_lower, to_upper }
}

fn main() {
    let write_location = std::env::args().nth(1).unwrap_or_else(|| {
        eprintln!("Must provide path to write unicode tables to");
        eprintln!(
            "e.g. {} library/core/unicode/unicode_data.rs",
            std::env::args().next().unwrap_or_default()
        );
        std::process::exit(1);
    });

    // Optional test path, which is a Rust source file testing that the unicode
    // property lookups are correct.
    let test_path = std::env::args().nth(2);

    let unicode_data = load_data();
    let ranges_by_property = &unicode_data.ranges;

    if let Some(path) = test_path {
        std::fs::write(&path, generate_tests(&write_location, &ranges_by_property)).unwrap();
    }

    let mut total_bytes = 0;
    let mut modules = Vec::new();
    for (property, ranges) in ranges_by_property {
        let datapoints = ranges.iter().map(|r| r.end - r.start).sum::<u32>();
        let mut emitter = RawEmitter::new();
        emit_codepoints(&mut emitter, &ranges);

        modules.push((property.to_lowercase().to_string(), emitter.file));
        println!(
            "{:15}: {} bytes, {} codepoints in {} ranges ({} - {}) using {}",
            property,
            emitter.bytes_used,
            datapoints,
            ranges.len(),
            ranges.first().unwrap().start,
            ranges.last().unwrap().end,
            emitter.desc,
        );
        total_bytes += emitter.bytes_used;
    }

    let mut table_file = String::new();

    table_file.push_str(
        "///! This file is generated by src/tools/unicode-table-generator; do not edit manually!\n",
    );

    // Include the range search function
    table_file.push('\n');
    table_file.push_str(include_str!("range_search.rs"));
    table_file.push('\n');

    table_file.push_str(&version());

    table_file.push('\n');

    modules.push((String::from("conversions"), case_mapping::generate_case_mapping(&unicode_data)));

    for (name, contents) in modules {
        table_file.push_str("#[rustfmt::skip]\n");
        table_file.push_str(&format!("pub mod {} {{\n", name));
        for line in contents.lines() {
            if !line.trim().is_empty() {
                table_file.push_str("    ");
                table_file.push_str(&line);
            }
            table_file.push('\n');
        }
        table_file.push_str("}\n\n");
    }

    std::fs::write(&write_location, format!("{}\n", table_file.trim_end())).unwrap();

    println!("Total table sizes: {} bytes", total_bytes);
}

fn version() -> String {
    let mut out = String::new();
    out.push_str("pub const UNICODE_VERSION: (u8, u8, u8) = ");

    let readme =
        std::fs::read_to_string(std::path::Path::new(UNICODE_DIRECTORY).join("ReadMe.txt"))
            .unwrap();

    let prefix = "for Version ";
    let start = readme.find(prefix).unwrap() + prefix.len();
    let end = readme.find(" of the Unicode Standard.").unwrap();
    let version =
        readme[start..end].split('.').map(|v| v.parse::<u32>().expect(&v)).collect::<Vec<_>>();
    let [major, minor, micro] = [version[0], version[1], version[2]];

    out.push_str(&format!("({}, {}, {});\n", major, minor, micro));
    out
}

fn fmt_list<V: std::fmt::Debug>(values: impl IntoIterator<Item = V>) -> String {
    let pieces = values.into_iter().map(|b| format!("{:?}, ", b)).collect::<Vec<_>>();
    let mut out = String::new();
    let mut line = String::from("\n    ");
    for piece in pieces {
        if line.len() + piece.len() < 98 {
            line.push_str(&piece);
        } else {
            out.push_str(line.trim_end());
            out.push('\n');
            line = format!("    {}", piece);
        }
    }
    out.push_str(line.trim_end());
    out.push('\n');
    out
}

fn generate_tests(data_path: &str, ranges: &[(&str, Vec<Range<u32>>)]) -> String {
    let mut s = String::new();
    s.push_str("#![allow(incomplete_features, unused)]\n");
    s.push_str("#![feature(const_generics)]\n\n");
    s.push_str("\n#[allow(unused)]\nuse std::hint;\n");
    s.push_str(&format!("#[path = \"{}\"]\n", data_path));
    s.push_str("mod unicode_data;\n\n");

    s.push_str("\nfn main() {\n");

    for (property, ranges) in ranges {
        s.push_str(&format!(r#"    println!("Testing {}");"#, property));
        s.push('\n');
        s.push_str(&format!("    {}_true();\n", property.to_lowercase()));
        s.push_str(&format!("    {}_false();\n", property.to_lowercase()));
        let mut is_true = Vec::new();
        let mut is_false = Vec::new();
        for ch_num in 0..(std::char::MAX as u32) {
            if std::char::from_u32(ch_num).is_none() {
                continue;
            }
            if ranges.iter().any(|r| r.contains(&ch_num)) {
                is_true.push(ch_num);
            } else {
                is_false.push(ch_num);
            }
        }

        s.push_str(&format!("    fn {}_true() {{\n", property.to_lowercase()));
        generate_asserts(&mut s, property, &is_true, true);
        s.push_str("    }\n\n");
        s.push_str(&format!("    fn {}_false() {{\n", property.to_lowercase()));
        generate_asserts(&mut s, property, &is_false, false);
        s.push_str("    }\n\n");
    }

    s.push_str("}");
    s
}

fn generate_asserts(s: &mut String, property: &str, points: &[u32], truthy: bool) {
    for range in ranges_from_set(points) {
        if range.end == range.start + 1 {
            s.push_str(&format!(
                "        assert!({}unicode_data::{}::lookup({:?}), \"{}\");\n",
                if truthy { "" } else { "!" },
                property.to_lowercase(),
                std::char::from_u32(range.start).unwrap(),
                range.start,
            ));
        } else {
            s.push_str(&format!("        for chn in {:?}u32 {{\n", range));
            s.push_str(&format!(
                "            assert!({}unicode_data::{}::lookup(std::char::from_u32(chn).unwrap()), \"{{:?}}\", chn);\n",
                if truthy { "" } else { "!" },
                property.to_lowercase(),
            ));
            s.push_str("        }\n");
        }
    }
}

fn ranges_from_set(set: &[u32]) -> Vec<Range<u32>> {
    let mut ranges = set.iter().map(|e| (*e)..(*e + 1)).collect::<Vec<Range<u32>>>();
    merge_ranges(&mut ranges);
    ranges
}

fn merge_ranges(ranges: &mut Vec<Range<u32>>) {
    loop {
        let mut new_ranges = Vec::new();
        let mut idx_iter = 0..(ranges.len() - 1);
        let mut should_insert_last = true;
        while let Some(idx) = idx_iter.next() {
            let cur = ranges[idx].clone();
            let next = ranges[idx + 1].clone();
            if cur.end == next.start {
                if idx_iter.next().is_none() {
                    // We're merging the last element
                    should_insert_last = false;
                }
                new_ranges.push(cur.start..next.end);
            } else {
                // We're *not* merging the last element
                should_insert_last = true;
                new_ranges.push(cur);
            }
        }
        if should_insert_last {
            new_ranges.push(ranges.last().unwrap().clone());
        }
        if new_ranges.len() == ranges.len() {
            *ranges = new_ranges;
            break;
        } else {
            *ranges = new_ranges;
        }
    }

    let mut last_end = None;
    for range in ranges {
        if let Some(last) = last_end {
            assert!(range.start > last, "{:?}", range);
        }
        last_end = Some(range.end);
    }
}
36.654545
121
0.576017
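The index packing the skiplist section describes (11 bits of offset index and 21 bits of prefix sum sharing one u32) is easy to get wrong, so a small sketch of the pack/unpack arithmetic may help. The field order below is an assumption taken from the doc comment, not from the emitter code:

const OFFSET_BITS: u32 = 11;
const PREFIX_SUM_BITS: u32 = 21;

// Pack an index into the primary offset list together with the prefix
// sum up to that run, into a single u32.
fn pack(offset_idx: u32, prefix_sum: u32) -> u32 {
    assert!(offset_idx < (1 << OFFSET_BITS));
    assert!(prefix_sum < (1 << PREFIX_SUM_BITS));
    (offset_idx << PREFIX_SUM_BITS) | prefix_sum
}

// Recover (offset index, prefix sum) from the packed form.
fn unpack(packed: u32) -> (u32, u32) {
    (packed >> PREFIX_SUM_BITS, packed & ((1 << PREFIX_SUM_BITS) - 1))
}

fn main() {
    let v = pack(42, 0x10FF);
    assert_eq!(unpack(v), (42, 0x10FF));
}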
7a7b76368e37cadf68216fff9d3b15e584cc8224
1,544
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0

#![forbid(unsafe_code)]

//! Test infrastructure for the Libra VM.
//!
//! This crate contains helpers for executing tests against the Libra VM.

use libra_types::{transaction::TransactionStatus, vm_status::VMStatus};

#[cfg(test)]
mod tests;

pub mod account;
pub mod account_universe;
pub mod common_transactions;
pub mod compile;
pub mod data_store;
pub mod execution_strategies;
pub mod executor;
pub mod gas_costs;
pub mod keygen;
mod proptest_types;

pub fn assert_status_eq(s1: &VMStatus, s2: &VMStatus) -> bool {
    // TODO(tmn) After providing real abort locations, use normal equality
    assert_eq!(s1.status_code(), s2.status_code());
    assert_eq!(s1.move_abort_code(), s2.move_abort_code());
    true
}

pub fn transaction_status_eq(t1: &TransactionStatus, t2: &TransactionStatus) -> bool {
    match (t1, t2) {
        (TransactionStatus::Discard(s1), TransactionStatus::Discard(s2))
        | (TransactionStatus::Keep(s1), TransactionStatus::Keep(s2)) => assert_status_eq(s1, s2),
        _ => false,
    }
}

#[macro_export]
macro_rules! assert_prologue_parity {
    ($e1:expr, $e2:expr, $e3:expr) => {
        assert_status_eq(&$e1.unwrap(), &$e3);
        assert!(transaction_status_eq($e2, &TransactionStatus::Discard($e3)));
    };
}

#[macro_export]
macro_rules! assert_prologue_disparity {
    ($e1:expr => $e2:expr, $e3:expr => $e4:expr) => {
        assert_eq!($e1, $e2);
        assert!(transaction_status_eq($e3, &$e4));
    };
}
27.571429
97
0.686528
64f9565382886d3ac355f905881ec8ea18720c35
389
// Test that we don't ICE after trying to construct a cross-file snippet #63800.

// compile-flags: --test

#[macro_use]
#[path = "move-error-snippets-ext.rs"]
mod move_error_snippets_ext;

struct A;

macro_rules! sss {
    () => {
        #[test]
        fn fff() {
            static D: A = A;
            aaa!(D); //~ ERROR cannot move
        }
    };
}

sss!();

fn main() {}
16.208333
80
0.526992
bf9e66981e30a9edf8e970719b4d32ff38309ef1
4,958
#![allow(clippy::ptr_arg)]
//! # Nearest Neighbors Search Algorithms and Data Structures
//!
//! Nearest neighbor search is a basic computational tool that is particularly relevant to machine learning,
//! where it is often believed that high-dimensional datasets have low-dimensional intrinsic structure.
//! The basic nearest neighbor problem is formalized as follows: given a set \\( S \\) of \\( n \\) points in some metric space \\( (X, d) \\),
//! the problem is to preprocess \\( S \\) so that given a query point \\( p \in X \\), one can efficiently find a point \\( q \in S \\)
//! which minimizes \\( d(p, q) \\).
//!
//! [The most straightforward nearest neighbor search algorithm](linear_search/index.html) finds k nearest points using the brute-force approach where distances between all
//! pairs of points in the dataset are calculated. This approach scales as \\( O(nd^2) \\), where \\( n = \lvert S \rvert \\) is the number of samples and \\( d \\) is the number
//! of dimensions of the metric space. As the number of samples grows, the brute-force approach quickly becomes infeasible.
//!
//! [Cover Tree](cover_tree/index.html) is a data structure that partitions metric spaces to speed up nearest neighbor search. A cover tree requires \\( O(n) \\) space and
//! has nice theoretical properties:
//!
//! * construction time: \\( O(c^6n \log n) \\),
//! * insertion time \\( O(c^6 \log n) \\),
//! * removal time: \\( O(c^6 \log n) \\),
//! * query time: \\( O(c^{12} \log n) \\),
//!
//! Where \\( c \\) is a constant.
//!
//! ## References:
//! * ["The Art of Computer Programming" Knuth, D, Vol. 3, 2nd ed, Sorting and Searching, 1998](https://www-cs-faculty.stanford.edu/~knuth/taocp.html)
//! * ["Cover Trees for Nearest Neighbor" Beygelzimer et al., Proceedings of the 23rd international conference on Machine learning, ICML'06 (2006)](https://hunch.net/~jl/projects/cover_tree/cover_tree.html)
//! * ["Faster cover trees." Izbicki et al., Proceedings of the 32nd International Conference on Machine Learning, ICML'15 (2015)](http://www.cs.ucr.edu/~cshelton/papers/index.cgi%3FIzbShe15)
//! * ["The Elements of Statistical Learning: Data Mining, Inference, and Prediction" Trevor et al., 2nd edition, chapter 13](https://web.stanford.edu/~hastie/ElemStatLearn/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>

use crate::algorithm::neighbour::cover_tree::CoverTree;
use crate::algorithm::neighbour::linear_search::LinearKNNSearch;
use crate::error::Failed;
use crate::math::distance::Distance;
use crate::math::num::RealNumber;
use serde::{Deserialize, Serialize};

pub(crate) mod bbd_tree;
/// tree data structure for fast nearest neighbor search
pub mod cover_tree;
/// very simple algorithm that sequentially checks each element of the list until a match is found or the whole list has been searched.
pub mod linear_search;

/// Both the KNN classifier and the regressor benefit from underlying search algorithms that help to speed up queries.
/// `KNNAlgorithmName` maintains a list of supported search algorithms, see [KNN algorithms](../algorithm/neighbour/index.html)
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum KNNAlgorithmName {
    /// Heap Search algorithm, see [`LinearSearch`](../algorithm/neighbour/linear_search/index.html)
    LinearSearch,
    /// Cover Tree Search algorithm, see [`CoverTree`](../algorithm/neighbour/cover_tree/index.html)
    CoverTree,
}

#[derive(Serialize, Deserialize, Debug)]
pub(crate) enum KNNAlgorithm<T: RealNumber, D: Distance<Vec<T>, T>> {
    LinearSearch(LinearKNNSearch<Vec<T>, T, D>),
    CoverTree(CoverTree<Vec<T>, T, D>),
}

impl KNNAlgorithmName {
    pub(crate) fn fit<T: RealNumber, D: Distance<Vec<T>, T>>(
        &self,
        data: Vec<Vec<T>>,
        distance: D,
    ) -> Result<KNNAlgorithm<T, D>, Failed> {
        match *self {
            KNNAlgorithmName::LinearSearch => {
                LinearKNNSearch::new(data, distance).map(KNNAlgorithm::LinearSearch)
            }
            KNNAlgorithmName::CoverTree => {
                CoverTree::new(data, distance).map(KNNAlgorithm::CoverTree)
            }
        }
    }
}

impl<T: RealNumber, D: Distance<Vec<T>, T>> KNNAlgorithm<T, D> {
    pub fn find(&self, from: &Vec<T>, k: usize) -> Result<Vec<(usize, T, &Vec<T>)>, Failed> {
        match *self {
            KNNAlgorithm::LinearSearch(ref linear) => linear.find(from, k),
            KNNAlgorithm::CoverTree(ref cover) => cover.find(from, k),
        }
    }

    pub fn find_radius(
        &self,
        from: &Vec<T>,
        radius: T,
    ) -> Result<Vec<(usize, T, &Vec<T>)>, Failed> {
        match *self {
            KNNAlgorithm::LinearSearch(ref linear) => linear.find_radius(from, radius),
            KNNAlgorithm::CoverTree(ref cover) => cover.find_radius(from, radius),
        }
    }
}
50.591837
206
0.671843
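The brute-force search the module docs describe is short enough to sketch independently of the library's matrix and trait machinery. A minimal k-NN over plain slices with Euclidean distance (illustrative only; smartcore's own LinearKNNSearch is the real implementation):

// Brute-force k nearest neighbours: compute the distance from the
// query to every sample, then keep the k smallest.
fn linear_knn(data: &[Vec<f64>], query: &[f64], k: usize) -> Vec<(usize, f64)> {
    let mut dists: Vec<(usize, f64)> = data
        .iter()
        .enumerate()
        .map(|(i, p)| {
            // Euclidean distance between the sample and the query.
            let d = p
                .iter()
                .zip(query)
                .map(|(a, b)| (a - b) * (a - b))
                .sum::<f64>()
                .sqrt();
            (i, d)
        })
        .collect();
    dists.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
    dists.truncate(k);
    dists
}

fn main() {
    let data = vec![vec![0.0, 0.0], vec![1.0, 1.0], vec![5.0, 5.0]];
    let hits = linear_knn(&data, &[0.9, 1.1], 2);
    assert_eq!(hits[0].0, 1); // nearest sample
    assert_eq!(hits[1].0, 0); // second nearest
}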