column     dtype         stats
file_name  large_string  lengths 4–69
prefix     large_string  lengths 0–26.7k
suffix     large_string  lengths 0–24.8k
middle     large_string  lengths 0–2.12k
fim_type   large_string  classes: 4 values
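Each record that follows is one fill-in-the-middle (FIM) example: concatenating its `prefix`, `middle`, and `suffix` cells reproduces the original source file. A minimal sketch of that reassembly (the function is illustrative, not part of the dataset tooling):

```rust
/// Rebuild the original file contents from one FIM record.
/// `prefix` + `middle` + `suffix` is the defining invariant of the schema above.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut src = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    src.push_str(prefix);
    src.push_str(middle);
    src.push_str(suffix);
    src
}
```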
lz4.rs
use std::fs::File; use std::path::Path; use std::io::Read; let stream = File::open(&Path::new("path/to/file.lz4")).unwrap(); let mut decompressed = Vec::new(); lz4::Decoder::new(stream).read_to_end(&mut decompressed); ``` # Credit This implementation is largely based on Branimir Karadžić's implementation which can be found at https://github.com/bkaradzic/go-lz4. */ use std::cmp; use std::ptr::copy_nonoverlapping; use std::io::{self, Read, Write}; use std::iter::repeat; use std::vec::Vec; use std::num::Wrapping; use std::ops::Shr; use super::byteorder::{LittleEndian, WriteBytesExt, ReadBytesExt}; use super::{ReadExact, byteorder_err_to_io}; const MAGIC: u32 = 0x184d2204; const ML_BITS: u32 = 4; const ML_MASK: u32 = (1 << ML_BITS as usize) - 1; const RUN_BITS: u32 = 8 - ML_BITS; const RUN_MASK: u32 = (1 << RUN_BITS as usize) - 1; const MIN_MATCH: u32 = 4; const HASH_LOG: u32 = 17; const HASH_TABLE_SIZE: u32 = 1 << (HASH_LOG as usize); const HASH_SHIFT: u32 = (MIN_MATCH * 8) - HASH_LOG; const INCOMPRESSIBLE: u32 = 128; const UNINITHASH: u32 = 0x88888888; const MAX_INPUT_SIZE: u32 = 0x7e000000; struct BlockDecoder<'a> { input: &'a [u8], output: &'a mut Vec<u8>, cur: usize, start: usize, end: usize, } impl<'a> BlockDecoder<'a> { /// Decodes this block of data from 'input' to 'output', returning the /// number of valid bytes in the output. fn decode(&mut self) -> usize { while self.cur < self.input.len() { let code = self.bump(); debug!("block with code: {:x}", code); // Extract a chunk of data from the input to the output. { let len = self.length(code >> 4); debug!("consume len {}", len); if len > 0 { let end = self.end; self.grow_output(end + len); unsafe { copy_nonoverlapping( &self.input[self.cur], &mut self.output[end], len )}; self.end += len; self.cur += len; } } if self.cur == self.input.len() { break } // Read off the next i16 offset { let back = (self.bump() as usize) | ((self.bump() as usize) << 8); debug!("found back {}", back); self.start = self.end - back; } // Slosh around some bytes now { let mut len = self.length(code & 0xf); let literal = self.end - self.start; if literal < 4 { static DECR: [usize; 4] = [0, 3, 2, 3]; self.cp(4, DECR[literal]); } else { len += 4; } self.cp(len, 0); } } self.end } fn length(&mut self, code: u8) -> usize { let mut ret = code as usize; if code == 0xf { loop { let tmp = self.bump(); ret += tmp as usize; if tmp != 0xff { break } } } ret } fn bump(&mut self) -> u8 { let ret = self.input[self.cur]; self.cur += 1; ret } #[inline] fn cp(&mut self, len: usize, decr: usize) { let end = self.end; self.grow_output(end + len); for i in 0..len { self.output[end + i] = (*self.output)[self.start + i]; } self.end += len; self.start += len - decr; } // Extends the output vector to a target number of bytes (in total), but // does not actually initialize the new data. The length of the vector is // updated, but the bytes will all have undefined values. It is assumed that // the next operation is to pave over these bytes (so the initialization is // unnecessary). 
#[inline] fn grow_output(&mut self, target: usize) { if self.output.capacity() < target { debug!("growing {} to {}", self.output.capacity(), target); //let additional = target - self.output.capacity(); //self.output.reserve(additional); while self.output.len() < target { self.output.push(0); } } else { unsafe { self.output.set_len(target); } } } } struct BlockEncoder<'a> { input: &'a [u8], output: &'a mut Vec<u8>, hash_table: Vec<u32>, pos: u32, anchor: u32, dest_pos: u32 } /// Returns maximum possible size of compressed output /// given source size pub fn compression_bound(size: u32) -> Option<u32> { if size > MAX_INPUT_SIZE { None } else { Some(size + (size / 255) + 16 + 4) } } impl<'a> BlockEncoder<'a> { #[inline(always)] fn seq_at(&self, pos: u32) -> u32 { (self.input[pos as usize + 3] as u32) << 24 | (self.input[pos as usize + 2] as u32) << 16 | (self.input[pos as usize + 1] as u32) << 8 | (self.input[pos as usize] as u32) } fn write_literals(&mut self, len: u32, ml_len: u32, pos: u32) { let mut ln = len; let code = if ln > RUN_MASK - 1 { RUN_MASK as u8 } else { ln as u8 }; if ml_len > ML_MASK - 1 { self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ML_MASK as u8; } else { self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ml_len as u8; } self.dest_pos += 1; if code == RUN_MASK as u8 { ln -= RUN_MASK; while ln > 254 { self.output[self.dest_pos as usize] = 255; self.dest_pos += 1; ln -= 255; } self.output[self.dest_pos as usize] = ln as u8; self.dest_pos += 1; } // FIXME: find out why slicing syntax fails tests //self.output[self.dest_pos as usize.. (self.dest_pos + len) as usize] = self.input[pos as uint.. (pos + len) as uint]; for i in 0..(len as usize) { self.output[self.dest_pos as usize + i] = self.input[pos as usize + i]; } self.dest_pos += len; } fn encode(&mut self) -> u32 { let input_len = self.input.len() as u32; match compression_bound(input_len) { None => 0, Some(out_size) => { let out_size_usize = out_size as usize; if self.output.capacity() < out_size_usize { let additional = out_size_usize - self.output.capacity(); self.output.reserve(additional); } unsafe {self.output.set_len(out_size_usize); } let mut step = 1u32; let mut limit = INCOMPRESSIBLE; loop { if self.pos + 12 > input_len { let tmp = self.anchor; self.write_literals(self.input.len() as u32 - tmp, 0, tmp); unsafe { self.output.set_len(self.dest_pos as usize) }; return self.dest_pos; } let seq = self.seq_at(self.pos); let hash = (Wrapping(seq) * Wrapping(2654435761)).shr(HASH_SHIFT as usize).0; let mut r = (Wrapping(self.hash_table[hash as usize]) + Wrapping(UNINITHASH)).0; self.hash_table[hash as usize] = (Wrapping(self.pos) - Wrapping(UNINITHASH)).0; if (Wrapping(self.pos) - Wrapping(r)).shr(16).0 != 0 || seq != self.seq_at(r) { if self.pos - self.anchor > limit { limit = limit << 1; step += 1 + (step >> 2); } self.pos += step; continue; } if step > 1 { self.hash_table[hash as usize] = r - UNINITHASH; self.pos -= step - 1; step = 1; continue; } limit = INCOMPRESSIBLE; let ln = self.pos - self.anchor; let back = self.pos - r; let anchor = self.anchor; self.pos += MIN_MATCH; r += MIN_MATCH; self.anchor = self.pos; while (self.pos < input_len - 5) && self.input[self.pos as usize] == self.input[r as usize] { self.pos += 1; r += 1 } let mut ml_len = self.pos - self.anchor; self.write_literals(ln, ml_len, anchor); self.output[self.dest_pos as usize] = back as u8; self.output[self.dest_pos as usize + 1] = (back >> 8) as u8; self.dest_pos += 2; if ml_len > ML_MASK - 1 { ml_len -= ML_MASK; 
while ml_len > 254 { ml_len -= 255; self.output[self.dest_pos as usize] = 255; self.dest_pos += 1; } self.output[self.dest_pos as usize] = ml_len as u8; self.dest_pos += 1; } self.anchor = self.pos; } } } } } /// This structure is used to decode a stream of LZ4 blocks. This wraps an /// internal reader which is read from when this decoder's read method is /// called. pub struct Decoder<R> { /// The internally wrapped reader. This is exposed so it may be moved out /// of. Note that if data is read from the reader while decoding is in /// progress the output stream will get corrupted. pub r: R, temp: Vec<u8>, output: Vec<u8>, start: usize, end: usize, eof: bool, header: bool, blk_checksum: bool, stream_checksum: bool, max_block_size: usize, } impl<R: Read + Sized> Decoder<R> { /// Creates a new decoder which will read data from the given stream. The /// inner stream can be re-acquired by moving out of the `r` field of this /// structure. pub fn new(r: R) -> Decoder<R> { Decoder { r: r, temp: Vec::new(), output: Vec::new(), header: false, blk_checksum: false, stream_checksum: false, start: 0, end: 0, eof: false, max_block_size: 0, } } /// Resets this decoder back to its initial state. Note that the underlying /// stream is not seeked on or has any alterations performed on it. pub fn reset(&mut self) { self.header = false; self.eof = false; self.start = 0; self.end = 0; } fn read_header(&mut self) -> io::Result<()> { // Make sure the magic number is what's expected. if try!(self.r.read_u32::<LittleEndian>()) != MAGIC { return Err(io::Error::new(io::ErrorKind::InvalidInput, "")) } let mut bits = [0; 3]; try!(self.r.read(&mut bits[..2])); let flg = bits[0]; let bd = bits[1]; // bits 7/6, the version number. Right now this must be 1 if (flg >> 6) != 0b01 { return Err(io::Error::new(io::ErrorKind::InvalidInput, "")) } // bit 5 is the "block independence", don't care about this yet // bit 4 is whether blocks have checksums or not self.blk_checksum = (flg & 0x10) != 0; // bit 3 is whether there is a following stream size let stream_size = (flg & 0x08) != 0; // bit 2 is whether there is a stream checksum self.stream_checksum = (flg & 0x04) != 0; // bit 1 is reserved // bit 0 is whether there is a preset dictionary let preset_dictionary = (flg & 0x01) != 0; static MAX_SIZES: [usize; 8] = [0, 0, 0, 0, // all N/A 64 << 10, // 64KB 256 << 10, // 256 KB 1 << 20, // 1MB 4 << 20]; // 4MB // bit 7 is reserved // bits 6-4 are the maximum block size let max_block_size = MAX_SIZES[(bd >> 4) as usize & 0x7]; // bits 3-0 are reserved // read off other portions of the stream let size = if stream_size { Some(try!(self.r.read_u64::<LittleEndian>())) } else { None }; assert!(!preset_dictionary, "preset dictionaries not supported yet"); debug!("blk: {}", self.blk_checksum); debug!("stream: {}", self.stream_checksum); debug!("max size: {}", max_block_size); debug!("stream size: {:?}", size); self.max_block_size = max_block_size; // XXX: implement checksums let cksum = try!(self.r.read_u8()); debug!("ignoring header checksum: {}", cksum); return Ok(()); } fn decode_block(&mut self) -> io::Result<bool> { match try!(self.r.read_u32::<LittleEndian>()) { // final block, we're done here 0 => return Ok(false), // raw block to read n if n & 0x80000000 != 0 => { let amt = (n & 0x7fffffff) as usize; self.output.truncate(0); self.output.reserve(amt); try!(self.r.push_exactly(amt as u64, &mut self.output)); self.start = 0; self.end = amt; } // actual block to decompress n => { let n = n as usize; self.temp.truncate(0); 
self.temp.reserve(n); try!(self.r.push_exactly(n as u64, &mut self.temp)); let target = cmp::min(self.max_block_size, 4 * n / 3); self.output.truncate(0); self.output.reserve(target); let mut decoder = BlockDecoder { input: &self.temp[..n], output: &mut self.output, cur: 0, start: 0, end: 0, }; self.start = 0; self.end = decoder.decode(); } } if self.blk_checksum { let cksum = try!(self.r.read_u32::<LittleEndian>()); debug!("ignoring block checksum {}", cksum); } return Ok(true); } /// Tests whether the end of this LZ4 stream has been reached pub fn eof(&mut self) -> bool { self.eof } } impl<R: Read> Read for Decoder<R> { fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> { if self.eof { return Ok(0) } if !self.header { try!(self.read_header()); self.header = true; } let mut amt = dst.len(); let len = amt; while amt > 0 { if self.start == self.end { let keep_going = try!(self.decode_block()); if !keep_going { self.eof = true; break; } } let n = cmp::min(amt, self.end - self.start); unsafe { copy_nonoverlapping( &self.output[self.start], &mut dst[len - amt], n )}; self.start += n; amt -= n; } Ok(len - amt) } } /// This structure is used to compress a stream of bytes using the LZ4 /// compression algorithm. This is a wrapper around an internal writer which ///
# Example ```rust,ignore use compress::lz4;
random_line_split
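The byte-by-byte loop in `BlockDecoder::cp` above is deliberate: an LZ4 match may reference bytes that the same copy is still producing (offset smaller than length), so a bulk `memcpy` would read stale data. A self-contained sketch of that overlapping-copy behaviour (the function name is illustrative):

```rust
/// LZ4-style overlapping match copy: when `back < len`, freshly written
/// bytes are re-read by the same copy, so it must advance one byte at a time.
fn overlapping_copy(out: &mut Vec<u8>, back: usize, len: usize) {
    let start = out.len() - back;
    for i in 0..len {
        let byte = out[start + i];
        out.push(byte);
    }
}

fn main() {
    let mut out = b"ab".to_vec();
    overlapping_copy(&mut out, 2, 6); // a 2-byte pattern expanded to 8 bytes
    assert_eq!(out, b"abababab");
}
```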
pools.rs
use crate::*; use serde::{Deserialize, Serialize}; impl BlockFrostApi { endpoints! { /// List of registered stake pools. pools() -> Vec<String> => "/pools"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools/get"), /// List of already retired pools. pools_retired() -> Vec<RetiredPool> => "/pools/retired"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retired/get"), /// List of retiring stake pools. pools_retiring() -> Vec<RetiringPool> => "/pools/retiring"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retiring/get"), /// Pool information. pools_by_id(pool_id: &str) -> Pool => "/pools/{pool_id}"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}/get"), /// History of stake pool parameters over epochs. pools_history(pool_id: &str) -> Vec<PoolHistory> => "/pools/{pool_id}/history"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1history/get"), /// Stake pool registration metadata. pools_metadata(pool_id: &str) -> PoolMetadata => "/pools/{pool_id}/metadata"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1metadata/get"), /// Relays of a stake pool. pools_relays(pool_id: &str) -> Vec<PoolRelay> => "/pools/{pool_id}/relays"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1relays/get"), /// List of current stake pools delegators. pools_delegators(pool_id: &str) -> Vec<PoolDelegator> => "/pools/{pool_id}/delegators"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1delegators/get"), /// List of stake pool blocks. pools_blocks(pool_id: &str) -> Vec<String> => "/pools/{pool_id}/blocks"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1blocks/get"), /// List of certificate updates to the stake pool. pools_updates(pool_id: &str) -> Vec<PoolUpdate> => "/pools/{pool_id}/updates"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1updates/get"), } } /// Created by [`pools_retired`](BlockFrostApi::pools_retired) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RetiredPool { /// Bech32 encoded pool ID. pub pool_id: String, /// Retirement epoch number. pub epoch: Integer, } /// Created by [`pools_retiring`](BlockFrostApi::pools_retiring) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RetiringPool { /// Bech32 encoded pool ID. pub pool_id: String, /// Retirement epoch number. pub epoch: Integer, } /// Created by [`pools_by_id`](BlockFrostApi::pools_by_id) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Pool { /// Bech32 pool ID. pub pool_id: String, /// Hexadecimal pool ID. pub hex: String, /// VRF key hash. pub vrf_key: String, /// Total minted blocks. pub blocks_minted: Integer, pub live_stake: String, pub live_size: Float, pub live_saturation: Float, pub live_delegators: Integer, pub active_stake: String, pub active_size: Float, /// Stake pool certificate pledge. pub declared_pledge: String, /// Stake pool current pledge. pub live_pledge: String, /// Margin tax cost of the stake pool. pub margin_cost: Float, /// Fixed tax cost of the stake pool. pub fixed_cost: String, /// Bech32 reward account of the stake pool. pub reward_account: String, pub owners: Vec<String>, pub registration: Vec<String>, pub retirement: Vec<String>, } /// Created by [`pools_history`](BlockFrostApi::pools_history) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolHistory { /// Epoch number. 
pub epoch: Integer, /// Number of blocks created by pool. pub blocks: Integer, /// Active (Snapshot of live stake 2 epochs ago) stake in Lovelaces. pub active_stake: String, /// Pool size (percentage) of overall active stake at that epoch. pub active_size: Float, /// Number of delegators for epoch. pub delegators_count: Integer, /// Total rewards received before distribution to delegators. pub rewards: String, /// Pool operator rewards. pub fees: String, } /// Created by [`pools_metadata`](BlockFrostApi::pools_metadata) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolMetadata { /// Bech32 pool ID. pub pool_id: String, /// Hexadecimal pool ID. pub hex: String, /// URL to the stake pool metadata. pub url: Option<String>, /// Hash of the metadata file. pub hash: Option<String>, /// Ticker of the stake pool. pub ticker: Option<String>, /// Name of the stake pool. pub name: Option<String>, /// Description of the stake pool. pub description: Option<String>, /// Home page of the stake pool. pub homepage: Option<String>, } /// Created by [`pools_relays`](BlockFrostApi::pools_relays) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolRelay { /// IPv4 address of the relay. pub ipv4: Option<String>, /// IPv6 address of the relay. pub ipv6: Option<String>, /// DNS name of the relay. pub dns: Option<String>, /// DNS SRV entry of the relay. pub dns_srv: Option<String>, /// Network port of the relay. pub port: Integer, } /// Created by [`pools_delegators`](BlockFrostApi::pools_delegators) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolDelegator { /// Bech32 encoded stake addresses. pub address: String, /// Currently delegated amount. pub live_stake: String, } /// Created by [`pools_updates`](BlockFrostApi::pools_updates) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolUpdate { /// Transaction ID. pub tx_hash: String, /// Certificate within the transaction. pub cert_index: Integer, /// Action in the certificate. pub action: ActionType, // "registered" | "deregistered" } #[cfg(test)] mod tests { use super::*; test_schema! { test_pools, Vec<String>, r#" [ "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy", "pool1hn7hlwrschqykupwwrtdfkvt2u4uaxvsgxyh6z63703p2knj288", "pool1ztjyjfsh432eqetadf82uwuxklh28xc85zcphpwq6mmezavzad2" ] "# } test_schema! { test_pools_retired, Vec<RetiredPool>, r#" [ { "pool_id": "pool19u64770wqp6s95gkajc8udheske5e6ljmpq33awxk326zjaza0q", "epoch": 225 }, { "pool_id": "pool1dvla4zq98hpvacv20snndupjrqhuc79zl6gjap565nku6et5zdx", "epoch": 215 }, { "pool_id": "pool1wvccajt4eugjtf3k0ja3exjqdj7t8egsujwhcw4tzj4rzsxzw5w", "epoch": 231 } ] "# } test_schema! { test_pools_retiring, Vec<RetiringPool>, r#" [ { "pool_id": "pool19u64770wqp6s95gkajc8udheske5e6ljmpq33awxk326zjaza0q", "epoch": 225 }, { "pool_id": "pool1dvla4zq98hpvacv20snndupjrqhuc79zl6gjap565nku6et5zdx", "epoch": 215 }, { "pool_id": "pool1wvccajt4eugjtf3k0ja3exjqdj7t8egsujwhcw4tzj4rzsxzw5w", "epoch": 231 } ] "# } test_schema! 
{ test_pools_by_id, Pool, r#" { "pool_id": "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy", "hex": "0f292fcaa02b8b2f9b3c8f9fd8e0bb21abedb692a6d5058df3ef2735", "vrf_key": "0b5245f9934ec2151116fb8ec00f35fd00e0aa3b075c4ed12cce440f999d8233", "blocks_minted": 69, "live_stake": "6900000000", "live_size": 0.42, "live_saturation": 0.93, "live_delegators": 127, "active_stake": "4200000000", "active_size": 0.43, "declared_pledge": "5000000000", "live_pledge": "5000000001", "margin_cost": 0.05, "fixed_cost": "340000000", "reward_account": "stake1uxkptsa4lkr55jleztw43t37vgdn88l6ghclfwuxld2eykgpgvg3f", "owners": [ "stake1u98nnlkvkk23vtvf9273uq7cph5ww6u2yq2389psuqet90sv4xv9v" ], "registration": [ "9f83e5484f543e05b52e99988272a31da373f3aab4c064c76db96643a355d9dc", "7ce3b8c433bf401a190d58c8c483d8e3564dfd29ae8633c8b1b3e6c814403e95", "3e6e1200ce92977c3fe5996bd4d7d7e192bcb7e231bc762f9f240c76766535b9" ], "retirement": [ "252f622976d39e646815db75a77289cf16df4ad2b287dd8e3a889ce14c13d1a8" ] } "# } test_schema! { test_pools_history, Vec<PoolHistory>, r#" [ { "epoch": 233, "blocks": 22, "active_stake": "20485965693569", "active_size": 1.2345, "delegators_count": 115, "rewards": "206936253674159", "fees": "1290968354" } ] "# } test_schema! { test_pools_metadata, PoolMetadata, r#" { "pool_id": "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy", "hex": "0f292fcaa02b8b2f9b3c8f9fd8e0bb21abedb692a6d5058df3ef2735", "url": "https://stakenuts.com/mainnet.json", "hash": "47c0c68cb57f4a5b4a87bad896fc274678e7aea98e200fa14a1cb40c0cab1d8c", "ticker": "NUTS", "name": "Stake Nuts", "description": "The best pool ever", "homepage": "https://stakentus.com/" } "# } test_schema! { test_pools_relays, Vec<PoolRelay>, r#" [ { "ipv4": "4.4.4.4",
] "# } test_schema! { test_pool_delegators, Vec<PoolDelegator>, r#" [ { "address": "stake1ux4vspfvwuus9uwyp5p3f0ky7a30jq5j80jxse0fr7pa56sgn8kha", "live_stake": "1137959159981411" }, { "address": "stake1uylayej7esmarzd4mk4aru37zh9yz0luj3g9fsvgpfaxulq564r5u", "live_stake": "16958865648" }, { "address": "stake1u8lr2pnrgf8f7vrs9lt79hc3sxm8s2w4rwvgpncks3axx6q93d4ck", "live_stake": "18605647" } ] "# } test_schema! { test_pools_blocks, Vec<String>, r#" [ "d8982ca42cfe76b747cc681d35d671050a9e41e9cfe26573eb214e94fe6ff21d", "026436c539e2ce84c7f77ffe669f4e4bbbb3b9c53512e5857dcba8bb0b4e9a8c", "bcc8487f419b8c668a18ea2120822a05df6dfe1de1f0fac3feba88cf760f303c", "86bf7b4a274e0f8ec9816171667c1b4a0cfc661dc21563f271acea9482b62df7" ] "# } test_schema! { test_pools_updates, Vec<PoolUpdate>, r#" [ { "tx_hash": "6804edf9712d2b619edb6ac86861fe93a730693183a262b165fcc1ba1bc99cad", "cert_index": 0, "action": "registered" }, { "tx_hash": "9c190bc1ac88b2ab0c05a82d7de8b71b67a9316377e865748a89d4426c0d3005", "cert_index": 0, "action": "deregistered" }, { "tx_hash": "e14a75b0eb2625de7055f1f580d70426311b78e0d36dd695a6bdc96c7b3d80e0", "cert_index": 1, "action": "registered" } ] "# } }
"ipv6": "https://stakenuts.com/mainnet.json", "dns": "relay1.stakenuts.com", "dns_srv": "_relays._tcp.relays.stakenuts.com", "port": 3001 }
random_line_split
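For reference, each line of the `endpoints!` block above declares one method per route. A hedged usage sketch (the `async` signature and `crate::Result` error type are assumptions based on the blockfrost-rust crate's conventions, not shown in the record):

```rust
// Illustrative call site for the pools endpoints declared above.
async fn show_pool(api: &BlockFrostApi, pool_id: &str) -> crate::Result<()> {
    // Fetch pool details, then its relays, using two of the generated methods.
    let pool = api.pools_by_id(pool_id).await?;
    println!("{} minted {} blocks", pool.pool_id, pool.blocks_minted);
    for relay in api.pools_relays(pool_id).await? {
        println!("relay port: {}", relay.port);
    }
    Ok(())
}
```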
pools.rs
use crate::*; use serde::{Deserialize, Serialize}; impl BlockFrostApi { endpoints! { /// List of registered stake pools. pools() -> Vec<String> => "/pools"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools/get"), /// List of already retired pools. pools_retired() -> Vec<RetiredPool> => "/pools/retired"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retired/get"), /// List of retiring stake pools. pools_retiring() -> Vec<RetiringPool> => "/pools/retiring"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1retiring/get"), /// Pool information. pools_by_id(pool_id: &str) -> Pool => "/pools/{pool_id}"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}/get"), /// History of stake pool parameters over epochs. pools_history(pool_id: &str) -> Vec<PoolHistory> => "/pools/{pool_id}/history"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1history/get"), /// Stake pool registration metadata. pools_metadata(pool_id: &str) -> PoolMetadata => "/pools/{pool_id}/metadata"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1metadata/get"), /// Relays of a stake pool. pools_relays(pool_id: &str) -> Vec<PoolRelay> => "/pools/{pool_id}/relays"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1relays/get"), /// List of current stake pools delegators. pools_delegators(pool_id: &str) -> Vec<PoolDelegator> => "/pools/{pool_id}/delegators"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1delegators/get"), /// List of stake pool blocks. pools_blocks(pool_id: &str) -> Vec<String> => "/pools/{pool_id}/blocks"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1blocks/get"), /// List of certificate updates to the stake pool. pools_updates(pool_id: &str) -> Vec<PoolUpdate> => "/pools/{pool_id}/updates"; ("https://docs.blockfrost.io/#tag/Cardano-Pools/paths/~1pools~1{pool_id}~1updates/get"), } } /// Created by [`pools_retired`](BlockFrostApi::pools_retired) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RetiredPool { /// Bech32 encoded pool ID. pub pool_id: String, /// Retirement epoch number. pub epoch: Integer, } /// Created by [`pools_retiring`](BlockFrostApi::pools_retiring) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RetiringPool { /// Bech32 encoded pool ID. pub pool_id: String, /// Retirement epoch number. pub epoch: Integer, } /// Created by [`pools_by_id`](BlockFrostApi::pools_by_id) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct
{ /// Bech32 pool ID. pub pool_id: String, /// Hexadecimal pool ID. pub hex: String, /// VRF key hash. pub vrf_key: String, /// Total minted blocks. pub blocks_minted: Integer, pub live_stake: String, pub live_size: Float, pub live_saturation: Float, pub live_delegators: Integer, pub active_stake: String, pub active_size: Float, /// Stake pool certificate pledge. pub declared_pledge: String, /// Stake pool current pledge. pub live_pledge: String, /// Margin tax cost of the stake pool. pub margin_cost: Float, /// Fixed tax cost of the stake pool. pub fixed_cost: String, /// Bech32 reward account of the stake pool. pub reward_account: String, pub owners: Vec<String>, pub registration: Vec<String>, pub retirement: Vec<String>, } /// Created by [`pools_history`](BlockFrostApi::pools_history) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolHistory { /// Epoch number. pub epoch: Integer, /// Number of blocks created by pool. pub blocks: Integer, /// Active (Snapshot of live stake 2 epochs ago) stake in Lovelaces. pub active_stake: String, /// Pool size (percentage) of overall active stake at that epoch. pub active_size: Float, /// Number of delegators for epoch. pub delegators_count: Integer, /// Total rewards received before distribution to delegators. pub rewards: String, /// Pool operator rewards. pub fees: String, } /// Created by [`pools_metadata`](BlockFrostApi::pools_metadata) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolMetadata { /// Bech32 pool ID. pub pool_id: String, /// Hexadecimal pool ID. pub hex: String, /// URL to the stake pool metadata. pub url: Option<String>, /// Hash of the metadata file. pub hash: Option<String>, /// Ticker of the stake pool. pub ticker: Option<String>, /// Name of the stake pool. pub name: Option<String>, /// Description of the stake pool. pub description: Option<String>, /// Home page of the stake pool. pub homepage: Option<String>, } /// Created by [`pools_relays`](BlockFrostApi::pools_relays) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolRelay { /// IPv4 address of the relay. pub ipv4: Option<String>, /// IPv6 address of the relay. pub ipv6: Option<String>, /// DNS name of the relay. pub dns: Option<String>, /// DNS SRV entry of the relay. pub dns_srv: Option<String>, /// Network port of the relay. pub port: Integer, } /// Created by [`pools_delegators`](BlockFrostApi::pools_delegators) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolDelegator { /// Bech32 encoded stake addresses. pub address: String, /// Currently delegated amount. pub live_stake: String, } /// Created by [`pools_updates`](BlockFrostApi::pools_updates) method. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PoolUpdate { /// Transaction ID. pub tx_hash: String, /// Certificate within the transaction. pub cert_index: Integer, /// Action in the certificate. pub action: ActionType, // "registered" | "deregistered" } #[cfg(test)] mod tests { use super::*; test_schema! { test_pools, Vec<String>, r#" [ "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy", "pool1hn7hlwrschqykupwwrtdfkvt2u4uaxvsgxyh6z63703p2knj288", "pool1ztjyjfsh432eqetadf82uwuxklh28xc85zcphpwq6mmezavzad2" ] "# } test_schema! 
{ test_pools_retired, Vec<RetiredPool>, r#" [ { "pool_id": "pool19u64770wqp6s95gkajc8udheske5e6ljmpq33awxk326zjaza0q", "epoch": 225 }, { "pool_id": "pool1dvla4zq98hpvacv20snndupjrqhuc79zl6gjap565nku6et5zdx", "epoch": 215 }, { "pool_id": "pool1wvccajt4eugjtf3k0ja3exjqdj7t8egsujwhcw4tzj4rzsxzw5w", "epoch": 231 } ] "# } test_schema! { test_pools_retiring, Vec<RetiringPool>, r#" [ { "pool_id": "pool19u64770wqp6s95gkajc8udheske5e6ljmpq33awxk326zjaza0q", "epoch": 225 }, { "pool_id": "pool1dvla4zq98hpvacv20snndupjrqhuc79zl6gjap565nku6et5zdx", "epoch": 215 }, { "pool_id": "pool1wvccajt4eugjtf3k0ja3exjqdj7t8egsujwhcw4tzj4rzsxzw5w", "epoch": 231 } ] "# } test_schema! { test_pools_by_id, Pool, r#" { "pool_id": "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy", "hex": "0f292fcaa02b8b2f9b3c8f9fd8e0bb21abedb692a6d5058df3ef2735", "vrf_key": "0b5245f9934ec2151116fb8ec00f35fd00e0aa3b075c4ed12cce440f999d8233", "blocks_minted": 69, "live_stake": "6900000000", "live_size": 0.42, "live_saturation": 0.93, "live_delegators": 127, "active_stake": "4200000000", "active_size": 0.43, "declared_pledge": "5000000000", "live_pledge": "5000000001", "margin_cost": 0.05, "fixed_cost": "340000000", "reward_account": "stake1uxkptsa4lkr55jleztw43t37vgdn88l6ghclfwuxld2eykgpgvg3f", "owners": [ "stake1u98nnlkvkk23vtvf9273uq7cph5ww6u2yq2389psuqet90sv4xv9v" ], "registration": [ "9f83e5484f543e05b52e99988272a31da373f3aab4c064c76db96643a355d9dc", "7ce3b8c433bf401a190d58c8c483d8e3564dfd29ae8633c8b1b3e6c814403e95", "3e6e1200ce92977c3fe5996bd4d7d7e192bcb7e231bc762f9f240c76766535b9" ], "retirement": [ "252f622976d39e646815db75a77289cf16df4ad2b287dd8e3a889ce14c13d1a8" ] } "# } test_schema! { test_pools_history, Vec<PoolHistory>, r#" [ { "epoch": 233, "blocks": 22, "active_stake": "20485965693569", "active_size": 1.2345, "delegators_count": 115, "rewards": "206936253674159", "fees": "1290968354" } ] "# } test_schema! { test_pools_metadata, PoolMetadata, r#" { "pool_id": "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy", "hex": "0f292fcaa02b8b2f9b3c8f9fd8e0bb21abedb692a6d5058df3ef2735", "url": "https://stakenuts.com/mainnet.json", "hash": "47c0c68cb57f4a5b4a87bad896fc274678e7aea98e200fa14a1cb40c0cab1d8c", "ticker": "NUTS", "name": "Stake Nuts", "description": "The best pool ever", "homepage": "https://stakentus.com/" } "# } test_schema! { test_pools_relays, Vec<PoolRelay>, r#" [ { "ipv4": "4.4.4.4", "ipv6": "https://stakenuts.com/mainnet.json", "dns": "relay1.stakenuts.com", "dns_srv": "_relays._tcp.relays.stakenuts.com", "port": 3001 } ] "# } test_schema! { test_pool_delegators, Vec<PoolDelegator>, r#" [ { "address": "stake1ux4vspfvwuus9uwyp5p3f0ky7a30jq5j80jxse0fr7pa56sgn8kha", "live_stake": "1137959159981411" }, { "address": "stake1uylayej7esmarzd4mk4aru37zh9yz0luj3g9fsvgpfaxulq564r5u", "live_stake": "16958865648" }, { "address": "stake1u8lr2pnrgf8f7vrs9lt79hc3sxm8s2w4rwvgpncks3axx6q93d4ck", "live_stake": "18605647" } ] "# } test_schema! { test_pools_blocks, Vec<String>, r#" [ "d8982ca42cfe76b747cc681d35d671050a9e41e9cfe26573eb214e94fe6ff21d", "026436c539e2ce84c7f77ffe669f4e4bbbb3b9c53512e5857dcba8bb0b4e9a8c", "bcc8487f419b8c668a18ea2120822a05df6dfe1de1f0fac3feba88cf760f303c", "86bf7b4a274e0f8ec9816171667c1b4a0cfc661dc21563f271acea9482b62df7" ] "# } test_schema! 
{ test_pools_updates, Vec<PoolUpdate>, r#" [ { "tx_hash": "6804edf9712d2b619edb6ac86861fe93a730693183a262b165fcc1ba1bc99cad", "cert_index": 0, "action": "registered" }, { "tx_hash": "9c190bc1ac88b2ab0c05a82d7de8b71b67a9316377e865748a89d4426c0d3005", "cert_index": 0, "action": "deregistered" }, { "tx_hash": "e14a75b0eb2625de7055f1f580d70426311b78e0d36dd695a6bdc96c7b3d80e0", "cert_index": 1, "action": "registered" } ] "# } }
Pool
identifier_name
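The `test_schema!` fixtures above pair each response type with sample JSON; a hand-rolled equivalent of one such round-trip, assuming `serde_json` is available (the macro's actual expansion may differ):

```rust
// Stand-alone version of what a schema test presumably checks: the fixture
// JSON deserializes into the derived type. `RetiredPool` is from the record
// above; `Integer` is assumed to compare with integer literals.
fn check_retired_pool_fixture() -> serde_json::Result<()> {
    let json = r#"{ "pool_id": "pool19u64770wqp6s95gkajc8udheske5e6ljmpq33awxk326zjaza0q", "epoch": 225 }"#;
    let pool: RetiredPool = serde_json::from_str(json)?;
    assert_eq!(pool.epoch, 225);
    Ok(())
}
```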
chain_spec.rs
// Copyright 2018-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. //! Substrate chain configurations. use grandpa_primitives::AuthorityId as GrandpaId; use hex_literal::hex; use node_runtime::constants::currency::*; use node_runtime::Block; use node_runtime::{ AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, GrandpaConfig, ImOnlineConfig, IndicesConfig, KtonConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, SudoConfig, SystemConfig, WASM_BINARY, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sc_chain_spec::ChainSpecExtension; use sc_service::Properties; use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; use sp_runtime::{ traits::{IdentifyAccount, Verify}, Perbill, }; pub use node_primitives::{AccountId, Balance, Signature}; pub use node_runtime::GenesisConfig; type AccountPublic = <Signature as Verify>::Signer; const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Node `ChainSpec` extensions. /// /// Additional parameters for some Substrate core modules, /// customizable from the chain spec. #[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)] pub struct Extensions { /// Block numbers with known hashes. pub fork_blocks: sc_client::ForkBlocks<Block>, } /// Specialized `ChainSpec`. 
pub type ChainSpec = sc_service::ChainSpec<GenesisConfig, Extensions>; /// IceFrog testnet generator pub fn icefrog_testnet_config() -> Result<ChainSpec, String> { ChainSpec::from_json_bytes(&include_bytes!("../res/icefrog.json")[..]) } fn session_keys( grandpa: GrandpaId, babe: BabeId, im_online: ImOnlineId, authority_discovery: AuthorityDiscoveryId, ) -> SessionKeys { SessionKeys { grandpa, babe, im_online, authority_discovery, } } /// Helper function to generate a crypto pair from seed pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) .expect("static values are valid; qed") .public() } /// Helper function to generate an account ID from seed pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId where AccountPublic: From<<TPublic::Pair as Pair>::Public>, { AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account() } /// Helper function to generate stash, controller and session key from seed pub fn get_authority_keys_from_seed( seed: &str, ) -> ( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, ) { ( get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)), get_account_id_from_seed::<sr25519::Public>(seed), get_from_seed::<GrandpaId>(seed), get_from_seed::<BabeId>(seed), get_from_seed::<ImOnlineId>(seed), get_from_seed::<AuthorityDiscoveryId>(seed), ) } /// Helper function to create GenesisConfig for darwinia. /// is_testnet: on a testnet we use Alice & Bob as seeds to generate keys, /// but in a production environment these accounts use preset keys pub fn darwinia_genesis( initial_authorities: Vec<( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, )>, root_key: AccountId, endowed_accounts: Vec<AccountId>, enable_println: bool, is_testnet: bool, ) -> GenesisConfig { let eth_relay_authorities: Vec<AccountId> = if is_testnet { vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), ] } else { vec![initial_authorities[0].clone().1, initial_authorities[1].clone().1] }; const RING_ENDOWMENT: Balance = 20_000_000 * COIN; const KTON_ENDOWMENT: Balance = 10 * COIN; const STASH: Balance = 1000 * COIN; GenesisConfig { frame_system: Some(SystemConfig { code: WASM_BINARY.to_vec(), changes_trie_config: Default::default(), }), pallet_indices: Some(IndicesConfig { ids: endowed_accounts .iter() .cloned() .chain(initial_authorities.iter().map(|x| x.0.clone())) .collect::<Vec<_>>(), }), pallet_session: Some(SessionConfig { keys: initial_authorities .iter() .map(|x| { ( x.0.clone(), session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()), ) }) .collect::<Vec<_>>(), }), // pallet_democracy: Some(DemocracyConfig::default()), // pallet_collective_Instance1: Some(CouncilConfig { // members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(), // phantom: Default::default(), // }), // pallet_collective_Instance2: Some(TechnicalCommitteeConfig { // members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(), // phantom: Default::default(), // }), pallet_contracts: Some(ContractsConfig { current_schedule: pallet_contracts::Schedule { enable_println, // this should only be enabled on development chains ..Default::default() }, gas_price: 1 * MILLI, }), pallet_sudo: Some(SudoConfig { key: root_key }), pallet_babe: Some(BabeConfig { authorities: vec![] }), pallet_im_online: 
Some(ImOnlineConfig { keys: vec![] }), pallet_authority_discovery: Some(AuthorityDiscoveryConfig { keys: vec![] }), pallet_grandpa: Some(GrandpaConfig { authorities: vec![] }), // pallet_membership_Instance1: Some(Default::default()), // pallet_treasury: Some(Default::default()), pallet_ring: Some(BalancesConfig { balances: endowed_accounts .iter() .cloned() .map(|k| (k, RING_ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), vesting: vec![], }), pallet_kton: Some(KtonConfig { balances: endowed_accounts .iter() .cloned() .map(|k| (k, KTON_ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), vesting: vec![], }), pallet_staking: Some(StakingConfig { current_era: 0, validator_count: initial_authorities.len() as u32 * 2, minimum_validator_count: initial_authorities.len() as u32, stakers: initial_authorities .iter() .map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator)) .collect(), invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), slash_reward_fraction: Perbill::from_percent(10), ..Default::default() }), } } /// Staging testnet config. pub fn staging_testnet_config() -> ChainSpec { fn staging_testnet_config_genesis() -> GenesisConfig { // stash, controller, session-key // generated with secret: // for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done // and // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done let initial_authorities: Vec<( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, )> = vec![ ( // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), ), ( // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), ), ( // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 
hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), ), ( // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), ), ]; // generated with secret: subkey inspect "$secret"/fir let root_key: AccountId = hex![ // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" ] .into(); let endowed_accounts: Vec<AccountId> = vec![root_key.clone()]; darwinia_genesis(initial_authorities, root_key, endowed_accounts, false, true) } let boot_nodes = vec![]; ChainSpec::from_genesis( "Staging Testnet", "staging_testnet", staging_testnet_config_genesis, boot_nodes, Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), None, None, Default::default(), ) } /// Development config (single validator Alice) pub fn development_config() -> ChainSpec { fn development_config_genesis() -> GenesisConfig { darwinia_genesis( vec![get_authority_keys_from_seed("Alice")], get_account_id_from_seed::<sr25519::Public>("Alice"), vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), get_account_id_from_seed::<sr25519::Public>("Charlie"), get_account_id_from_seed::<sr25519::Public>("Dave"), get_account_id_from_seed::<sr25519::Public>("Eve"), get_account_id_from_seed::<sr25519::Public>("Ferdie"), get_account_id_from_seed::<sr25519::Public>("Alice//stash"), get_account_id_from_seed::<sr25519::Public>("Bob//stash"), get_account_id_from_seed::<sr25519::Public>("Charlie//stash"), get_account_id_from_seed::<sr25519::Public>("Dave//stash"), get_account_id_from_seed::<sr25519::Public>("Eve//stash"), get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"), ], true, true, ) } ChainSpec::from_genesis( "Development", "dev", development_config_genesis, vec![], None, None, None, Default::default(), ) } /// IceFrog local testnet config (multivalidator Alice + Bob) pub fn local_testnet_config() -> ChainSpec
true, true, ) } ChainSpec::from_genesis( "Darwinia IceFrog Testnet", "icefrog_testnet", icefrog_config_genesis, vec![], Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), Some("DAR"), { let mut properties = Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenDecimals".into(), 9.into()); properties.insert("tokenSymbol".into(), "IRING".into()); properties.insert("ktonTokenDecimals".into(), 9.into()); properties.insert("ktonTokenSymbol".into(), "IKTON".into()); Some(properties) }, Default::default(), ) } /// IceFrog testnet config generator pub fn gen_icefrog_testnet_config() -> ChainSpec { fn icefrog_config_genesis() -> GenesisConfig { darwinia_genesis( vec![ ( hex!["be3fd892bf0e2b33dbfcf298c99a9f71e631a57af6c017dc5ac078c5d5b3494b"].into(), //stash hex!["70bf51d123581d6e51af70b342cac75ae0a0fc71d1a8d388719139af9c042b18"].into(), get_from_seed::<GrandpaId>("Alice"), get_from_seed::<BabeId>("Alice"), get_from_seed::<ImOnlineId>("Alice"), get_from_seed::<AuthorityDiscoveryId>("Alice"), ), ( hex!["e2f560c01a2d8e98d313d6799185c28a39e10896332b56304ff46392f585024c"].into(), //stash hex!["94c51178449c09eec77918ea951fa3244f7b841eea1dd1489d2b5f2a53f8840f"].into(), get_from_seed::<GrandpaId>("Bob"), get_from_seed::<BabeId>("Bob"), get_from_seed::<ImOnlineId>("Bob"), get_from_seed::<AuthorityDiscoveryId>("Bob"), ), ], hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), vec![ hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(), hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(), hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(), hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(), hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(), hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(), hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(), hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(), hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(), ], true, false, ) } ChainSpec::from_genesis( "Darwinia IceFrog Testnet", "icefrog_testnet", icefrog_config_genesis, vec![], Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), Some("DAR"), { let mut properties = Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenDecimals".into(), 9.into()); properties.insert("tokenSymbol".into(), "IRING".into()); properties.insert("ktonTokenDecimals".into(), 9.into()); properties.insert("ktonTokenSymbol".into(), "IKTON".into()); Some(properties) }, Default::default(), ) }
{ fn icefrog_config_genesis() -> GenesisConfig { darwinia_genesis( vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), ], hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), // 5FpQFHfKd1xQ9HLZLQoG1JAQSCJoUEVBELnKsKNcuRLZejJR vec![ hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(), hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(), hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(), hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(), hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(), hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(), hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(), hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(), hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(), ],
identifier_body
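A common smoke test for chain-spec modules like the one above is simply constructing each spec, which exercises the config constructors (including the `identifier_body` middle shown above); a sketch in that style, not part of the record itself:

```rust
#[test]
fn chain_specs_can_be_built() {
    // Constructing each spec exercises the constructors above; with
    // sp_runtime::BuildStorage in scope, calling `.build_storage()` on a spec
    // would additionally force the genesis closures to run, as Substrate's
    // own chain-spec tests do (an assumption about the available trait).
    let _ = development_config();
    let _ = local_testnet_config();
    let _ = gen_icefrog_testnet_config();
    let _ = staging_testnet_config();
}
```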
chain_spec.rs
// Copyright 2018-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. //! Substrate chain configurations. use grandpa_primitives::AuthorityId as GrandpaId; use hex_literal::hex; use node_runtime::constants::currency::*; use node_runtime::Block; use node_runtime::{ AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, GrandpaConfig, ImOnlineConfig, IndicesConfig, KtonConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, SudoConfig, SystemConfig, WASM_BINARY, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sc_chain_spec::ChainSpecExtension; use sc_service::Properties; use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; use sp_runtime::{ traits::{IdentifyAccount, Verify}, Perbill, }; pub use node_primitives::{AccountId, Balance, Signature}; pub use node_runtime::GenesisConfig; type AccountPublic = <Signature as Verify>::Signer; const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Node `ChainSpec` extensions. /// /// Additional parameters for some Substrate core modules, /// customizable from the chain spec. #[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)] pub struct Extensions { /// Block numbers with known hashes. pub fork_blocks: sc_client::ForkBlocks<Block>, } /// Specialized `ChainSpec`. 
pub type ChainSpec = sc_service::ChainSpec<GenesisConfig, Extensions>; /// IceFrog testnet generator pub fn icefrog_testnet_config() -> Result<ChainSpec, String> { ChainSpec::from_json_bytes(&include_bytes!("../res/icefrog.json")[..]) } fn session_keys( grandpa: GrandpaId, babe: BabeId, im_online: ImOnlineId, authority_discovery: AuthorityDiscoveryId, ) -> SessionKeys { SessionKeys { grandpa, babe, im_online, authority_discovery, } } /// Helper function to generate a crypto pair from seed pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) .expect("static values are valid; qed") .public() } /// Helper function to generate an account ID from seed pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId where AccountPublic: From<<TPublic::Pair as Pair>::Public>, { AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account() } /// Helper function to generate stash, controller and session key from seed pub fn get_authority_keys_from_seed( seed: &str, ) -> ( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, ) { ( get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)), get_account_id_from_seed::<sr25519::Public>(seed), get_from_seed::<GrandpaId>(seed), get_from_seed::<BabeId>(seed), get_from_seed::<ImOnlineId>(seed), get_from_seed::<AuthorityDiscoveryId>(seed), ) } /// Helper function to create GenesisConfig for darwinia. /// is_testnet: on a testnet we use Alice & Bob as seeds to generate keys, /// but in a production environment these accounts use preset keys pub fn darwinia_genesis( initial_authorities: Vec<( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, )>, root_key: AccountId, endowed_accounts: Vec<AccountId>, enable_println: bool, is_testnet: bool, ) -> GenesisConfig { let eth_relay_authorities: Vec<AccountId> = if is_testnet { vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), ] } else
; const RING_ENDOWMENT: Balance = 20_000_000 * COIN; const KTON_ENDOWMENT: Balance = 10 * COIN; const STASH: Balance = 1000 * COIN; GenesisConfig { frame_system: Some(SystemConfig { code: WASM_BINARY.to_vec(), changes_trie_config: Default::default(), }), pallet_indices: Some(IndicesConfig { ids: endowed_accounts .iter() .cloned() .chain(initial_authorities.iter().map(|x| x.0.clone())) .collect::<Vec<_>>(), }), pallet_session: Some(SessionConfig { keys: initial_authorities .iter() .map(|x| { ( x.0.clone(), session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()), ) }) .collect::<Vec<_>>(), }), // pallet_democracy: Some(DemocracyConfig::default()), // pallet_collective_Instance1: Some(CouncilConfig { // members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(), // phantom: Default::default(), // }), // pallet_collective_Instance2: Some(TechnicalCommitteeConfig { // members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(), // phantom: Default::default(), // }), pallet_contracts: Some(ContractsConfig { current_schedule: pallet_contracts::Schedule { enable_println, // this should only be enabled on development chains ..Default::default() }, gas_price: 1 * MILLI, }), pallet_sudo: Some(SudoConfig { key: root_key }), pallet_babe: Some(BabeConfig { authorities: vec![] }), pallet_im_online: Some(ImOnlineConfig { keys: vec![] }), pallet_authority_discovery: Some(AuthorityDiscoveryConfig { keys: vec![] }), pallet_grandpa: Some(GrandpaConfig { authorities: vec![] }), // pallet_membership_Instance1: Some(Default::default()), // pallet_treasury: Some(Default::default()), pallet_ring: Some(BalancesConfig { balances: endowed_accounts .iter() .cloned() .map(|k| (k, RING_ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), vesting: vec![], }), pallet_kton: Some(KtonConfig { balances: endowed_accounts .iter() .cloned() .map(|k| (k, KTON_ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), vesting: vec![], }), pallet_staking: Some(StakingConfig { current_era: 0, validator_count: initial_authorities.len() as u32 * 2, minimum_validator_count: initial_authorities.len() as u32, stakers: initial_authorities .iter() .map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator)) .collect(), invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), slash_reward_fraction: Perbill::from_percent(10), ..Default::default() }), } } /// Staging testnet config. 
pub fn staging_testnet_config() -> ChainSpec { fn staging_testnet_config_genesis() -> GenesisConfig { // stash, controller, session-key // generated with secret: // for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done // and // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done let initial_authorities: Vec<( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, )> = vec![ ( // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), ), ( // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), ), ( // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), ), ( // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x 
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), ), ]; // generated with secret: subkey inspect "$secret"/fir let root_key: AccountId = hex![ // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" ] .into(); let endowed_accounts: Vec<AccountId> = vec![root_key.clone()]; darwinia_genesis(initial_authorities, root_key, endowed_accounts, false, true) } let boot_nodes = vec![]; ChainSpec::from_genesis( "Staging Testnet", "staging_testnet", staging_testnet_config_genesis, boot_nodes, Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), None, None, Default::default(), ) } /// Development config (single validator Alice) pub fn development_config() -> ChainSpec { fn development_config_genesis() -> GenesisConfig { darwinia_genesis( vec![get_authority_keys_from_seed("Alice")], get_account_id_from_seed::<sr25519::Public>("Alice"), vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), get_account_id_from_seed::<sr25519::Public>("Charlie"), get_account_id_from_seed::<sr25519::Public>("Dave"), get_account_id_from_seed::<sr25519::Public>("Eve"), get_account_id_from_seed::<sr25519::Public>("Ferdie"), get_account_id_from_seed::<sr25519::Public>("Alice//stash"), get_account_id_from_seed::<sr25519::Public>("Bob//stash"), get_account_id_from_seed::<sr25519::Public>("Charlie//stash"), get_account_id_from_seed::<sr25519::Public>("Dave//stash"), get_account_id_from_seed::<sr25519::Public>("Eve//stash"), get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"), ], true, true, ) } ChainSpec::from_genesis( "Development", "dev", development_config_genesis, vec![], None, None, None, Default::default(), ) } /// IceFrog local testnet config (multivalidator Alice + Bob) pub fn local_testnet_config() -> ChainSpec { fn icefrog_config_genesis() -> GenesisConfig { darwinia_genesis( vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), ], hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), // 5FpQFHfKd1xQ9HLZLQoG1JAQSCJoUEVBELnKsKNcuRLZejJR vec![ hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(), hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(), hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(), hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(), hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(), hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(), hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(), hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(), hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(), ], true, true, ) } ChainSpec::from_genesis( "Darwinia IceFrog Testnet", "icefrog_testnet", icefrog_config_genesis, vec![], Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), Some("DAR"), { let mut properties = Properties::new(); 
properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenDecimals".into(), 9.into()); properties.insert("tokenSymbol".into(), "IRING".into()); properties.insert("ktonTokenDecimals".into(), 9.into()); properties.insert("ktonTokenSymbol".into(), "IKTON".into()); Some(properties) }, Default::default(), ) } /// IceFrog testnet config generator pub fn gen_icefrog_testnet_config() -> ChainSpec { fn icefrog_config_genesis() -> GenesisConfig { darwinia_genesis( vec![ ( hex!["be3fd892bf0e2b33dbfcf298c99a9f71e631a57af6c017dc5ac078c5d5b3494b"].into(), //stash hex!["70bf51d123581d6e51af70b342cac75ae0a0fc71d1a8d388719139af9c042b18"].into(), get_from_seed::<GrandpaId>("Alice"), get_from_seed::<BabeId>("Alice"), get_from_seed::<ImOnlineId>("Alice"), get_from_seed::<AuthorityDiscoveryId>("Alice"), ), ( hex!["e2f560c01a2d8e98d313d6799185c28a39e10896332b56304ff46392f585024c"].into(), //stash hex!["94c51178449c09eec77918ea951fa3244f7b841eea1dd1489d2b5f2a53f8840f"].into(), get_from_seed::<GrandpaId>("Bob"), get_from_seed::<BabeId>("Bob"), get_from_seed::<ImOnlineId>("Bob"), get_from_seed::<AuthorityDiscoveryId>("Bob"), ), ], hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), vec![ hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(), hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(), hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(), hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(), hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(), hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(), hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(), hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(), hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(), ], true, false, ) } ChainSpec::from_genesis( "Darwinia IceFrog Testnet", "icefrog_testnet", icefrog_config_genesis, vec![], Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), Some("DAR"), { let mut properties = Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenDecimals".into(), 9.into()); properties.insert("tokenSymbol".into(), "IRING".into()); properties.insert("ktonTokenDecimals".into(), 9.into()); properties.insert("ktonTokenSymbol".into(), "IKTON".into()); Some(properties) }, Default::default(), ) }
{ vec![initial_authorities[0].clone().1, initial_authorities[1].clone().1] }
conditional_block
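The masked middle in the sample above is the non-testnet arm of the `is_testnet` branch in `darwinia_genesis`: on a test net the eth relay authorities come from the well-known Alice/Bob dev seeds, otherwise from the controller account (the second tuple field) of the first two initial authorities. A minimal, self-contained sketch of just that selection logic; `AccountId` as a plain `String` and `dev_account` standing in for `get_account_id_from_seed` are assumptions for illustration, not the runtime types:

// Sketch of the is_testnet branch; AccountId is a stand-in String and
// dev_account is a hypothetical replacement for get_account_id_from_seed.
type AccountId = String;

fn dev_account(seed: &str) -> AccountId {
    format!("//{}", seed) // stand-in for a seed-derived dev account
}

// Each authority tuple is (stash, controller); the real tuple has six fields.
fn eth_relay_authorities(
    initial_authorities: &[(AccountId, AccountId)],
    is_testnet: bool,
) -> Vec<AccountId> {
    if is_testnet {
        vec![dev_account("Alice"), dev_account("Bob")]
    } else {
        // Production: take the controller (second field) of the first two authorities.
        vec![
            initial_authorities[0].1.clone(),
            initial_authorities[1].1.clone(),
        ]
    }
}

fn main() {
    let auths = vec![
        ("stash-1".to_string(), "controller-1".to_string()),
        ("stash-2".to_string(), "controller-2".to_string()),
    ];
    assert_eq!(eth_relay_authorities(&auths, false)[0], "controller-1");
    assert_eq!(eth_relay_authorities(&auths, true)[0], "//Alice");
}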
chain_spec.rs
// Copyright 2018-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. //! Substrate chain configurations. use grandpa_primitives::AuthorityId as GrandpaId; use hex_literal::hex; use node_runtime::constants::currency::*; use node_runtime::Block; use node_runtime::{ AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, GrandpaConfig, ImOnlineConfig, IndicesConfig, KtonConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, SudoConfig, SystemConfig, WASM_BINARY, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sc_chain_spec::ChainSpecExtension; use sc_service::Properties; use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; use sp_runtime::{ traits::{IdentifyAccount, Verify}, Perbill, }; pub use node_primitives::{AccountId, Balance, Signature}; pub use node_runtime::GenesisConfig; type AccountPublic = <Signature as Verify>::Signer; const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Node `ChainSpec` extensions. /// /// Additional parameters for some Substrate core modules, /// customizable from the chain spec. #[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)] pub struct Extensions { /// Block numbers with known hashes. pub fork_blocks: sc_client::ForkBlocks<Block>, } /// Specialized `ChainSpec`. 
pub type ChainSpec = sc_service::ChainSpec<GenesisConfig, Extensions>; /// IceFrog testnet generator pub fn icefrog_testnet_config() -> Result<ChainSpec, String> { ChainSpec::from_json_bytes(&include_bytes!("../res/icefrog.json")[..]) } fn session_keys( grandpa: GrandpaId, babe: BabeId, im_online: ImOnlineId, authority_discovery: AuthorityDiscoveryId, ) -> SessionKeys { SessionKeys { grandpa, babe, im_online, authority_discovery, } } /// Helper function to generate a crypto pair from seed pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) .expect("static values are valid; qed") .public() } /// Helper function to generate an account ID from seed pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId where AccountPublic: From<<TPublic::Pair as Pair>::Public>, { AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account() } /// Helper function to generate stash, controller and session keys from seed pub fn get_authority_keys_from_seed( seed: &str, ) -> ( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, ) { ( get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)), get_account_id_from_seed::<sr25519::Public>(seed), get_from_seed::<GrandpaId>(seed), get_from_seed::<BabeId>(seed), get_from_seed::<ImOnlineId>(seed), get_from_seed::<AuthorityDiscoveryId>(seed), ) } /// Helper function to create GenesisConfig for Darwinia /// is_testnet: on a test net we use Alice & Bob as seeds to generate keys, /// but in a production environment these accounts use preset keys pub fn darwinia_genesis( initial_authorities: Vec<( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, )>, root_key: AccountId, endowed_accounts: Vec<AccountId>, enable_println: bool, is_testnet: bool, ) -> GenesisConfig { let eth_relay_authorities: Vec<AccountId> = if is_testnet { vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), ] } else { vec![initial_authorities[0].clone().1, initial_authorities[1].clone().1] }; const RING_ENDOWMENT: Balance = 20_000_000 * COIN; const KTON_ENDOWMENT: Balance = 10 * COIN; const STASH: Balance = 1000 * COIN; GenesisConfig { frame_system: Some(SystemConfig { code: WASM_BINARY.to_vec(), changes_trie_config: Default::default(), }), pallet_indices: Some(IndicesConfig { ids: endowed_accounts .iter() .cloned() .chain(initial_authorities.iter().map(|x| x.0.clone())) .collect::<Vec<_>>(), }), pallet_session: Some(SessionConfig { keys: initial_authorities .iter() .map(|x| { ( x.0.clone(), session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()), ) }) .collect::<Vec<_>>(), }), // pallet_democracy: Some(DemocracyConfig::default()), // pallet_collective_Instance1: Some(CouncilConfig { // members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(), // phantom: Default::default(), // }), // pallet_collective_Instance2: Some(TechnicalCommitteeConfig { // members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(), // phantom: Default::default(), // }), pallet_contracts: Some(ContractsConfig { current_schedule: pallet_contracts::Schedule { enable_println, // this should only be enabled on development chains ..Default::default() }, gas_price: 1 * MILLI, }), pallet_sudo: Some(SudoConfig { key: root_key }), pallet_babe: Some(BabeConfig { authorities: vec![] }), pallet_im_online:
Some(ImOnlineConfig { keys: vec![] }), pallet_authority_discovery: Some(AuthorityDiscoveryConfig { keys: vec![] }), pallet_grandpa: Some(GrandpaConfig { authorities: vec![] }), // pallet_membership_Instance1: Some(Default::default()), // pallet_treasury: Some(Default::default()), pallet_ring: Some(BalancesConfig { balances: endowed_accounts .iter() .cloned() .map(|k| (k, RING_ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), vesting: vec![], }), pallet_kton: Some(KtonConfig { balances: endowed_accounts .iter() .cloned() .map(|k| (k, KTON_ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), vesting: vec![], }),
stakers: initial_authorities .iter() .map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator)) .collect(), invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), slash_reward_fraction: Perbill::from_percent(10), ..Default::default() }), } } /// Staging testnet config. pub fn staging_testnet_config() -> ChainSpec { fn staging_testnet_config_genesis() -> GenesisConfig { // stash, controller, session-key // generated with secret: // for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done // and // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done let initial_authorities: Vec<( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, )> = vec![ ( // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), ), ( // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), ), ( // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), ), ( // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn 
hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), ), ]; // generated with secret: subkey inspect "$secret"/fir let root_key: AccountId = hex![ // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" ] .into(); let endowed_accounts: Vec<AccountId> = vec![root_key.clone()]; darwinia_genesis(initial_authorities, root_key, endowed_accounts, false, true) } let boot_nodes = vec![]; ChainSpec::from_genesis( "Staging Testnet", "staging_testnet", staging_testnet_config_genesis, boot_nodes, Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), None, None, Default::default(), ) } /// Development config (single validator Alice) pub fn development_config() -> ChainSpec { fn development_config_genesis() -> GenesisConfig { darwinia_genesis( vec![get_authority_keys_from_seed("Alice")], get_account_id_from_seed::<sr25519::Public>("Alice"), vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), get_account_id_from_seed::<sr25519::Public>("Charlie"), get_account_id_from_seed::<sr25519::Public>("Dave"), get_account_id_from_seed::<sr25519::Public>("Eve"), get_account_id_from_seed::<sr25519::Public>("Ferdie"), get_account_id_from_seed::<sr25519::Public>("Alice//stash"), get_account_id_from_seed::<sr25519::Public>("Bob//stash"), get_account_id_from_seed::<sr25519::Public>("Charlie//stash"), get_account_id_from_seed::<sr25519::Public>("Dave//stash"), get_account_id_from_seed::<sr25519::Public>("Eve//stash"), get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"), ], true, true, ) } ChainSpec::from_genesis( "Development", "dev", development_config_genesis, vec![], None, None, None, Default::default(), ) } /// IceFrog local testnet config (multivalidator Alice + Bob) pub fn local_testnet_config() -> ChainSpec { fn icefrog_config_genesis() -> GenesisConfig { darwinia_genesis( vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), ], hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), // 5FpQFHfKd1xQ9HLZLQoG1JAQSCJoUEVBELnKsKNcuRLZejJR vec![ hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(), hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(), hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(), hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(), hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(), hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(), hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(), hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(), 
hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(), ], true, true, ) } ChainSpec::from_genesis( "Darwinia IceFrog Testnet", "icefrog_testnet", icefrog_config_genesis, vec![], Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), Some("DAR"), { let mut properties = Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenDecimals".into(), 9.into()); properties.insert("tokenSymbol".into(), "IRING".into()); properties.insert("ktonTokenDecimals".into(), 9.into()); properties.insert("ktonTokenSymbol".into(), "IKTON".into()); Some(properties) }, Default::default(), ) } /// IceFrog testnet config generator pub fn gen_icefrog_testnet_config() -> ChainSpec { fn icefrog_config_genesis() -> GenesisConfig { darwinia_genesis( vec![ ( hex!["be3fd892bf0e2b33dbfcf298c99a9f71e631a57af6c017dc5ac078c5d5b3494b"].into(), //stash hex!["70bf51d123581d6e51af70b342cac75ae0a0fc71d1a8d388719139af9c042b18"].into(), get_from_seed::<GrandpaId>("Alice"), get_from_seed::<BabeId>("Alice"), get_from_seed::<ImOnlineId>("Alice"), get_from_seed::<AuthorityDiscoveryId>("Alice"), ), ( hex!["e2f560c01a2d8e98d313d6799185c28a39e10896332b56304ff46392f585024c"].into(), //stash hex!["94c51178449c09eec77918ea951fa3244f7b841eea1dd1489d2b5f2a53f8840f"].into(), get_from_seed::<GrandpaId>("Bob"), get_from_seed::<BabeId>("Bob"), get_from_seed::<ImOnlineId>("Bob"), get_from_seed::<AuthorityDiscoveryId>("Bob"), ), ], hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), vec![ hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(), hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(), hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(), hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(), hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(), hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(), hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(), hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(), hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(), ], true, false, ) } ChainSpec::from_genesis( "Darwinia IceFrog Testnet", "icefrog_testnet", icefrog_config_genesis, vec![], Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), Some("DAR"), { let mut properties = Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenDecimals".into(), 9.into()); properties.insert("tokenSymbol".into(), "IRING".into()); properties.insert("ktonTokenDecimals".into(), 9.into()); properties.insert("ktonTokenSymbol".into(), "IKTON".into()); Some(properties) }, Default::default(), ) }
pallet_staking: Some(StakingConfig { current_era: 0, validator_count: initial_authorities.len() as u32 * 2, minimum_validator_count: initial_authorities.len() as u32,
random_line_split
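The middle of the sample above carries the `StakingConfig` header, whose counts are derived from the authority set: `validator_count` is twice the number of initial authorities, `minimum_validator_count` equals it, and every authority bonds `STASH` as a validator. A standalone sketch of that derivation; the stand-in types and the 10^9 `COIN` value (implied by the `tokenDecimals: 9` property) are assumptions for illustration:

// Sketch of how the StakingConfig numbers are derived from the authority set.
type Balance = u128;
type AccountId = &'static str; // stand-in for the runtime's AccountId

const COIN: Balance = 1_000_000_000; // assumed 10^9 base unit (tokenDecimals = 9)
const STASH: Balance = 1000 * COIN;

#[derive(Debug)]
enum StakerStatus {
    Validator,
}

fn main() {
    // (stash, controller) pairs; the real tuples carry session keys too.
    let initial_authorities: Vec<(AccountId, AccountId)> =
        vec![("alice-stash", "alice"), ("bob-stash", "bob")];

    let validator_count = initial_authorities.len() as u32 * 2;
    let minimum_validator_count = initial_authorities.len() as u32;
    let stakers: Vec<(AccountId, AccountId, Balance, StakerStatus)> = initial_authorities
        .iter()
        .map(|x| (x.0, x.1, STASH, StakerStatus::Validator))
        .collect();

    assert_eq!(validator_count, 4);
    assert_eq!(minimum_validator_count, 2);
    assert_eq!(stakers.len(), 2);
}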
chain_spec.rs
// Copyright 2018-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. //! Substrate chain configurations. use grandpa_primitives::AuthorityId as GrandpaId; use hex_literal::hex; use node_runtime::constants::currency::*; use node_runtime::Block; use node_runtime::{ AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, GrandpaConfig, ImOnlineConfig, IndicesConfig, KtonConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, SudoConfig, SystemConfig, WASM_BINARY, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sc_chain_spec::ChainSpecExtension; use sc_service::Properties; use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; use sp_runtime::{ traits::{IdentifyAccount, Verify}, Perbill, }; pub use node_primitives::{AccountId, Balance, Signature}; pub use node_runtime::GenesisConfig; type AccountPublic = <Signature as Verify>::Signer; const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Node `ChainSpec` extensions. /// /// Additional parameters for some Substrate core modules, /// customizable from the chain spec. #[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)] pub struct Extensions { /// Block numbers with known hashes. pub fork_blocks: sc_client::ForkBlocks<Block>, } /// Specialized `ChainSpec`. 
pub type ChainSpec = sc_service::ChainSpec<GenesisConfig, Extensions>; /// IceFrog testnet generator pub fn icefrog_testnet_config() -> Result<ChainSpec, String> { ChainSpec::from_json_bytes(&include_bytes!("../res/icefrog.json")[..]) } fn session_keys( grandpa: GrandpaId, babe: BabeId, im_online: ImOnlineId, authority_discovery: AuthorityDiscoveryId, ) -> SessionKeys { SessionKeys { grandpa, babe, im_online, authority_discovery, } } /// Helper function to generate a crypto pair from seed pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) .expect("static values are valid; qed") .public() } /// Helper function to generate an account ID from seed pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId where AccountPublic: From<<TPublic::Pair as Pair>::Public>, { AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account() } /// Helper function to generate stash, controller and session keys from seed pub fn get_authority_keys_from_seed( seed: &str, ) -> ( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, ) { ( get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)), get_account_id_from_seed::<sr25519::Public>(seed), get_from_seed::<GrandpaId>(seed), get_from_seed::<BabeId>(seed), get_from_seed::<ImOnlineId>(seed), get_from_seed::<AuthorityDiscoveryId>(seed), ) } /// Helper function to create GenesisConfig for Darwinia /// is_testnet: on a test net we use Alice & Bob as seeds to generate keys, /// but in a production environment these accounts use preset keys pub fn darwinia_genesis( initial_authorities: Vec<( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, )>, root_key: AccountId, endowed_accounts: Vec<AccountId>, enable_println: bool, is_testnet: bool, ) -> GenesisConfig { let eth_relay_authorities: Vec<AccountId> = if is_testnet { vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), ] } else { vec![initial_authorities[0].clone().1, initial_authorities[1].clone().1] }; const RING_ENDOWMENT: Balance = 20_000_000 * COIN; const KTON_ENDOWMENT: Balance = 10 * COIN; const STASH: Balance = 1000 * COIN; GenesisConfig { frame_system: Some(SystemConfig { code: WASM_BINARY.to_vec(), changes_trie_config: Default::default(), }), pallet_indices: Some(IndicesConfig { ids: endowed_accounts .iter() .cloned() .chain(initial_authorities.iter().map(|x| x.0.clone())) .collect::<Vec<_>>(), }), pallet_session: Some(SessionConfig { keys: initial_authorities .iter() .map(|x| { ( x.0.clone(), session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()), ) }) .collect::<Vec<_>>(), }), // pallet_democracy: Some(DemocracyConfig::default()), // pallet_collective_Instance1: Some(CouncilConfig { // members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(), // phantom: Default::default(), // }), // pallet_collective_Instance2: Some(TechnicalCommitteeConfig { // members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(), // phantom: Default::default(), // }), pallet_contracts: Some(ContractsConfig { current_schedule: pallet_contracts::Schedule { enable_println, // this should only be enabled on development chains ..Default::default() }, gas_price: 1 * MILLI, }), pallet_sudo: Some(SudoConfig { key: root_key }), pallet_babe: Some(BabeConfig { authorities: vec![] }), pallet_im_online:
Some(ImOnlineConfig { keys: vec![] }), pallet_authority_discovery: Some(AuthorityDiscoveryConfig { keys: vec![] }), pallet_grandpa: Some(GrandpaConfig { authorities: vec![] }), // pallet_membership_Instance1: Some(Default::default()), // pallet_treasury: Some(Default::default()), pallet_ring: Some(BalancesConfig { balances: endowed_accounts .iter() .cloned() .map(|k| (k, RING_ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), vesting: vec![], }), pallet_kton: Some(KtonConfig { balances: endowed_accounts .iter() .cloned() .map(|k| (k, KTON_ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), vesting: vec![], }), pallet_staking: Some(StakingConfig { current_era: 0, validator_count: initial_authorities.len() as u32 * 2, minimum_validator_count: initial_authorities.len() as u32, stakers: initial_authorities .iter() .map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator)) .collect(), invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), slash_reward_fraction: Perbill::from_percent(10), ..Default::default() }), } } /// Staging testnet config. pub fn staging_testnet_config() -> ChainSpec { fn staging_testnet_config_genesis() -> GenesisConfig { // stash, controller, session-key // generated with secret: // for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done // and // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done let initial_authorities: Vec<( AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, )> = vec![ ( // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), ), ( // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), ), ( // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 
hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), ), ( // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), ), ]; // generated with secret: subkey inspect "$secret"/fir let root_key: AccountId = hex![ // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" ] .into(); let endowed_accounts: Vec<AccountId> = vec![root_key.clone()]; darwinia_genesis(initial_authorities, root_key, endowed_accounts, false, true) } let boot_nodes = vec![]; ChainSpec::from_genesis( "Staging Testnet", "staging_testnet", staging_testnet_config_genesis, boot_nodes, Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), None, None, Default::default(), ) } /// Development config (single validator Alice) pub fn development_config() -> ChainSpec { fn development_config_genesis() -> GenesisConfig { darwinia_genesis( vec![get_authority_keys_from_seed("Alice")], get_account_id_from_seed::<sr25519::Public>("Alice"), vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), get_account_id_from_seed::<sr25519::Public>("Charlie"), get_account_id_from_seed::<sr25519::Public>("Dave"), get_account_id_from_seed::<sr25519::Public>("Eve"), get_account_id_from_seed::<sr25519::Public>("Ferdie"), get_account_id_from_seed::<sr25519::Public>("Alice//stash"), get_account_id_from_seed::<sr25519::Public>("Bob//stash"), get_account_id_from_seed::<sr25519::Public>("Charlie//stash"), get_account_id_from_seed::<sr25519::Public>("Dave//stash"), get_account_id_from_seed::<sr25519::Public>("Eve//stash"), get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"), ], true, true, ) } ChainSpec::from_genesis( "Development", "dev", development_config_genesis, vec![], None, None, None, Default::default(), ) } /// IceFrog local testnet config (multivalidator Alice + Bob) pub fn local_testnet_config() -> ChainSpec { fn
() -> GenesisConfig { darwinia_genesis( vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), ], hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), // 5FpQFHfKd1xQ9HLZLQoG1JAQSCJoUEVBELnKsKNcuRLZejJR vec![ hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(), hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(), hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(), hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(), hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(), hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(), hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(), hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(), hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(), ], true, true, ) } ChainSpec::from_genesis( "Darwinia IceFrog Testnet", "icefrog_testnet", icefrog_config_genesis, vec![], Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), Some("DAR"), { let mut properties = Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenDecimals".into(), 9.into()); properties.insert("tokenSymbol".into(), "IRING".into()); properties.insert("ktonTokenDecimals".into(), 9.into()); properties.insert("ktonTokenSymbol".into(), "IKTON".into()); Some(properties) }, Default::default(), ) } /// IceFrog testnet config generator pub fn gen_icefrog_testnet_config() -> ChainSpec { fn icefrog_config_genesis() -> GenesisConfig { darwinia_genesis( vec![ ( hex!["be3fd892bf0e2b33dbfcf298c99a9f71e631a57af6c017dc5ac078c5d5b3494b"].into(), //stash hex!["70bf51d123581d6e51af70b342cac75ae0a0fc71d1a8d388719139af9c042b18"].into(), get_from_seed::<GrandpaId>("Alice"), get_from_seed::<BabeId>("Alice"), get_from_seed::<ImOnlineId>("Alice"), get_from_seed::<AuthorityDiscoveryId>("Alice"), ), ( hex!["e2f560c01a2d8e98d313d6799185c28a39e10896332b56304ff46392f585024c"].into(), //stash hex!["94c51178449c09eec77918ea951fa3244f7b841eea1dd1489d2b5f2a53f8840f"].into(), get_from_seed::<GrandpaId>("Bob"), get_from_seed::<BabeId>("Bob"), get_from_seed::<ImOnlineId>("Bob"), get_from_seed::<AuthorityDiscoveryId>("Bob"), ), ], hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), vec![ hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(), hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(), hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(), hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(), hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(), hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(), hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(), hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(), hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(), ], true, false, ) } ChainSpec::from_genesis( "Darwinia IceFrog Testnet", "icefrog_testnet", icefrog_config_genesis, vec![], 
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), Some("DAR"), { let mut properties = Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenDecimals".into(), 9.into()); properties.insert("tokenSymbol".into(), "IRING".into()); properties.insert("ktonTokenDecimals".into(), 9.into()); properties.insert("ktonTokenSymbol".into(), "IKTON".into()); Some(properties) }, Default::default(), ) }
icefrog_config_genesis
identifier_name
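This `identifier_name` sample masks the inner genesis function's name (`icefrog_config_genesis`). The helpers visible in its prefix all build sURI derivation paths of the form `//{seed}`, with a stash key one level deeper at `{seed}//stash`. A small sketch of just the path convention, using plain `String`s as an assumption; the real helpers feed these paths into `sp_core` pair types:

// Sketch of the "//{seed}" derivation-path convention behind get_*_from_seed.
fn dev_path(seed: &str) -> String {
    format!("//{}", seed)
}

fn authority_paths(seed: &str) -> (String, String) {
    // (stash, controller) mirrors get_authority_keys_from_seed's first two fields.
    (dev_path(&format!("{}//stash", seed)), dev_path(seed))
}

fn main() {
    let (stash, controller) = authority_paths("Alice");
    assert_eq!(stash, "//Alice//stash");
    assert_eq!(controller, "//Alice");
}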
ext.rs
//! Safe wrappers around externality invocations. use wasm_std::{ self, types::{H256, U256, Address} }; /// Generic wasm error #[derive(Debug)] pub struct Error; mod external { extern "C" { // Various call variants /// Direct/classic call. /// Corresponds to "CALL" opcode in EVM pub fn ccall( gas: i64, address: *const u8, val_ptr: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Delegate call. /// Corresponds to "CALLCODE" opcode in EVM pub fn dcall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Static call. /// Corresponds to "STATICCALL" opcode in EVM pub fn scall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; // environmental blockchain functions (runtime might not provide all of these!) pub fn blockhash(number: i64, dest: *mut u8); pub fn balance(address: *const u8, dest: *mut u8); pub fn coinbase(dest: *mut u8); pub fn timestamp() -> i64; pub fn blocknumber() -> i64; pub fn difficulty(dest: *mut u8); pub fn gaslimit(dest: *mut u8); #[cfg(feature = "kip6")] pub fn gasleft() -> i64; pub fn sender(dest: *mut u8); pub fn address(dest: *mut u8); pub fn value(dest: *mut u8); pub fn origin(dest: *mut u8); pub fn elog( topic_ptr: *const u8, topic_count: u32, data_ptr: *const u8, data_len: u32 ); pub fn create( endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; #[cfg(feature = "kip4")] pub fn create2( endowment: *const u8, salt: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; pub fn suicide(refund: *const u8) -> !; pub fn ret(ptr: *const u8, len: u32) -> !; pub fn input_length() -> u32; pub fn fetch_input(dst: *mut u8); } } /// Halt execution and register account for deletion. /// /// Value of the current account will be transferred to `refund` address. pub fn suicide(refund: &Address) -> ! { unsafe { external::suicide(refund.as_ptr()); } } /// Get balance of the given account. /// /// If an account is not registered in the chain yet, /// it is considered as an account with `balance = 0`. pub fn balance(address: &Address) -> U256 { unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) } } /// Create a new account with the given code /// /// # Errors /// /// Returns [`Error`] in case contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::zero(); unsafe { if external::create( endowment_arr.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } #[cfg(feature = "kip4")] /// Create a new account with the given code and salt, requires KIP-4. /// /// # Errors /// /// Returns [`Error`] in case contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::new(); unsafe { if external::create2( endowment_arr.as_ptr(), salt.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } /// Message-call into an account /// /// # Arguments: /// * `gas` - a gas limit for a call.
A call execution will halt if the call exceeds this amount /// * `address` - an address of contract to send a call /// * `value` - a value in Wei to send with a call /// * `input` - data to send with a call /// * `result` - a mutable reference to be filled with result data /// /// # Returns: /// /// The call succeeded if it returns `Result::Ok(())` /// If the call returns `Result::Err(Error)` it means the call failed due to execution halting pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> { let mut value_arr = [0u8; 32]; value.to_big_endian(&mut value_arr); unsafe { if external::ccall( gas as i64, address.as_ptr(), value_arr.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but with code at the given `address` /// /// Effectively this function is like calling current account but with /// different code (i.e. like `DELEGATECALL` EVM instruction). /// /// [`call`]: fn.call.html pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::dcall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but this call and any of its subcalls are disallowed to modify any storage. /// /// It will return an error in this case. /// /// [`call`]: fn.call.html pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::scall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Returns hash of the given block or H256::zero() /// /// Only works for 256 most recent blocks excluding current /// Returns H256::zero() in case of failure pub fn block_hash(block_number: u64) -> H256 { let mut res = H256::zero(); unsafe { external::blockhash(block_number as i64, res.as_mut_ptr()) } res } /// Get the current block’s beneficiary address (the current miner account address) pub fn coinbase() -> Address {
/// Get the block's timestamp /// /// It can be viewed as an output of Unix's `time()` function at /// current block's inception. pub fn timestamp() -> u64 { unsafe { external::timestamp() as u64 } } /// Get the block's number /// /// This value represents number of ancestor blocks. /// The genesis block has a number of zero. pub fn block_number() -> u64 { unsafe { external::blocknumber() as u64 } } /// Get the block's difficulty. pub fn difficulty() -> U256 { unsafe { fetch_u256(|x| external::difficulty(x) ) } } /// Get the block's gas limit. pub fn gas_limit() -> U256 { unsafe { fetch_u256(|x| external::gaslimit(x) ) } } #[cfg(feature = "kip6")] /// Get amount of gas left. pub fn gas_left() -> u64 { unsafe { external::gasleft() as u64 } } /// Get caller address /// /// This is the address of the account that is directly responsible for this execution. /// Use [`origin`] to get an address of external account - an original initiator of a transaction pub fn sender() -> Address { unsafe { fetch_address(|x| external::sender(x) ) } } /// Get execution origination address /// /// This is the sender of the original transaction. /// It can only be an external account, not a contract pub fn origin() -> Address { unsafe { fetch_address(|x| external::origin(x) ) } } /// Get deposited value by the instruction/transaction responsible for this execution. pub fn value() -> U256 { unsafe { fetch_u256(|x| external::value(x) ) } } /// Get address of currently executing account pub fn address() -> Address { unsafe { fetch_address(|x| external::address(x) ) } } /// Creates log entry with given topics and data. /// /// There can be only up to 4 topics. /// /// # Panics /// /// If `topics` contains more than 4 elements then this function will trap. pub fn log(topics: &[H256], data: &[u8]) { unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); } } /// Allocates and requests [`call`] arguments (input) /// /// Input data comes either with external transaction or from [`call`] input value. pub fn input() -> wasm_std::Vec<u8> { let len = unsafe { external::input_length() }; match len { 0 => wasm_std::Vec::new(), non_zero => { let mut data = wasm_std::Vec::with_capacity(non_zero as usize); unsafe { data.set_len(non_zero as usize); external::fetch_input(data.as_mut_ptr()); } data } } } /// Sets a [`call`] return value /// /// Pass return data to the runtime. Runtime SHOULD trap the execution. /// pub fn ret(data: &[u8]) -> ! { unsafe { external::ret(data.as_ptr(), data.len() as u32); } } unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) { let mut res = Address::zero(); f(res.as_mut_ptr()); res } unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) { let mut res = [0u8; 32]; f(res.as_mut_ptr()); U256::from_big_endian(&res) }
unsafe { fetch_address(|x| external::coinbase(x) ) } }
identifier_body
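The masked body above is `coinbase`'s one-liner: `fetch_address` allocates a zeroed address, hands the host a raw pointer through a closure, and returns the filled buffer. A self-contained model of that write-through-pointer pattern; `ADDR_LEN` and `write_addr` are stand-ins (assumptions) for the 20-byte `Address` type and an extern such as `external::coinbase`:

// Model of the fetch_address/fetch_u256 pattern: the callee writes a
// fixed-size value through a raw pointer, and a closure adapts each extern.
const ADDR_LEN: usize = 20;

unsafe fn fetch_address<F: Fn(*mut u8)>(f: F) -> [u8; ADDR_LEN] {
    let mut res = [0u8; ADDR_LEN];
    f(res.as_mut_ptr());
    res
}

unsafe fn write_addr(dest: *mut u8) {
    // Pretend host call filling the buffer byte by byte.
    for i in 0..ADDR_LEN {
        *dest.add(i) = i as u8;
    }
}

fn main() {
    let addr = unsafe { fetch_address(|x| write_addr(x)) };
    assert_eq!(addr[1], 1);
}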
ext.rs
//! Safe wrappers around externality invocations. use wasm_std::{ self, types::{H256, U256, Address} }; /// Generic wasm error #[derive(Debug)] pub struct Error; mod external { extern "C" { // Various call variants /// Direct/classic call. /// Corresponds to "CALL" opcode in EVM pub fn ccall( gas: i64, address: *const u8, val_ptr: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Delegate call. /// Corresponds to "CALLCODE" opcode in EVM pub fn dcall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Static call. /// Corresponds to "STATICCALL" opcode in EVM pub fn scall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; // environmental blockchain functions (runtime might not provide all of these!) pub fn blockhash(number: i64, dest: *mut u8); pub fn balance(address: *const u8, dest: *mut u8); pub fn coinbase(dest: *mut u8); pub fn timestamp() -> i64; pub fn blocknumber() -> i64; pub fn difficulty(dest: *mut u8); pub fn gaslimit(dest: *mut u8); #[cfg(feature = "kip6")] pub fn gasleft() -> i64; pub fn sender(dest: *mut u8); pub fn address(dest: *mut u8); pub fn value(dest: *mut u8); pub fn origin(dest: *mut u8); pub fn elog( topic_ptr: *const u8, topic_count: u32, data_ptr: *const u8, data_len: u32 ); pub fn create( endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; #[cfg(feature = "kip4")] pub fn create2( endowment: *const u8, salt: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; pub fn suicide(refund: *const u8) -> !; pub fn ret(ptr: *const u8, len: u32) -> !; pub fn input_length() -> u32; pub fn fetch_input(dst: *mut u8); } } /// Halt execution and register account for deletion. /// /// Value of the current account will be transferred to `refund` address. pub fn suicide(refund: &Address) -> ! { unsafe { external::suicide(refund.as_ptr()); } } /// Get balance of the given account. /// /// If an account is not registered in the chain yet, /// it is considered as an account with `balance = 0`. pub fn balance(address: &Address) -> U256 { unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) } } /// Create a new account with the given code /// /// # Errors /// /// Returns [`Error`] in case contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::zero(); unsafe { if external::create( endowment_arr.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } #[cfg(feature = "kip4")] /// Create a new account with the given code and salt, requires KIP-4. /// /// # Errors /// /// Returns [`Error`] in case contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::new(); unsafe { if external::create2( endowment_arr.as_ptr(), salt.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } /// Message-call into an account /// /// # Arguments: /// * `gas` - a gas limit for a call.
A call execution will halt if the call exceeds this amount /// * `address` - an address of contract to send a call /// * `value` - a value in Wei to send with a call /// * `input` - data to send with a call /// * `result` - a mutable reference to be filled with result data /// /// # Returns: /// /// The call succeeded if it returns `Result::Ok(())` /// If the call returns `Result::Err(Error)` it means the call failed due to execution halting pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> { let mut value_arr = [0u8; 32]; value.to_big_endian(&mut value_arr); unsafe { if external::ccall( gas as i64, address.as_ptr(), value_arr.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but with code at the given `address` /// /// Effectively this function is like calling current account but with /// different code (i.e. like `DELEGATECALL` EVM instruction). /// /// [`call`]: fn.call.html pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::dcall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but this call and any of its subcalls are disallowed to modify any storage. /// /// It will return an error in this case. /// /// [`call`]: fn.call.html pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::scall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0
else { Err(Error) } } } /// Returns hash of the given block or H256::zero() /// /// Only works for 256 most recent blocks excluding current /// Returns H256::zero() in case of failure pub fn block_hash(block_number: u64) -> H256 { let mut res = H256::zero(); unsafe { external::blockhash(block_number as i64, res.as_mut_ptr()) } res } /// Get the current block’s beneficiary address (the current miner account address) pub fn coinbase() -> Address { unsafe { fetch_address(|x| external::coinbase(x) ) } } /// Get the block's timestamp /// /// It can be viewed as an output of Unix's `time()` function at /// current block's inception. pub fn timestamp() -> u64 { unsafe { external::timestamp() as u64 } } /// Get the block's number /// /// This value represents number of ancestor blocks. /// The genesis block has a number of zero. pub fn block_number() -> u64 { unsafe { external::blocknumber() as u64 } } /// Get the block's difficulty. pub fn difficulty() -> U256 { unsafe { fetch_u256(|x| external::difficulty(x) ) } } /// Get the block's gas limit. pub fn gas_limit() -> U256 { unsafe { fetch_u256(|x| external::gaslimit(x) ) } } #[cfg(feature = "kip6")] /// Get amount of gas left. pub fn gas_left() -> u64 { unsafe { external::gasleft() as u64 } } /// Get caller address /// /// This is the address of the account that is directly responsible for this execution. /// Use [`origin`] to get an address of external account - an original initiator of a transaction pub fn sender() -> Address { unsafe { fetch_address(|x| external::sender(x) ) } } /// Get execution origination address /// /// This is the sender of the original transaction. /// It can only be an external account, not a contract pub fn origin() -> Address { unsafe { fetch_address(|x| external::origin(x) ) } } /// Get deposited value by the instruction/transaction responsible for this execution. pub fn value() -> U256 { unsafe { fetch_u256(|x| external::value(x) ) } } /// Get address of currently executing account pub fn address() -> Address { unsafe { fetch_address(|x| external::address(x) ) } } /// Creates log entry with given topics and data. /// /// There can be only up to 4 topics. /// /// # Panics /// /// If `topics` contains more than 4 elements then this function will trap. pub fn log(topics: &[H256], data: &[u8]) { unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); } } /// Allocates and requests [`call`] arguments (input) /// /// Input data comes either with external transaction or from [`call`] input value. pub fn input() -> wasm_std::Vec<u8> { let len = unsafe { external::input_length() }; match len { 0 => wasm_std::Vec::new(), non_zero => { let mut data = wasm_std::Vec::with_capacity(non_zero as usize); unsafe { data.set_len(non_zero as usize); external::fetch_input(data.as_mut_ptr()); } data } } } /// Sets a [`call`] return value /// /// Pass return data to the runtime. Runtime SHOULD trap the execution. /// pub fn ret(data: &[u8]) -> ! { unsafe { external::ret(data.as_ptr(), data.len() as u32); } } unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) { let mut res = Address::zero(); f(res.as_mut_ptr()); res } unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) { let mut res = [0u8; 32]; f(res.as_mut_ptr()); U256::from_big_endian(&res) }
{ Ok(()) }
conditional_block
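The masked middle here is the `{ Ok(()) }` arm of `static_call`: all three call wrappers translate the host's i32 status into a `Result`, with 0 meaning success and anything else mapped to `Err(Error)`. A tiny sketch of that convention; `host_scall` is a hypothetical stand-in for the raw extern:

// Sketch of the 0-means-success convention used by the c/d/scall wrappers.
#[derive(Debug)]
struct Error;

fn host_scall(ok: bool) -> i32 {
    // Pretend host: 0 on success, nonzero on trap/revert.
    if ok { 0 } else { 1 }
}

fn static_call(ok: bool) -> Result<(), Error> {
    if host_scall(ok) == 0 {
        Ok(())
    } else {
        Err(Error)
    }
}

fn main() {
    assert!(static_call(true).is_ok());
    assert!(static_call(false).is_err());
}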
ext.rs
//! Safe wrappers around externality invocations. use wasm_std::{ self, types::{H256, U256, Address} }; /// Generic wasm error #[derive(Debug)] pub struct Error; mod external { extern "C" { // Various call variants /// Direct/classic call. /// Corresponds to "CALL" opcode in EVM pub fn ccall( gas: i64, address: *const u8, val_ptr: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Delegate call. /// Corresponds to "CALLCODE" opcode in EVM pub fn dcall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Static call. /// Corresponds to "STATICCALL" opcode in EVM pub fn scall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; // environmental blockchain functions (runtime might not provide all of these!) pub fn blockhash(number: i64, dest: *mut u8); pub fn balance(address: *const u8, dest: *mut u8); pub fn coinbase(dest: *mut u8); pub fn timestamp() -> i64; pub fn blocknumber() -> i64; pub fn difficulty(dest: *mut u8); pub fn gaslimit(dest: *mut u8); #[cfg(feature = "kip6")] pub fn gasleft() -> i64; pub fn sender(dest: *mut u8); pub fn address(dest: *mut u8); pub fn value(dest: *mut u8); pub fn origin(dest: *mut u8); pub fn elog( topic_ptr: *const u8, topic_count: u32, data_ptr: *const u8, data_len: u32 ); pub fn create( endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; #[cfg(feature = "kip4")] pub fn create2( endowment: *const u8, salt: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; pub fn suicide(refund: *const u8) -> !; pub fn ret(ptr: *const u8, len: u32) -> !; pub fn input_length() -> u32; pub fn fetch_input(dst: *mut u8); } } /// Halt execution and register account for deletion. /// /// Value of the current account will be transferred to `refund` address. pub fn suicide(refund: &Address) -> ! { unsafe { external::suicide(refund.as_ptr()); } } /// Get balance of the given account. /// /// If an account is not registered in the chain yet, /// it is considered as an account with `balance = 0`. pub fn balance(address: &Address) -> U256 { unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) } } /// Create a new account with the given code /// /// # Errors /// /// Returns [`Error`] in case contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::zero(); unsafe {
if external::create( endowment_arr.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } #[cfg(feature = "kip4")] /// Create a new account with the given code and salt, requires KIP-4. /// /// # Errors /// /// Returns [`Error`] in case the contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::new(); unsafe { if external::create2( endowment_arr.as_ptr(), salt.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } /// Message-call into an account /// /// # Arguments: /// * `gas` - a gas limit for the call; execution will halt if the call exceeds this amount /// * `address` - the address of the contract to call /// * `value` - the value in Wei to send with the call /// * `input` - the data to send with the call /// * `result` - a mutable buffer to be filled with the result data /// /// # Returns: /// /// The call succeeded if it returns `Result::Ok(())`. /// If it returns `Result::Err(Error)`, the call failed due to execution halting pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> { let mut value_arr = [0u8; 32]; value.to_big_endian(&mut value_arr); unsafe { if external::ccall( gas as i64, address.as_ptr(), value_arr.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but with code at the given `address` /// /// Effectively this function is like calling the current account but with /// different code (i.e. like the `DELEGATECALL` EVM instruction). /// /// [`call`]: fn.call.html pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::dcall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but this call and any of its subcalls are disallowed from modifying any storage. /// /// It will return an error in that case. /// /// [`call`]: fn.call.html pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::scall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Returns the hash of the given block, or H256::zero() /// /// Only works for the 256 most recent blocks, excluding the current one. /// Returns H256::zero() in case of failure pub fn block_hash(block_number: u64) -> H256 { let mut res = H256::zero(); unsafe { external::blockhash(block_number as i64, res.as_mut_ptr()) } res } /// Get the current block’s beneficiary address (the current miner account address) pub fn coinbase() -> Address { unsafe { fetch_address(|x| external::coinbase(x) ) } } /// Get the block's timestamp /// /// It can be viewed as the output of Unix's `time()` function at /// the current block's inception. pub fn timestamp() -> u64 { unsafe { external::timestamp() as u64 } } /// Get the block's number /// /// This value represents the number of ancestor blocks. /// The genesis block has a number of zero. pub fn block_number() -> u64 { unsafe { external::blocknumber() as u64 } } /// Get the block's difficulty.
pub fn difficulty() -> U256 { unsafe { fetch_u256(|x| external::difficulty(x) ) } } /// Get the block's gas limit. pub fn gas_limit() -> U256 { unsafe { fetch_u256(|x| external::gaslimit(x) ) } } #[cfg(feature = "kip6")] /// Get amount of gas left. pub fn gas_left() -> u64 { unsafe { external::gasleft() as u64 } } /// Get caller address /// /// This is the address of the account that is directly responsible for this execution. /// Use [`origin`] to get an address of external account - an original initiator of a transaction pub fn sender() -> Address { unsafe { fetch_address(|x| external::sender(x) ) } } /// Get execution origination address /// /// This is the sender of original transaction. /// It could be only external account, not a contract pub fn origin() -> Address { unsafe { fetch_address(|x| external::origin(x) ) } } /// Get deposited value by the instruction/transaction responsible for this execution. pub fn value() -> U256 { unsafe { fetch_u256(|x| external::value(x) ) } } /// Get address of currently executing account pub fn address() -> Address { unsafe { fetch_address(|x| external::address(x) ) } } /// Creates log entry with given topics and data. /// /// There could be only up to 4 topics. /// /// # Panics /// /// If `topics` contains more than 4 elements then this function will trap. pub fn log(topics: &[H256], data: &[u8]) { unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); } } /// Allocates and requests [`call`] arguments (input) /// /// Input data comes either with external transaction or from [`call`] input value. pub fn input() -> wasm_std::Vec<u8> { let len = unsafe { external::input_length() }; match len { 0 => wasm_std::Vec::new(), non_zero => { let mut data = wasm_std::Vec::with_capacity(non_zero as usize); unsafe { data.set_len(non_zero as usize); external::fetch_input(data.as_mut_ptr()); } data } } } /// Sets a [`call`] return value /// /// Pass return data to the runtime. Runtime SHOULD trap the execution. /// pub fn ret(data: &[u8]) ->! { unsafe { external::ret(data.as_ptr(), data.len() as u32); } } unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) { let mut res = Address::zero(); f(res.as_mut_ptr()); res } unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) { let mut res = [0u8; 32]; f(res.as_mut_ptr()); U256::from_big_endian(&res) }
random_line_split
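The `fetch_address`/`fetch_u256` helpers at the bottom of this file share one trick: hand a raw pointer into a closure that plays the host function, then decode the buffer it filled. A runnable sketch under that assumption, with a toy `U256` and `mock_host_write` standing in for `wasm_std::types::U256` and a real extern:

```rust
/// Toy stand-in for wasm_std's U256: 32 big-endian bytes.
struct U256([u8; 32]);

impl U256 {
    fn from_big_endian(bytes: &[u8; 32]) -> Self {
        U256(*bytes)
    }
    fn low_u64(&self) -> u64 {
        let mut b = [0u8; 8];
        b.copy_from_slice(&self.0[24..]);
        u64::from_be_bytes(b)
    }
}

/// The `fetch_u256` pattern: the closure plays the role of a host
/// function that fills the destination buffer through a raw pointer.
unsafe fn fetch_u256<F: Fn(*mut u8)>(f: F) -> U256 {
    let mut res = [0u8; 32];
    f(res.as_mut_ptr());
    U256::from_big_endian(&res)
}

/// Mock host function: writes a big-endian 42 into the buffer.
fn mock_host_write(dst: *mut u8) {
    unsafe { *dst.add(31) = 42 }
}

fn main() {
    let value = unsafe { fetch_u256(mock_host_write) };
    assert_eq!(value.low_u64(), 42);
}
```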
ext.rs
//! Safe wrappers around externality invocations. use wasm_std::{ self, types::{H256, U256, Address} }; /// Generic wasm error #[derive(Debug)] pub struct Error; mod external { extern "C" { // Various call variants /// Direct/classic call. /// Corresponds to "CALL" opcode in EVM pub fn ccall( gas: i64, address: *const u8, val_ptr: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Delegate call. /// Corresponds to "CALLCODE" opcode in EVM pub fn dcall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Static call. /// Corresponds to "STATICCALL" opcode in EVM pub fn scall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; // environmental blockchain functions (runtime might not provide all of these!) pub fn blockhash(number: i64, dest: *mut u8); pub fn balance(address: *const u8, dest: *mut u8); pub fn coinbase(dest: *mut u8); pub fn timestamp() -> i64; pub fn blocknumber() -> i64; pub fn difficulty(dest: *mut u8); pub fn gaslimit(dest: *mut u8); #[cfg(feature = "kip6")] pub fn gasleft() -> i64; pub fn sender(dest: *mut u8); pub fn address(dest: *mut u8); pub fn value(dest: *mut u8); pub fn origin(dest: *mut u8); pub fn elog( topic_ptr: *const u8, topic_count: u32, data_ptr: *const u8, data_len: u32 ); pub fn create( endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; #[cfg(feature = "kip4")] pub fn create2( endowment: *const u8, salt: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; pub fn suicide(refund: *const u8) ->!; pub fn ret(ptr: *const u8, len: u32) ->!; pub fn input_length() -> u32; pub fn fetch_input(dst: *mut u8); } } /// Halt execution and register account for deletion. /// /// The value of the current account will be transferred to the `refund` address. pub fn suicide(refund: &Address) ->! { unsafe { external::suicide(refund.as_ptr()); } } /// Get balance of the given account. /// /// If an account is not registered in the chain yet, /// it is considered an account with `balance = 0`. pub fn balance(address: &Address) -> U256 { unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) } } /// Create a new account with the given code /// /// # Errors /// /// Returns [`Error`] in case the contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::zero(); unsafe { if external::create( endowment_arr.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } #[cfg(feature = "kip4")] /// Create a new account with the given code and salt, requires KIP-4. /// /// # Errors /// /// Returns [`Error`] in case the contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::new(); unsafe { if external::create2( endowment_arr.as_ptr(), salt.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } /// Message-call into an account /// /// # Arguments: /// * `gas` - a gas limit for the call;
execution will halt if the call exceeds this amount /// * `address` - the address of the contract to call /// * `value` - the value in Wei to send with the call /// * `input` - the data to send with the call /// * `result` - a mutable buffer to be filled with the result data /// /// # Returns: /// /// The call succeeded if it returns `Result::Ok(())`. /// If it returns `Result::Err(Error)`, the call failed due to execution halting pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> { let mut value_arr = [0u8; 32]; value.to_big_endian(&mut value_arr); unsafe { if external::ccall( gas as i64, address.as_ptr(), value_arr.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but with code at the given `address` /// /// Effectively this function is like calling the current account but with /// different code (i.e. like the `DELEGATECALL` EVM instruction). /// /// [`call`]: fn.call.html pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::dcall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but this call and any of its subcalls are disallowed from modifying any storage. /// /// It will return an error in that case. /// /// [`call`]: fn.call.html pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::scall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Returns the hash of the given block, or H256::zero() /// /// Only works for the 256 most recent blocks, excluding the current one. /// Returns H256::zero() in case of failure pub fn block_hash(block_number: u64) -> H256 { let mut res = H256::zero(); unsafe { external::blockhash(block_number as i64, res.as_mut_ptr()) } res } /// Get the current block’s beneficiary address (the current miner account address) pub fn coinbase() -> Address { unsafe { fetch_address(|x| external::coinbase(x) ) } } /// Get the block's timestamp /// /// It can be viewed as the output of Unix's `time()` function at /// the current block's inception. pub fn timestamp() -> u64 { unsafe { external::timestamp() as u64 } } /// Get the block's number /// /// This value represents the number of ancestor blocks. /// The genesis block has a number of zero. pub fn bl
-> u64 { unsafe { external::blocknumber() as u64 } } /// Get the block's difficulty. pub fn difficulty() -> U256 { unsafe { fetch_u256(|x| external::difficulty(x) ) } } /// Get the block's gas limit. pub fn gas_limit() -> U256 { unsafe { fetch_u256(|x| external::gaslimit(x) ) } } #[cfg(feature = "kip6")] /// Get amount of gas left. pub fn gas_left() -> u64 { unsafe { external::gasleft() as u64 } } /// Get caller address /// /// This is the address of the account that is directly responsible for this execution. /// Use [`origin`] to get an address of external account - an original initiator of a transaction pub fn sender() -> Address { unsafe { fetch_address(|x| external::sender(x) ) } } /// Get execution origination address /// /// This is the sender of original transaction. /// It could be only external account, not a contract pub fn origin() -> Address { unsafe { fetch_address(|x| external::origin(x) ) } } /// Get deposited value by the instruction/transaction responsible for this execution. pub fn value() -> U256 { unsafe { fetch_u256(|x| external::value(x) ) } } /// Get address of currently executing account pub fn address() -> Address { unsafe { fetch_address(|x| external::address(x) ) } } /// Creates log entry with given topics and data. /// /// There could be only up to 4 topics. /// /// # Panics /// /// If `topics` contains more than 4 elements then this function will trap. pub fn log(topics: &[H256], data: &[u8]) { unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); } } /// Allocates and requests [`call`] arguments (input) /// /// Input data comes either with external transaction or from [`call`] input value. pub fn input() -> wasm_std::Vec<u8> { let len = unsafe { external::input_length() }; match len { 0 => wasm_std::Vec::new(), non_zero => { let mut data = wasm_std::Vec::with_capacity(non_zero as usize); unsafe { data.set_len(non_zero as usize); external::fetch_input(data.as_mut_ptr()); } data } } } /// Sets a [`call`] return value /// /// Pass return data to the runtime. Runtime SHOULD trap the execution. /// pub fn ret(data: &[u8]) ->! { unsafe { external::ret(data.as_ptr(), data.len() as u32); } } unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) { let mut res = Address::zero(); f(res.as_mut_ptr()); res } unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) { let mut res = [0u8; 32]; f(res.as_mut_ptr()); U256::from_big_endian(&res) }
ock_number()
identifier_name
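`input()` in the suffix above is a two-step protocol: ask the runtime for the payload length, allocate exactly that much, and let the host write into the buffer (the real code uses `with_capacity` + `set_len` + a raw pointer to skip zero-initialization). A safe approximation; `mock_input_length` and `mock_fetch_input` are hypothetical stand-ins for the `input_length`/`fetch_input` externs:

```rust
/// Hypothetical host side: a fixed payload for the demo.
const PAYLOAD: &[u8] = b"hello";

fn mock_input_length() -> u32 {
    PAYLOAD.len() as u32
}

fn mock_fetch_input(dst: &mut [u8]) {
    dst.copy_from_slice(PAYLOAD);
}

/// Query the length, allocate, then let the host fill the buffer.
/// `vec![0; n]` is the safe equivalent of the `with_capacity` +
/// `set_len` + `fetch_input` dance in the original.
fn input() -> Vec<u8> {
    match mock_input_length() {
        0 => Vec::new(),
        n => {
            let mut data = vec![0u8; n as usize];
            mock_fetch_input(&mut data);
            data
        }
    }
}

fn main() {
    assert_eq!(input(), b"hello");
}
```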
async_await_basics.rs
use futures::executor::block_on; use std::thread::Thread; use std::sync::mpsc; use futures::join; use { std::{ pin::Pin, task::Waker, thread, }, }; use { futures::{ future::{FutureExt, BoxFuture}, task::{ArcWake, waker_ref}, }, std::{ future::Future, sync::{Arc, Mutex}, sync::mpsc::{sync_channel, SyncSender, Receiver}, task::{Context, Poll}, time::Duration, }, }; fn async_await_basics_main() { println!("Hello, world!"); block_on(async_main()); let (executor, spawner) = new_executor_and_spawner(); // Spawn a task to print before and after waiting on a timer. spawner.spawn(async { println!("howdy!"); // Wait for our timer future to complete after two seconds. TimerFuture::new(Duration::new(2, 0)).await; println!("done!"); }); // Drop the spawner so that our executor knows it is finished and won't // receive more incoming tasks to run. drop(spawner); // Run the executor until the task queue is empty. // This will print "howdy!", pause, and then print "done!". executor.run(); } struct Song { name: String } impl Song { fn new() -> Song { Song { name: String::from("Hotel California") } } } async fn learn_song() -> Song { println!("Learning Song!"); //std::thread::sleep(Duration::from_secs(2)); println!("Good Progress!"); //std::thread::sleep(Duration::from_secs(1)); Song::new() } async fn sing_song(song: Song) { println!("Tune instruments! {}", song.name); //std::thread::sleep(Duration::from_secs(1)); println!("Singing Song! {}", song.name); } async fn dance() { println!("Dance!!") } async fn learn_and_sing() { let song = learn_song().await; sing_song(song).await; } async fn async_main() { let f2 = dance(); let f1 = learn_and_sing(); futures::join!(f2, f1); } // Each time a future is polled, it is polled as part of a "task". Tasks are the top-level futures // that have been submitted to an executor. // Waker provides a wake() method that can be used to tell the executor that the associated task // should be awoken. When wake() is called, the executor knows that the task associated with the Waker // is ready to make progress, and its future should be polled again. // Waker also implements clone() so that it can be copied around and stored. pub struct
{ shared_state: Arc<Mutex<SharedState>>, } /// Shared state between the future and the waiting thread struct SharedState { /// Whether or not the sleep time has elapsed completed: bool, /// The waker for the task that `TimerFuture` is running on. /// The thread can use this after setting `completed = true` to tell /// `TimerFuture`'s task to wake up, see that `completed = true`, and /// move forward. waker: Option<Waker>, } impl Future for TimerFuture { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // Look at the shared state to see if the timer has already completed. let mut shared_state = self.shared_state.lock().unwrap(); if shared_state.completed { Poll::Ready(()) } else { // Set waker so that the thread can wake up the current task // when the timer has completed, ensuring that the future is polled // again and sees that `completed = true`. // // It's tempting to do this once rather than repeatedly cloning // the waker each time. However, the `TimerFuture` can move between // tasks on the executor, which could cause a stale waker pointing // to the wrong task, preventing `TimerFuture` from waking up // correctly. // // N.B. it's possible to check for this using the `Waker::will_wake` // function, but we omit that here to keep things simple. shared_state.waker = Some(cx.waker().clone()); Poll::Pending } } } impl TimerFuture { /// Create a new `TimerFuture` which will complete after the provided /// timeout. pub fn new(duration: Duration) -> Self { let shared_state = Arc::new(Mutex::new(SharedState { completed: false, waker: None, })); // Spawn the new thread let thread_shared_state = shared_state.clone(); thread::spawn(move || { thread::sleep(duration); let mut shared_state = thread_shared_state.lock().unwrap(); // Signal that the timer has completed and wake up the last // task on which the future was polled, if one exists. shared_state.completed = true; if let Some(waker) = shared_state.waker.take() { waker.wake() } }); TimerFuture { shared_state } } } /// Task executor that receives tasks off of a channel and runs them. struct Executor { ready_queue: Receiver<Arc<Task>>, } /// `Spawner` spawns new futures onto the task channel. #[derive(Clone)] struct Spawner { task_sender: SyncSender<Arc<Task>>, } /// A future that can reschedule itself to be polled by an `Executor`. struct Task { /// In-progress future that should be pushed to completion. /// /// The `Mutex` is not necessary for correctness, since we only have /// one thread executing tasks at once. However, Rust isn't smart /// enough to know that `future` is only mutated from one thread, /// so we need use the `Mutex` to prove thread-safety. A production /// executor would not need this, and could use `UnsafeCell` instead. future: Mutex<Option<BoxFuture<'static, ()>>>, /// Handle to place the task itself back onto the task queue. task_sender: SyncSender<Arc<Task>>, } fn new_executor_and_spawner() -> (Executor, Spawner) { // Maximum number of tasks to allow queueing in the channel at once. // This is just to make `sync_channel` happy, and wouldn't be present in // a real executor. 
const MAX_QUEUED_TASKS: usize = 10_000; let (task_sender, ready_queue) = sync_channel(MAX_QUEUED_TASKS); (Executor { ready_queue }, Spawner { task_sender }) } impl Spawner { fn spawn(&self, future: impl Future<Output=()> +'static + Send) { let future = future.boxed(); let task = Arc::new(Task { future: Mutex::new(Some(future)), task_sender: self.task_sender.clone(), }); self.task_sender.send(task).expect("too many tasks queued"); } } // To poll futures, we'll need to create a Waker. As discussed in the task wakeups section, Wakers // are responsible for scheduling a task to be polled again once wake is called. Remember that Wakers // tell the executor exactly which task has become ready, allowing them to poll just the futures that // are ready to make progress. The easiest way to create a new Waker is by implementing the ArcWake // trait and then using the waker_ref or.into_waker() functions to turn an Arc<impl ArcWake> into a // Waker. impl ArcWake for Task { fn wake_by_ref(arc_self: &Arc<Self>) { // Implement `wake` by sending this task back onto the task channel // so that it will be polled again by the executor. let cloned = arc_self.clone(); arc_self.task_sender.send(cloned).expect("too many tasks queued"); } } impl Executor { fn run(&self) { while let Ok(task) = self.ready_queue.recv() { // Take the future, and if it has not yet completed (is still Some), // poll it in an attempt to complete it. let mut future_slot = task.future.lock().unwrap(); if let Some(mut future) = future_slot.take() { // Create a `LocalWaker` from the task itself let waker = waker_ref(&task); let context = &mut Context::from_waker(&*waker); // `BoxFuture<T>` is a type alias for // `Pin<Box<dyn Future<Output = T> + Send +'static>>`. // We can get a `Pin<&mut dyn Future + Send +'static>` // from it by calling the `Pin::as_mut` method. if let Poll::Pending = future.as_mut().poll(context) { // We're not done processing the future, so put it // back in its task to be run again in the future. *future_slot = Some(future); } } } } } // In practice, this problem is solved through integration with an IO-aware system blocking primitive, // such as epoll on Linux, kqueue on FreeBSD and Mac OS, IOCP on Windows, and ports on Fuchsia (all // of which are exposed through the cross-platform Rust crate mio). These primitives all allow a // thread to block on multiple asynchronous IO events, returning once one of the events completes. // In practice, these APIs usually look something like this: /*struct IoBlocker { /*... */ } struct Event { // An ID uniquely identifying the event that occurred and was listened for. id: usize, // A set of signals to wait for, or which occurred. signals: Signals, } impl IoBlocker { /// Create a new collection of asynchronous IO events to block on. fn new() -> Self { /*... */ } /// Express an interest in a particular IO event. fn add_io_event_interest( &self, /// The object on which the event will occur io_object: &IoObject, /// A set of signals that may appear on the `io_object` for /// which an event should be triggered, paired with /// an ID to give to events that result from this interest. event: Event, ) { /*... */ } /// Block until one of the events occurs. fn block(&self) -> Event { /*... */ } } let mut io_blocker = IoBlocker::new(); io_blocker.add_io_event_interest( &socket_1, Event { id: 1, signals: READABLE }, ); io_blocker.add_io_event_interest( &socket_2, Event { id: 2, signals: READABLE | WRITABLE }, ); let event = io_blocker.block(); // prints e.g. 
"Socket 1 is now READABLE" if socket one became readable. println!("Socket {:?} is now {:?}", event.id, event.signals); */ /* Futures executors can use these primitives to provide asynchronous IO objects such as sockets that can configure callbacks to be run when a particular IO event occurs. In the case of our SocketRead example above, the Socket::set_readable_callback function might look like the following pseudocode: impl Socket { fn set_readable_callback(&self, waker: Waker) { // `local_executor` is a reference to the local executor. // this could be provided at creation of the socket, but in practice // many executor implementations pass it down through thread local // storage for convenience. let local_executor = self.local_executor; // Unique ID for this IO object. let id = self.id; // Store the local waker in the executor's map so that it can be called // once the IO event arrives. local_executor.event_map.insert(id, waker); local_executor.add_io_event_interest( &self.socket_file_descriptor, Event { id, signals: READABLE }, ); } } We can now have just one executor thread which can receive and dispatch any IO event to the appropriate Waker, which will wake up the corresponding task, allowing the executor to drive more tasks to completion before returning to check for more IO events (and the cycle continues...). */ // Similarly, it isn't a good idea to hold a traditional non-futures-aware lock across an.await, as // it can cause the threadpool to lock up: one task could take out a lock,.await and yield to the // executor, allowing another task to attempt to take the lock and cause a deadlock. To avoid this, // use the Mutex in futures::lock rather than the one from std::sync.
TimerFuture
identifier_name
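`TimerFuture`, the identifier this row asks for, is the canonical hand-written future: store the latest `Waker`, return `Poll::Pending` until a thread flips `completed`. The same `Future` trait mechanics in their smallest form, driven by the `futures::executor::block_on` the file already imports (a sketch, not part of the original source):

```rust
use futures::executor::block_on;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

/// A future that is ready on its very first poll, so it never needs
/// to register a waker the way `TimerFuture` does.
struct Ready(Option<u32>);

impl Future for Ready {
    type Output = u32;

    fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<u32> {
        // `take()` guards against being polled again after completion.
        Poll::Ready(self.0.take().expect("polled after completion"))
    }
}

fn main() {
    assert_eq!(block_on(Ready(Some(42))), 42);
}
```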
async_await_basics.rs
use futures::executor::block_on; use std::thread::Thread; use std::sync::mpsc; use futures::join; use { std::{ pin::Pin, task::Waker, thread, }, }; use { futures::{ future::{FutureExt, BoxFuture}, task::{ArcWake, waker_ref}, }, std::{ future::Future, sync::{Arc, Mutex}, sync::mpsc::{sync_channel, SyncSender, Receiver}, task::{Context, Poll}, time::Duration, }, }; fn async_await_basics_main() { println!("Hello, world!"); block_on(async_main()); let (executor, spawner) = new_executor_and_spawner(); // Spawn a task to print before and after waiting on a timer. spawner.spawn(async { println!("howdy!"); // Wait for our timer future to complete after two seconds. TimerFuture::new(Duration::new(2, 0)).await; println!("done!"); }); // Drop the spawner so that our executor knows it is finished and won't // receive more incoming tasks to run. drop(spawner); // Run the executor until the task queue is empty. // This will print "howdy!", pause, and then print "done!". executor.run(); } struct Song { name: String } impl Song { fn new() -> Song { Song { name: String::from("Hotel California") } } } async fn learn_song() -> Song { println!("Learning Song!"); //std::thread::sleep(Duration::from_secs(2)); println!("Good Progress!"); //std::thread::sleep(Duration::from_secs(1)); Song::new() } async fn sing_song(song: Song) { println!("Tune instruments! {}", song.name); //std::thread::sleep(Duration::from_secs(1)); println!("Singing Song! {}", song.name); } async fn dance() { println!("Dance!!") } async fn learn_and_sing() { let song = learn_song().await; sing_song(song).await; } async fn async_main() { let f2 = dance(); let f1 = learn_and_sing(); futures::join!(f2, f1); } // Each time a future is polled, it is polled as part of a "task". Tasks are the top-level futures // that have been submitted to an executor. // Waker provides a wake() method that can be used to tell the executor that the associated task // should be awoken. When wake() is called, the executor knows that the task associated with the Waker // is ready to make progress, and its future should be polled again. // Waker also implements clone() so that it can be copied around and stored. pub struct TimerFuture { shared_state: Arc<Mutex<SharedState>>, } /// Shared state between the future and the waiting thread struct SharedState { /// Whether or not the sleep time has elapsed completed: bool, /// The waker for the task that `TimerFuture` is running on. /// The thread can use this after setting `completed = true` to tell /// `TimerFuture`'s task to wake up, see that `completed = true`, and /// move forward. waker: Option<Waker>, } impl Future for TimerFuture { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // Look at the shared state to see if the timer has already completed. let mut shared_state = self.shared_state.lock().unwrap(); if shared_state.completed { Poll::Ready(()) } else { // Set waker so that the thread can wake up the current task // when the timer has completed, ensuring that the future is polled // again and sees that `completed = true`. // // It's tempting to do this once rather than repeatedly cloning // the waker each time. However, the `TimerFuture` can move between // tasks on the executor, which could cause a stale waker pointing // to the wrong task, preventing `TimerFuture` from waking up // correctly.
// function, but we omit that here to keep things simple. shared_state.waker = Some(cx.waker().clone()); Poll::Pending } } } impl TimerFuture { /// Create a new `TimerFuture` which will complete after the provided /// timeout. pub fn new(duration: Duration) -> Self { let shared_state = Arc::new(Mutex::new(SharedState { completed: false, waker: None, })); // Spawn the new thread let thread_shared_state = shared_state.clone(); thread::spawn(move || { thread::sleep(duration); let mut shared_state = thread_shared_state.lock().unwrap(); // Signal that the timer has completed and wake up the last // task on which the future was polled, if one exists. shared_state.completed = true; if let Some(waker) = shared_state.waker.take() { waker.wake() } }); TimerFuture { shared_state } } } /// Task executor that receives tasks off of a channel and runs them. struct Executor { ready_queue: Receiver<Arc<Task>>, } /// `Spawner` spawns new futures onto the task channel. #[derive(Clone)] struct Spawner { task_sender: SyncSender<Arc<Task>>, } /// A future that can reschedule itself to be polled by an `Executor`. struct Task { /// In-progress future that should be pushed to completion. /// /// The `Mutex` is not necessary for correctness, since we only have /// one thread executing tasks at once. However, Rust isn't smart /// enough to know that `future` is only mutated from one thread, /// so we need use the `Mutex` to prove thread-safety. A production /// executor would not need this, and could use `UnsafeCell` instead. future: Mutex<Option<BoxFuture<'static, ()>>>, /// Handle to place the task itself back onto the task queue. task_sender: SyncSender<Arc<Task>>, } fn new_executor_and_spawner() -> (Executor, Spawner) { // Maximum number of tasks to allow queueing in the channel at once. // This is just to make `sync_channel` happy, and wouldn't be present in // a real executor. const MAX_QUEUED_TASKS: usize = 10_000; let (task_sender, ready_queue) = sync_channel(MAX_QUEUED_TASKS); (Executor { ready_queue }, Spawner { task_sender }) } impl Spawner { fn spawn(&self, future: impl Future<Output=()> +'static + Send) { let future = future.boxed(); let task = Arc::new(Task { future: Mutex::new(Some(future)), task_sender: self.task_sender.clone(), }); self.task_sender.send(task).expect("too many tasks queued"); } } // To poll futures, we'll need to create a Waker. As discussed in the task wakeups section, Wakers // are responsible for scheduling a task to be polled again once wake is called. Remember that Wakers // tell the executor exactly which task has become ready, allowing them to poll just the futures that // are ready to make progress. The easiest way to create a new Waker is by implementing the ArcWake // trait and then using the waker_ref or.into_waker() functions to turn an Arc<impl ArcWake> into a // Waker. impl ArcWake for Task { fn wake_by_ref(arc_self: &Arc<Self>) { // Implement `wake` by sending this task back onto the task channel // so that it will be polled again by the executor. let cloned = arc_self.clone(); arc_self.task_sender.send(cloned).expect("too many tasks queued"); } } impl Executor { fn run(&self) { while let Ok(task) = self.ready_queue.recv() { // Take the future, and if it has not yet completed (is still Some), // poll it in an attempt to complete it. 
let mut future_slot = task.future.lock().unwrap(); if let Some(mut future) = future_slot.take() { // Create a `LocalWaker` from the task itself let waker = waker_ref(&task); let context = &mut Context::from_waker(&*waker); // `BoxFuture<T>` is a type alias for // `Pin<Box<dyn Future<Output = T> + Send +'static>>`. // We can get a `Pin<&mut dyn Future + Send +'static>` // from it by calling the `Pin::as_mut` method. if let Poll::Pending = future.as_mut().poll(context) { // We're not done processing the future, so put it // back in its task to be run again in the future. *future_slot = Some(future); } } } } } // In practice, this problem is solved through integration with an IO-aware system blocking primitive, // such as epoll on Linux, kqueue on FreeBSD and Mac OS, IOCP on Windows, and ports on Fuchsia (all // of which are exposed through the cross-platform Rust crate mio). These primitives all allow a // thread to block on multiple asynchronous IO events, returning once one of the events completes. // In practice, these APIs usually look something like this: /*struct IoBlocker { /*... */ } struct Event { // An ID uniquely identifying the event that occurred and was listened for. id: usize, // A set of signals to wait for, or which occurred. signals: Signals, } impl IoBlocker { /// Create a new collection of asynchronous IO events to block on. fn new() -> Self { /*... */ } /// Express an interest in a particular IO event. fn add_io_event_interest( &self, /// The object on which the event will occur io_object: &IoObject, /// A set of signals that may appear on the `io_object` for /// which an event should be triggered, paired with /// an ID to give to events that result from this interest. event: Event, ) { /*... */ } /// Block until one of the events occurs. fn block(&self) -> Event { /*... */ } } let mut io_blocker = IoBlocker::new(); io_blocker.add_io_event_interest( &socket_1, Event { id: 1, signals: READABLE }, ); io_blocker.add_io_event_interest( &socket_2, Event { id: 2, signals: READABLE | WRITABLE }, ); let event = io_blocker.block(); // prints e.g. "Socket 1 is now READABLE" if socket one became readable. println!("Socket {:?} is now {:?}", event.id, event.signals); */ /* Futures executors can use these primitives to provide asynchronous IO objects such as sockets that can configure callbacks to be run when a particular IO event occurs. In the case of our SocketRead example above, the Socket::set_readable_callback function might look like the following pseudocode: impl Socket { fn set_readable_callback(&self, waker: Waker) { // `local_executor` is a reference to the local executor. // this could be provided at creation of the socket, but in practice // many executor implementations pass it down through thread local // storage for convenience. let local_executor = self.local_executor; // Unique ID for this IO object. let id = self.id; // Store the local waker in the executor's map so that it can be called // once the IO event arrives. local_executor.event_map.insert(id, waker); local_executor.add_io_event_interest( &self.socket_file_descriptor, Event { id, signals: READABLE }, ); } } We can now have just one executor thread which can receive and dispatch any IO event to the appropriate Waker, which will wake up the corresponding task, allowing the executor to drive more tasks to completion before returning to check for more IO events (and the cycle continues...). 
*/ // Similarly, it isn't a good idea to hold a traditional non-futures-aware lock across an.await, as // it can cause the threadpool to lock up: one task could take out a lock,.await and yield to the // executor, allowing another task to attempt to take the lock and cause a deadlock. To avoid this, // use the Mutex in futures::lock rather than the one from std::sync.
// // N.B. it's possible to check for this using the `Waker::will_wake`
random_line_split
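The middle of this row is the `N.B.` comment pointing at `Waker::will_wake`, the check that makes it safe to skip re-cloning the waker on every poll. A sketch of what that optimization could look like; `update_waker` is a hypothetical helper, and `noop_waker` comes from the `futures` crate:

```rust
use futures::task::noop_waker;
use std::task::Waker;

/// Clone and store `current` only when the stored waker would not
/// wake the same task; otherwise the old clone is still good.
fn update_waker(slot: &mut Option<Waker>, current: &Waker) {
    match slot {
        Some(existing) if existing.will_wake(current) => {}
        _ => *slot = Some(current.clone()),
    }
}

fn main() {
    let waker = noop_waker();
    let mut slot: Option<Waker> = None;
    update_waker(&mut slot, &waker); // first poll: clone and store
    update_waker(&mut slot, &waker); // later polls: no new clone
    assert!(slot.is_some());
}
```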
async_await_basics.rs
use futures::executor::block_on; use std::thread::Thread; use std::sync::mpsc; use futures::join; use { std::{ pin::Pin, task::Waker, thread, }, }; use { futures::{ future::{FutureExt, BoxFuture}, task::{ArcWake, waker_ref}, }, std::{ future::Future, sync::{Arc, Mutex}, sync::mpsc::{sync_channel, SyncSender, Receiver}, task::{Context, Poll}, time::Duration, }, }; fn async_await_basics_main() { println!("Hello, world!"); block_on(async_main()); let (executor, spawner) = new_executor_and_spawner(); // Spawn a task to print before and after waiting on a timer. spawner.spawn(async { println!("howdy!"); // Wait for our timer future to complete after two seconds. TimerFuture::new(Duration::new(2, 0)).await; println!("done!"); }); // Drop the spawner so that our executor knows it is finished and won't // receive more incoming tasks to run. drop(spawner); // Run the executor until the task queue is empty. // This will print "howdy!", pause, and then print "done!". executor.run(); } struct Song { name: String } impl Song { fn new() -> Song { Song { name: String::from("Hotel California") } } } async fn learn_song() -> Song { println!("Learning Song!"); //std::thread::sleep(Duration::from_secs(2)); println!("Good Progress!"); //std::thread::sleep(Duration::from_secs(1)); Song::new() } async fn sing_song(song: Song) { println!("Tune instruments! {}", song.name); //std::thread::sleep(Duration::from_secs(1)); println!("Singing Song! {}", song.name); } async fn dance()
async fn learn_and_sing() { let song = learn_song().await; sing_song(song).await; } async fn async_main() { let f2 = dance(); let f1 = learn_and_sing(); futures::join!(f2, f1); } // Each time a future is polled, it is polled as part of a "task". Tasks are the top-level futures // that have been submitted to an executor. // Waker provides a wake() method that can be used to tell the executor that the associated task // should be awoken. When wake() is called, the executor knows that the task associated with the Waker // is ready to make progress, and its future should be polled again. // Waker also implements clone() so that it can be copied around and stored. pub struct TimerFuture { shared_state: Arc<Mutex<SharedState>>, } /// Shared state between the future and the waiting thread struct SharedState { /// Whether or not the sleep time has elapsed completed: bool, /// The waker for the task that `TimerFuture` is running on. /// The thread can use this after setting `completed = true` to tell /// `TimerFuture`'s task to wake up, see that `completed = true`, and /// move forward. waker: Option<Waker>, } impl Future for TimerFuture { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // Look at the shared state to see if the timer has already completed. let mut shared_state = self.shared_state.lock().unwrap(); if shared_state.completed { Poll::Ready(()) } else { // Set waker so that the thread can wake up the current task // when the timer has completed, ensuring that the future is polled // again and sees that `completed = true`. // // It's tempting to do this once rather than repeatedly cloning // the waker each time. However, the `TimerFuture` can move between // tasks on the executor, which could cause a stale waker pointing // to the wrong task, preventing `TimerFuture` from waking up // correctly. // // N.B. it's possible to check for this using the `Waker::will_wake` // function, but we omit that here to keep things simple. shared_state.waker = Some(cx.waker().clone()); Poll::Pending } } } impl TimerFuture { /// Create a new `TimerFuture` which will complete after the provided /// timeout. pub fn new(duration: Duration) -> Self { let shared_state = Arc::new(Mutex::new(SharedState { completed: false, waker: None, })); // Spawn the new thread let thread_shared_state = shared_state.clone(); thread::spawn(move || { thread::sleep(duration); let mut shared_state = thread_shared_state.lock().unwrap(); // Signal that the timer has completed and wake up the last // task on which the future was polled, if one exists. shared_state.completed = true; if let Some(waker) = shared_state.waker.take() { waker.wake() } }); TimerFuture { shared_state } } } /// Task executor that receives tasks off of a channel and runs them. struct Executor { ready_queue: Receiver<Arc<Task>>, } /// `Spawner` spawns new futures onto the task channel. #[derive(Clone)] struct Spawner { task_sender: SyncSender<Arc<Task>>, } /// A future that can reschedule itself to be polled by an `Executor`. struct Task { /// In-progress future that should be pushed to completion. /// /// The `Mutex` is not necessary for correctness, since we only have /// one thread executing tasks at once. However, Rust isn't smart /// enough to know that `future` is only mutated from one thread, /// so we need use the `Mutex` to prove thread-safety. A production /// executor would not need this, and could use `UnsafeCell` instead. 
future: Mutex<Option<BoxFuture<'static, ()>>>, /// Handle to place the task itself back onto the task queue. task_sender: SyncSender<Arc<Task>>, } fn new_executor_and_spawner() -> (Executor, Spawner) { // Maximum number of tasks to allow queueing in the channel at once. // This is just to make `sync_channel` happy, and wouldn't be present in // a real executor. const MAX_QUEUED_TASKS: usize = 10_000; let (task_sender, ready_queue) = sync_channel(MAX_QUEUED_TASKS); (Executor { ready_queue }, Spawner { task_sender }) } impl Spawner { fn spawn(&self, future: impl Future<Output=()> +'static + Send) { let future = future.boxed(); let task = Arc::new(Task { future: Mutex::new(Some(future)), task_sender: self.task_sender.clone(), }); self.task_sender.send(task).expect("too many tasks queued"); } } // To poll futures, we'll need to create a Waker. As discussed in the task wakeups section, Wakers // are responsible for scheduling a task to be polled again once wake is called. Remember that Wakers // tell the executor exactly which task has become ready, allowing them to poll just the futures that // are ready to make progress. The easiest way to create a new Waker is by implementing the ArcWake // trait and then using the waker_ref or.into_waker() functions to turn an Arc<impl ArcWake> into a // Waker. impl ArcWake for Task { fn wake_by_ref(arc_self: &Arc<Self>) { // Implement `wake` by sending this task back onto the task channel // so that it will be polled again by the executor. let cloned = arc_self.clone(); arc_self.task_sender.send(cloned).expect("too many tasks queued"); } } impl Executor { fn run(&self) { while let Ok(task) = self.ready_queue.recv() { // Take the future, and if it has not yet completed (is still Some), // poll it in an attempt to complete it. let mut future_slot = task.future.lock().unwrap(); if let Some(mut future) = future_slot.take() { // Create a `LocalWaker` from the task itself let waker = waker_ref(&task); let context = &mut Context::from_waker(&*waker); // `BoxFuture<T>` is a type alias for // `Pin<Box<dyn Future<Output = T> + Send +'static>>`. // We can get a `Pin<&mut dyn Future + Send +'static>` // from it by calling the `Pin::as_mut` method. if let Poll::Pending = future.as_mut().poll(context) { // We're not done processing the future, so put it // back in its task to be run again in the future. *future_slot = Some(future); } } } } } // In practice, this problem is solved through integration with an IO-aware system blocking primitive, // such as epoll on Linux, kqueue on FreeBSD and Mac OS, IOCP on Windows, and ports on Fuchsia (all // of which are exposed through the cross-platform Rust crate mio). These primitives all allow a // thread to block on multiple asynchronous IO events, returning once one of the events completes. // In practice, these APIs usually look something like this: /*struct IoBlocker { /*... */ } struct Event { // An ID uniquely identifying the event that occurred and was listened for. id: usize, // A set of signals to wait for, or which occurred. signals: Signals, } impl IoBlocker { /// Create a new collection of asynchronous IO events to block on. fn new() -> Self { /*... */ } /// Express an interest in a particular IO event. fn add_io_event_interest( &self, /// The object on which the event will occur io_object: &IoObject, /// A set of signals that may appear on the `io_object` for /// which an event should be triggered, paired with /// an ID to give to events that result from this interest. event: Event, ) { /*... 
*/ } /// Block until one of the events occurs. fn block(&self) -> Event { /*... */ } } let mut io_blocker = IoBlocker::new(); io_blocker.add_io_event_interest( &socket_1, Event { id: 1, signals: READABLE }, ); io_blocker.add_io_event_interest( &socket_2, Event { id: 2, signals: READABLE | WRITABLE }, ); let event = io_blocker.block(); // prints e.g. "Socket 1 is now READABLE" if socket one became readable. println!("Socket {:?} is now {:?}", event.id, event.signals); */ /* Futures executors can use these primitives to provide asynchronous IO objects such as sockets that can configure callbacks to be run when a particular IO event occurs. In the case of our SocketRead example above, the Socket::set_readable_callback function might look like the following pseudocode: impl Socket { fn set_readable_callback(&self, waker: Waker) { // `local_executor` is a reference to the local executor. // this could be provided at creation of the socket, but in practice // many executor implementations pass it down through thread local // storage for convenience. let local_executor = self.local_executor; // Unique ID for this IO object. let id = self.id; // Store the local waker in the executor's map so that it can be called // once the IO event arrives. local_executor.event_map.insert(id, waker); local_executor.add_io_event_interest( &self.socket_file_descriptor, Event { id, signals: READABLE }, ); } } We can now have just one executor thread which can receive and dispatch any IO event to the appropriate Waker, which will wake up the corresponding task, allowing the executor to drive more tasks to completion before returning to check for more IO events (and the cycle continues...). */ // Similarly, it isn't a good idea to hold a traditional non-futures-aware lock across an.await, as // it can cause the threadpool to lock up: one task could take out a lock,.await and yield to the // executor, allowing another task to attempt to take the lock and cause a deadlock. To avoid this, // use the Mutex in futures::lock rather than the one from std::sync.
{ println!("Dance!!") }
identifier_body
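`async_main` in this file uses `futures::join!` to drive `dance()` and `learn_and_sing()` concurrently on a single thread. A minimal sketch of the other half of that story, namely that `join!` hands back every future's output as a tuple (the two helper functions are invented for the demo):

```rust
use futures::executor::block_on;
use futures::join;

async fn double(x: u32) -> u32 {
    x * 2
}

async fn triple(x: u32) -> u32 {
    x * 3
}

fn main() {
    // Both futures are polled in lockstep until each completes.
    let (a, b) = block_on(async { join!(double(2), triple(3)) });
    assert_eq!((a, b), (4, 9));
}
```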
async_await_basics.rs
use futures::executor::block_on; use std::thread::Thread; use std::sync::mpsc; use futures::join; use { std::{ pin::Pin, task::Waker, thread, }, }; use { futures::{ future::{FutureExt, BoxFuture}, task::{ArcWake, waker_ref}, }, std::{ future::Future, sync::{Arc, Mutex}, sync::mpsc::{sync_channel, SyncSender, Receiver}, task::{Context, Poll}, time::Duration, }, }; fn async_await_basics_main() { println!("Hello, world!"); block_on(async_main()); let (executor, spawner) = new_executor_and_spawner(); // Spawn a task to print before and after waiting on a timer. spawner.spawn(async { println!("howdy!"); // Wait for our timer future to complete after two seconds. TimerFuture::new(Duration::new(2, 0)).await; println!("done!"); }); // Drop the spawner so that our executor knows it is finished and won't // receive more incoming tasks to run. drop(spawner); // Run the executor until the task queue is empty. // This will print "howdy!", pause, and then print "done!". executor.run(); } struct Song { name: String } impl Song { fn new() -> Song { Song { name: String::from("Hotel California") } } } async fn learn_song() -> Song { println!("Learning Song!"); //std::thread::sleep(Duration::from_secs(2)); println!("Good Progress!"); //std::thread::sleep(Duration::from_secs(1)); Song::new() } async fn sing_song(song: Song) { println!("Tune instruments! {}", song.name); //std::thread::sleep(Duration::from_secs(1)); println!("Singing Song! {}", song.name); } async fn dance() { println!("Dance!!") } async fn learn_and_sing() { let song = learn_song().await; sing_song(song).await; } async fn async_main() { let f2 = dance(); let f1 = learn_and_sing(); futures::join!(f2, f1); } // Each time a future is polled, it is polled as part of a "task". Tasks are the top-level futures // that have been submitted to an executor. // Waker provides a wake() method that can be used to tell the executor that the associated task // should be awoken. When wake() is called, the executor knows that the task associated with the Waker // is ready to make progress, and its future should be polled again. // Waker also implements clone() so that it can be copied around and stored. pub struct TimerFuture { shared_state: Arc<Mutex<SharedState>>, } /// Shared state between the future and the waiting thread struct SharedState { /// Whether or not the sleep time has elapsed completed: bool, /// The waker for the task that `TimerFuture` is running on. /// The thread can use this after setting `completed = true` to tell /// `TimerFuture`'s task to wake up, see that `completed = true`, and /// move forward. waker: Option<Waker>, } impl Future for TimerFuture { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // Look at the shared state to see if the timer has already completed. let mut shared_state = self.shared_state.lock().unwrap(); if shared_state.completed { Poll::Ready(()) } else { // Set waker so that the thread can wake up the current task // when the timer has completed, ensuring that the future is polled // again and sees that `completed = true`. // // It's tempting to do this once rather than repeatedly cloning // the waker each time. However, the `TimerFuture` can move between // tasks on the executor, which could cause a stale waker pointing // to the wrong task, preventing `TimerFuture` from waking up // correctly. // // N.B. it's possible to check for this using the `Waker::will_wake` // function, but we omit that here to keep things simple. 
shared_state.waker = Some(cx.waker().clone()); Poll::Pending } } } impl TimerFuture { /// Create a new `TimerFuture` which will complete after the provided /// timeout. pub fn new(duration: Duration) -> Self { let shared_state = Arc::new(Mutex::new(SharedState { completed: false, waker: None, })); // Spawn the new thread let thread_shared_state = shared_state.clone(); thread::spawn(move || { thread::sleep(duration); let mut shared_state = thread_shared_state.lock().unwrap(); // Signal that the timer has completed and wake up the last // task on which the future was polled, if one exists. shared_state.completed = true; if let Some(waker) = shared_state.waker.take() { waker.wake() } }); TimerFuture { shared_state } } } /// Task executor that receives tasks off of a channel and runs them. struct Executor { ready_queue: Receiver<Arc<Task>>, } /// `Spawner` spawns new futures onto the task channel. #[derive(Clone)] struct Spawner { task_sender: SyncSender<Arc<Task>>, } /// A future that can reschedule itself to be polled by an `Executor`. struct Task { /// In-progress future that should be pushed to completion. /// /// The `Mutex` is not necessary for correctness, since we only have /// one thread executing tasks at once. However, Rust isn't smart /// enough to know that `future` is only mutated from one thread, /// so we need use the `Mutex` to prove thread-safety. A production /// executor would not need this, and could use `UnsafeCell` instead. future: Mutex<Option<BoxFuture<'static, ()>>>, /// Handle to place the task itself back onto the task queue. task_sender: SyncSender<Arc<Task>>, } fn new_executor_and_spawner() -> (Executor, Spawner) { // Maximum number of tasks to allow queueing in the channel at once. // This is just to make `sync_channel` happy, and wouldn't be present in // a real executor. const MAX_QUEUED_TASKS: usize = 10_000; let (task_sender, ready_queue) = sync_channel(MAX_QUEUED_TASKS); (Executor { ready_queue }, Spawner { task_sender }) } impl Spawner { fn spawn(&self, future: impl Future<Output=()> +'static + Send) { let future = future.boxed(); let task = Arc::new(Task { future: Mutex::new(Some(future)), task_sender: self.task_sender.clone(), }); self.task_sender.send(task).expect("too many tasks queued"); } } // To poll futures, we'll need to create a Waker. As discussed in the task wakeups section, Wakers // are responsible for scheduling a task to be polled again once wake is called. Remember that Wakers // tell the executor exactly which task has become ready, allowing them to poll just the futures that // are ready to make progress. The easiest way to create a new Waker is by implementing the ArcWake // trait and then using the waker_ref or.into_waker() functions to turn an Arc<impl ArcWake> into a // Waker. impl ArcWake for Task { fn wake_by_ref(arc_self: &Arc<Self>) { // Implement `wake` by sending this task back onto the task channel // so that it will be polled again by the executor. let cloned = arc_self.clone(); arc_self.task_sender.send(cloned).expect("too many tasks queued"); } } impl Executor { fn run(&self) { while let Ok(task) = self.ready_queue.recv() { // Take the future, and if it has not yet completed (is still Some), // poll it in an attempt to complete it. 
let mut future_slot = task.future.lock().unwrap(); if let Some(mut future) = future_slot.take() { // Create a `LocalWaker` from the task itself let waker = waker_ref(&task); let context = &mut Context::from_waker(&*waker); // `BoxFuture<T>` is a type alias for // `Pin<Box<dyn Future<Output = T> + Send +'static>>`. // We can get a `Pin<&mut dyn Future + Send +'static>` // from it by calling the `Pin::as_mut` method. if let Poll::Pending = future.as_mut().poll(context)
} } } } // In practice, this problem is solved through integration with an IO-aware system blocking primitive, // such as epoll on Linux, kqueue on FreeBSD and Mac OS, IOCP on Windows, and ports on Fuchsia (all // of which are exposed through the cross-platform Rust crate mio). These primitives all allow a // thread to block on multiple asynchronous IO events, returning once one of the events completes. // In practice, these APIs usually look something like this: /*struct IoBlocker { /*... */ } struct Event { // An ID uniquely identifying the event that occurred and was listened for. id: usize, // A set of signals to wait for, or which occurred. signals: Signals, } impl IoBlocker { /// Create a new collection of asynchronous IO events to block on. fn new() -> Self { /*... */ } /// Express an interest in a particular IO event. fn add_io_event_interest( &self, /// The object on which the event will occur io_object: &IoObject, /// A set of signals that may appear on the `io_object` for /// which an event should be triggered, paired with /// an ID to give to events that result from this interest. event: Event, ) { /*... */ } /// Block until one of the events occurs. fn block(&self) -> Event { /*... */ } } let mut io_blocker = IoBlocker::new(); io_blocker.add_io_event_interest( &socket_1, Event { id: 1, signals: READABLE }, ); io_blocker.add_io_event_interest( &socket_2, Event { id: 2, signals: READABLE | WRITABLE }, ); let event = io_blocker.block(); // prints e.g. "Socket 1 is now READABLE" if socket one became readable. println!("Socket {:?} is now {:?}", event.id, event.signals); */ /* Futures executors can use these primitives to provide asynchronous IO objects such as sockets that can configure callbacks to be run when a particular IO event occurs. In the case of our SocketRead example above, the Socket::set_readable_callback function might look like the following pseudocode: impl Socket { fn set_readable_callback(&self, waker: Waker) { // `local_executor` is a reference to the local executor. // this could be provided at creation of the socket, but in practice // many executor implementations pass it down through thread local // storage for convenience. let local_executor = self.local_executor; // Unique ID for this IO object. let id = self.id; // Store the local waker in the executor's map so that it can be called // once the IO event arrives. local_executor.event_map.insert(id, waker); local_executor.add_io_event_interest( &self.socket_file_descriptor, Event { id, signals: READABLE }, ); } } We can now have just one executor thread which can receive and dispatch any IO event to the appropriate Waker, which will wake up the corresponding task, allowing the executor to drive more tasks to completion before returning to check for more IO events (and the cycle continues...). */ // Similarly, it isn't a good idea to hold a traditional non-futures-aware lock across an.await, as // it can cause the threadpool to lock up: one task could take out a lock,.await and yield to the // executor, allowing another task to attempt to take the lock and cause a deadlock. To avoid this, // use the Mutex in futures::lock rather than the one from std::sync.
{ // We're not done processing the future, so put it // back in its task to be run again in the future. *future_slot = Some(future); }
conditional_block
main.rs
//! # Basic Subclass example
//!
//! This file creates a `GtkApplication` and a `GtkApplicationWindow` subclass
//! and showcases how you can override virtual functions such as `startup`
//! and `activate` and how to interact with the GObjects and their private
//! structs.

extern crate gstreamer as gst;
extern crate gstreamer_player as gst_player;

use gst::prelude::*;
use std::sync::Arc;

#[macro_use]
extern crate glib;
extern crate gio;
extern crate gtk;
extern crate once_cell;

use gio::prelude::*;
use gtk::prelude::*;

use gio::subclass::application::ApplicationImplExt;
use gio::ApplicationFlags;
use glib::subclass;
use glib::subclass::prelude::*;
use glib::translate::*;
use gtk::subclass::prelude::*;
use once_cell::unsync::OnceCell;
use std::cell::Cell;

mod audio_handler;

#[derive(Debug)]
struct WindowWidgets {
    headerbar: gtk::HeaderBar,
    increment: gtk::Button,
    decrement: gtk::Button,
    reset: gtk::Button,
    label: gtk::Label,
}

// This is the private part of our `SimpleWindow` object.
// It's where state and widgets are stored when they don't
// need to be publicly accessible.
#[derive(Debug)]
pub struct SimpleWindowPrivate {
    widgets: OnceCell<WindowWidgets>,
    counter: Cell<i64>,
}

impl ObjectSubclass for SimpleWindowPrivate {
    const NAME: &'static str = "SimpleWindowPrivate";
    type ParentType = gtk::ApplicationWindow;
    type Instance = subclass::simple::InstanceStruct<Self>;
    type Class = subclass::simple::ClassStruct<Self>;

    glib_object_subclass!();

    fn new() -> Self {
        Self {
            widgets: OnceCell::new(),
            counter: Cell::new(0),
        }
    }
}

static MUSIC_FOLDER: &str = "musics";

impl ObjectImpl for SimpleWindowPrivate {
    glib_object_impl!();

    // Here we are overriding the glib::Object::constructed
    // method. It's what gets called when we create our Object
    // and where we can initialize things.
    fn constructed(&self, obj: &glib::Object) {
        // ==== MUSIC SELECTOR BOX =====
        let combo_box = gtk::ComboBoxTextBuilder::new()
            .width_request(50)
            .build();

        let all_musics = std::fs::read_dir(MUSIC_FOLDER)
            .unwrap()
            .filter(|e| e.is_ok())
            .map(|e| e.unwrap());
        all_musics.enumerate().for_each(|(idx, v)| {
            let name = v.path();
            let name = name
                .to_string_lossy()
                .replace(&format!("{}/", MUSIC_FOLDER), "");
            println!("{}", name);
            combo_box.insert(idx as i32, None, &name);
        });
        let combo_box = Arc::new(combo_box);

        // Audio player handle
        let audio_player = Arc::new(audio_handler::AudioHandler::new());

        self.parent_constructed(obj);

        let self_ = obj.downcast_ref::<SimpleWindow>().unwrap();

        // Basic UI elements
        let headerbar = gtk::HeaderBar::new();
        let increment = gtk::Button::new_with_label("Add meaning to my life");
        let reset = gtk::Button::new_with_label("Reset my life");
        let no = gtk::Button::new_with_label("no");
        let decrement = gtk::Button::new_with_label("Remove meaning from my life ;_;");
        let label = gtk::Label::new(Some("What doth life has for you?"));
        let bbox = gtk::BoxBuilder::new()
            .orientation(gtk::Orientation::Vertical)
            .build();
        let play_button = gtk::Button::new_with_label("Play");
        let pause_button = gtk::Button::new_with_label("Pause");
        let tbox = gtk::EntryBuilder::new()
            .height_request(10)
            .activates_default(true)
            .build();
        tbox.set_text("I don't know what to do with that textbox DD:");
        let test = Arc::new(tbox);

        let inner_tbox = test.clone();
        test.clone().connect_activate(clone!(@weak self_ => move |_| {
            let priv_ = SimpleWindowPrivate::from_instance(&self_);
            inner_tbox.set_text("WHy u pressed enter DDD:");
            priv_.widgets.get().unwrap().label.set_text("WHy u pressed enter DDD:");
        }));

        bbox.pack_start(test.as_ref(), false, false, 100);
        bbox.pack_start(&reset, false, false, 10);
        bbox.pack_start(&no, false, false, 10);
        bbox.pack_start(&label, false, false, 10);
        bbox.pack_start(&play_button, false, false, 10);
        bbox.pack_start(&pause_button, false, false, 10);
        bbox.pack_start(combo_box.as_ref(), false, false, 10);

        headerbar.set_title(Some("This is your life now"));
        headerbar.set_show_close_button(true);
        headerbar.pack_start(&increment);
        headerbar.pack_start(&decrement);

        let audio_player_clone = audio_player.clone();
        let combo_box_clone = combo_box.clone();
        // Music button closures
        play_button.connect_clicked(move |_| {
            let music = combo_box_clone.get_active_text().unwrap();
            let music = format!("{}/{}", MUSIC_FOLDER, music.as_str());
            audio_player_clone.play_music(music);
        });

        let audio_player_clone = audio_player.clone();
        pause_button.connect_clicked(move |_| {
            audio_player_clone.pause_music();
        });

        // Connect our method `on_increment_clicked` to be called
        // when the increment button is clicked.
        increment.connect_clicked(clone!(@weak self_ => move |_| {
            let priv_ = SimpleWindowPrivate::from_instance(&self_);
            priv_.on_increment_clicked();
        }));
        decrement.connect_clicked(clone!(@weak self_ => move |_| {
            let priv_ = SimpleWindowPrivate::from_instance(&self_);
            priv_.on_decrement_clicked();
        }));
        reset.connect_clicked(clone!(@weak self_ => move |_| {
            println!("Maybe ;___;");
        }));

        self_.add(&bbox);
        // self_.add(&label);
        self_.set_titlebar(Some(&headerbar));
        self_.set_default_size(640, 480);

        self.widgets
            .set(WindowWidgets {
                headerbar,
                label,
                increment,
                decrement,
                reset,
            })
            .expect("Failed to initialize window state");
    }
}

impl SimpleWindowPrivate {
    fn on_increment_clicked(&self) {
        self.counter.set(self.counter.get() + 1);
        let w = self.widgets.get().unwrap();
        w.label
            .set_text(&format!("Your life has {} meaning", self.counter.get()));
    }

    fn on_decrement_clicked(&self) {
        self.counter.set(self.counter.get().wrapping_sub(1));
        let w = self.widgets.get().unwrap();
        w.label
            .set_text(&format!("Your life has {} meaning", self.counter.get()));
    }
}

impl WidgetImpl for SimpleWindowPrivate {}
impl ContainerImpl for SimpleWindowPrivate {}
impl BinImpl for SimpleWindowPrivate {}
impl WindowImpl for SimpleWindowPrivate {}
impl ApplicationWindowImpl for SimpleWindowPrivate {}

glib_wrapper! {
    pub struct SimpleWindow(
        Object<subclass::simple::InstanceStruct<SimpleWindowPrivate>,
        subclass::simple::ClassStruct<SimpleWindowPrivate>,
        SimpleAppWindowClass>)
        @extends gtk::Widget, gtk::Container, gtk::Bin, gtk::Window, gtk::ApplicationWindow;

    match fn {
        get_type => || SimpleWindowPrivate::get_type().to_glib(),
    }
}

impl SimpleWindow {
    pub fn new(app: &gtk::Application) -> Self {
        glib::Object::new(Self::static_type(), &[("application", app)])
            .expect("Failed to create SimpleWindow")
            .downcast::<SimpleWindow>()
            .expect("Created SimpleWindow is of wrong type")
    }
}

#[derive(Debug)]
pub struct SimpleApplicationPrivate {
    window: OnceCell<SimpleWindow>,
}

impl ObjectSubclass for SimpleApplicationPrivate {
    const NAME: &'static str = "SimpleApplicationPrivate";
    type ParentType = gtk::Application;
    type Instance = subclass::simple::InstanceStruct<Self>;
    type Class = subclass::simple::ClassStruct<Self>;

    glib_object_subclass!();

    fn new() -> Self {
        Self {
            window: OnceCell::new(),
        }
    }
}

impl ObjectImpl for SimpleApplicationPrivate {
    glib_object_impl!();
}

// When our application starts, the `startup` signal will be fired.
// This gives us a chance to perform initialisation tasks that are not directly
// related to showing a new window. After this, depending on how
// the application is started, either `activate` or `open` will be called next.
impl ApplicationImpl for SimpleApplicationPrivate {
    // `gio::Application::activate` is what gets called when the
    // application is launched by the desktop environment and
    // asked to present itself.
    fn activate(&self, app: &gio::Application) {
        let app = app.downcast_ref::<gtk::Application>().unwrap();
        let priv_ = SimpleApplicationPrivate::from_instance(app);
        let window = priv_
            .window
            .get()
            .expect("Should always be initialized in gio_application_startup");
        window.show_all();
        window.present();
    }

    // `gio::Application` is a bit special. It does not get initialized
    // when `new` is called and the object created, but rather
    // once the `startup` signal is emitted and the `gio::Application::startup`
    // is called.
    //
    // Due to this, we create and initialize the `SimpleWindow` widget
    // here. Widgets can't be created before `startup` has been called.
    fn startup(&self, app: &gio::Application) {
        self.parent_startup(app);

        let app = app.downcast_ref::<gtk::Application>().unwrap();
        let priv_ = SimpleApplicationPrivate::from_instance(app);
        let window = SimpleWindow::new(&app);
        priv_
            .window
            .set(window)
            .expect("Failed to initialize application window");
    }
}

impl GtkApplicationImpl for SimpleApplicationPrivate {}

glib_wrapper! {
    pub struct SimpleApplication(
        Object<subclass::simple::InstanceStruct<SimpleApplicationPrivate>,
        subclass::simple::ClassStruct<SimpleApplicationPrivate>,
        SimpleApplicationClass>)
        @extends gio::Application, gtk::Application;

    match fn {
        get_type => || SimpleApplicationPrivate::get_type().to_glib(),
    }
}

impl SimpleApplication {
    pub fn new() -> Self
}

fn main() {
    gtk::init().expect("Failed to initialize gtk");

    let app = SimpleApplication::new();
    let args: Vec<String> = std::env::args().collect();
    app.run(&args);
}
{
    glib::Object::new(
        Self::static_type(),
        &[
            ("application-id", &"org.gtk-rs.SimpleApplication"),
            ("flags", &ApplicationFlags::empty()),
        ],
    )
    .expect("Failed to create SimpleApp")
    .downcast()
    .expect("Created simpleapp is of wrong type")
}
identifier_body
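// The `clone!(@weak ...)` pattern used throughout `constructed` above is how
// gtk-rs closures avoid reference cycles: the macro captures a weak reference
// and upgrades it each time the closure runs. A minimal sketch of the same
// idiom outside of GTK, assuming glib's `clone!` macro and an `Rc`-wrapped
// value (illustrative only, not part of the example above):

use std::rc::Rc;
use glib::clone;

fn clone_macro_demo() {
    let state = Rc::new(1);
    // `@weak` stores a `Weak` reference; if `state` has already been dropped
    // by the time the closure runs, the closure simply returns early instead
    // of keeping the value alive.
    let cb = clone!(@weak state => move || {
        println!("state = {}", state);
    });
    cb();
}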
lib.rs
//! A thread-safe object pool with automatic return and attach/detach semantics
//!
//! The goal of an object pool is to reuse expensive-to-allocate or frequently allocated objects
//!
//! # Examples
//!
//! ## Creating a Pool
//!
//! The general pool creation looks like this
//! ```
//! let pool: MemoryPool<T> = MemoryPool::new(capacity, || T::new());
//! ```
//! Example pool with 32 `Vec<u8>` with capacity of 4096
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! ```
//!
//! ## Using a Pool
//!
//! Basic usage for pulling from the pool
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! some_file.read_to_end(reusable_buff);
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//! Pull from pool and `detach()`
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! let (pool, reusable_buff) = reusable_buff.detach();
//! let mut s = String::from(reusable_buff);
//! s.push_str("hello, world!");
//! pool.attach(s.into_bytes()); // reattach the buffer before reusable goes out of scope
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//!
//! ## Using Across Threads
//!
//! You simply wrap the pool in a [`std::sync::Arc`]
//! ```
//! let pool: Arc<MemoryPool<T>> = Arc::new(MemoryPool::new(cap, || T::new()));
//! ```
//!
//! # Warning
//!
//! Objects in the pool are not automatically reset; they are returned but NOT reset.
//! You may want to call `object.reset()` or `object.clear()`
//! or any other equivalent for the object that you are using, after pulling from the pool
//!
//! [`std::sync::Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html

mod multi_buf;
mod semphore;

pub use multi_buf::{MultiBuffer, GetSegs};
use crossbeam::channel;
use std::ops::{Deref, DerefMut};
use parking_lot::{Mutex, Condvar};
use std::mem::{ManuallyDrop, forget};
use std::sync::Arc;
use std::thread;
use log::{trace};
pub use semphore::Semphore;
use parking_lot::lock_api::MutexGuard;
use futures::SinkExt;
use std::thread::sleep;

pub type Stack<T> = Vec<T>;

pub struct PendingInfo<T>
    where T: Sync + Send + 'static {
    id: String,
    notifier: channel::Sender<T>,
}

pub struct WaitingInfo<T>
    where T: Sync + Send + 'static {
    id: String,
    // Channel used to send the resume command.
    notifier: channel::Sender<T>,
    /// Minimum number of memory units needed before this waiter can resume.
    min_request: usize,
}

pub struct MemoryPool<T>
    where T: Sync + Send + 'static {
    objects: (channel::Sender<T>, channel::Receiver<T>),
    // Those pending on data.
    pending: Arc<Mutex<Vec<PendingInfo<Reusable<T>>>>>,
    /// Those that are sleeping.
    waiting: Arc<Mutex<Vec<WaitingInfo<Reusable<T>>>>>,
    run_block: Arc<Mutex<()>>,
    pending_block: Arc<Mutex<()>>,
    // recycle: (channel::Sender<Reusable<'a,T>>, channel::Receiver<Reusable<'a,T>>),
}

impl<T> MemoryPool<T>
    where T: Sync + Send + 'static {
    #[inline]
    pub fn new<F>(cap: usize, init: F) -> MemoryPool<T>
        where F: Fn() -> T,
    {
        // //println!("mempool remains:{}", cap);
        log::trace!("mempool remains:{}", cap);
        let objects = channel::unbounded();
        for _ in 0..cap {
            let _ = objects.0.send(init());
        }

        MemoryPool {
            objects,
            pending: Arc::new(Mutex::new(Vec::new())),
            waiting: Arc::new(Mutex::new(Vec::new())),
            run_block: Arc::new(Mutex::new(())),
            pending_block: Arc::new(Mutex::new(())),
        }
    }

    #[inline]
    pub fn len(&self) -> usize {
        self.objects.1.len()
    }

    #[inline]
    pub fn is_empty(&self) -> bool {
        self.objects.1.is_empty()
    }

    #[inline]
    pub fn pending(&'static self, str: &str, sender: channel::Sender<Reusable<T>>, releasable: usize) -> (Option<Reusable<T>>, bool) {
        log::trace!("pending item:{}", str);
        let _x = self.pending_block.lock();
        let ret = if let Ok(item) = self.objects.1.try_recv() {
            log::trace!("get ok:{}", str);
            (Some(Reusable::new(&self, item)), false)
/*        } else if (self.pending.lock().len() == 0) {
            log::trace!("get should pend:{}", str);
            self.pending.lock().push(PendingInfo {
                id: String::from(str),
                notifier: sender.clone(),
            });
            (None, false)*/
        } else {
            let to_retry = { self.waiting.lock().len() * 60 + 2 };
            log::trace!("try again :{} with retries backoff:{}", str, to_retry);
            for _ in 0..to_retry {
                sleep(std::time::Duration::from_secs(1));
                if let Ok(item) = self.objects.1.try_recv() {
                    log::trace!("get ok:{}", str);
                    return (Some(Reusable::new(&self, item)), false);
                }
            }
            log::trace!("get should sleep :{}", str);
            self.waiting.lock().push(WaitingInfo {
                id: String::from(str),
                notifier: sender.clone(),
                min_request: releasable,
            });
            (None, true)
        };
        ret
    }

    #[inline]
    pub fn attach(&'static self, t: T) {
        let
elf.run_block.lock();
        log::trace!("attach started<<<<<<<<<<<<<<<<");
        log::trace!("recycled an item ");
        let mut wait_list = { self.waiting.lock() };
        log::trace!("check waiting list ok :{}", wait_list.len());
        if wait_list.len() > 0 && self.len() >= wait_list[0].min_request {
            log::trace!("remove ok<<<<<<<<<<<<<<< ");
            let item = wait_list.remove(0);
            log::trace!("start wakeup<<<<<<<<<<<<<<<<<<<");
            //&wait_list.remove(0);
            self.objects.0.send(t).unwrap();
            log::trace!("free cnts:{}, waking up {}/ with min req:{} now.... ",
                        self.len(), item.id.clone(), item.min_request);
            for _ in 0..item.min_request + 1 {
                item.notifier
                    .send(Reusable::new(&self, self.objects.1.recv().unwrap()))
                    .unwrap_or_else(|_e| {
                        log::warn!("notifier send failed");
                    });
            }
            drop(item);
            // thread::spawn(move || {
            //     item.notifier.send(()).unwrap();
            // });
        } else if self.pending.lock().len() > 0 {
            drop(wait_list);
            let pending_item = self.pending.lock().remove(0);
            log::trace!("fill pending:{}", pending_item.id);
            // thread::spawn(move || {
            //     pending_item.notifier.send(());
            // });
            let _ = pending_item.notifier.send(Reusable::new(&self, t));
        } else {
            // drop(wait_list);
            self.objects.0.send(t).unwrap();
            log::trace!("push to queue:{}", self.len());
        }
    }
}

pub struct Reusable<T>
    where T: Sync + Send + 'static {
    pool: &'static MemoryPool<T>,
    data: ManuallyDrop<T>,
}

impl<T> Reusable<T>
    where T: Sync + Send + 'static {
    #[inline]
    pub fn new(pool: &'static MemoryPool<T>, t: T) -> Self {
        Self {
            pool,
            data: ManuallyDrop::new(t),
        }
    }

    // #[inline]
    // pub fn detach(mut self) -> (&'a MemoryPool<T>, T) {
    //     let ret = unsafe { (self.pool, self.take()) };
    //     forget(self);
    //     ret
    // }
    //
    unsafe fn take(&mut self) -> T {
        ManuallyDrop::take(&mut self.data)
    }
}

impl<T> Deref for Reusable<T>
    where T: Sync + Send + 'static {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.data
    }
}

impl<T> DerefMut for Reusable<T>
    where T: Sync + Send + 'static {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.data
    }
}

impl<T> Drop for Reusable<T>
    where T: Sync + Send + 'static {
    #[inline]
    fn drop(&mut self) {
        unsafe { self.pool.attach(self.take()); }
    }
}

#[cfg(test)]
mod tests {
    use crate::{MemoryPool, Reusable};
    use std::mem::drop;
    use std::ops::DerefMut;
    use std::thread;
    use std::sync::Arc;

    // #[test]
    // fn pull() {
    //     let pool = Arc::new(MemoryPool::<Vec<u8>>::new(3, || Vec::new()));
    //     let pool2 = pool.clone();
    //     let t1 = thread::spawn(move ||{
    //         let object1 = pool.lock().pull();
    //         //println!("retain 1");
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         let object2 = pool.pull();
    //         //println!("retain 2");
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         let object3 = pool.pull();
    //         //println!("retain 3");
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         //println!("drop 1");
    //         drop(object1);
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         //println!("drop 2");
    //         drop(object2);
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         //println!("drop 3");
    //         drop(object3);
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //     });
    //     let t2 = thread::spawn(move ||{
    //         //println!(">>>wait for 2.5s");
    //         thread::sleep(std::time::Duration::from_millis(2500));
    //         //println!(">>>try to retain 1.....");
    //         let object2 = pool2.pull();
    //         //println!(">>>retained 1");
    //         //println!(">>>try to retain 2.....");
    //         let object2 = pool2.pull();
    //         //println!(">>>retained 1");
    //         //println!(">>>try to retain 3.....");
    //         let object2 = pool2.pull();
    //         //println!(">>>retained 1");
    //
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         //println!(">>>dropped");
    //         drop(object2);
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //     });
    //     t1.join();
    //     t2.join();
    //
    // }

    #[test]
    fn e2e() {
        // let pool = MemoryPool::new(10, || Vec::new());
        // let mut objects = Vec::new();
        //
        // thread::spawn(||{
        //     for i in 0..10 {
        //         let mut object = pool.pull();
        //     }
        // });
        //
        //
        //
        // drop(objects);
        //
        //
        // for i in 10..0 {
        //     let mut object = pool.objects.lock().pop().unwrap();
        //     assert_eq!(object.pop(), Some(i));
        // }
    }
}
_x = s
identifier_name
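// A hypothetical usage sketch of the `pending`/`attach` handshake defined
// above (not part of the crate itself): callers hand `pending` a channel
// sender; when the pool is empty they are parked on the waiting list and
// later woken through that channel by `attach`. `Box::leak` is used here
// only to obtain the `&'static MemoryPool` that `pending` and `attach`
// require.

use crossbeam::channel;

fn usage_sketch() {
    let pool: &'static MemoryPool<Vec<u8>> =
        Box::leak(Box::new(MemoryPool::new(4, || Vec::with_capacity(4096))));

    // Each caller passes a channel on which it can be woken up later.
    let (tx, rx) = channel::unbounded::<Reusable<Vec<u8>>>();

    let buf = match pool.pending("worker-1", tx, 1) {
        // Fast path: a free buffer was available immediately.
        (Some(buf), _) => buf,
        // Slow path: we were parked on the waiting list; block until
        // `attach` pushes buffers back through our channel.
        (None, true) => rx.recv().expect("pool was shut down"),
        // With the pend branch commented out above, this case cannot occur.
        (None, false) => unreachable!(),
    };

    // `buf` derefs to `Vec<u8>`; dropping it runs `Reusable::drop`, which
    // calls `pool.attach` and returns the buffer to the pool.
    drop(buf);
}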
lib.rs
//! A thread-safe object pool with automatic return and attach/detach semantics
//!
//! The goal of an object pool is to reuse expensive-to-allocate or frequently allocated objects
//!
//! # Examples
//!
//! ## Creating a Pool
//!
//! The general pool creation looks like this
//! ```
//! let pool: MemoryPool<T> = MemoryPool::new(capacity, || T::new());
//! ```
//! Example pool with 32 `Vec<u8>` with capacity of 4096
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! ```
//!
//! ## Using a Pool
//!
//! Basic usage for pulling from the pool
//! ```
//! some_file.read_to_end(reusable_buff);
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//! Pull from the pool and `detach()`
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buffer before using it
//! let (pool, reusable_buff) = reusable_buff.detach();
//! let mut s = String::from_utf8(reusable_buff).unwrap();
//! s.push_str("hello, world!");
//! pool.attach(s.into_bytes()); // reattach the buffer before the reusable goes out of scope
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//!
//! ## Using Across Threads
//!
//! You simply wrap the pool in a [`std::sync::Arc`]
//! ```
//! let pool: Arc<MemoryPool<T>> = Arc::new(MemoryPool::new(cap, || T::new()));
//! ```
//!
//! # Warning
//!
//! Objects in the pool are not automatically reset; they are returned as-is.
//! You may want to call `object.reset()`, `object.clear()`,
//! or any other equivalent for the object that you are using, after pulling from the pool.
//!
//! [`std::sync::Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html

mod multi_buf;
mod semphore;

pub use multi_buf::{MultiBuffer, GetSegs};
use crossbeam::channel;
use std::ops::{Deref, DerefMut};
use parking_lot::{Mutex, Condvar};
use std::mem::{ManuallyDrop, forget};
use std::sync::Arc;
use std::thread;
use log::{trace};
pub use semphore::Semphore;
use parking_lot::lock_api::MutexGuard;
use futures::SinkExt;
use std::thread::sleep;

pub type Stack<T> = Vec<T>;

pub struct PendingInfo<T> where T: Sync + Send + 'static {
    id: String,
    notifier: channel::Sender<T>,
}

pub struct WaitingInfo<T> where T: Sync + Send + 'static {
    id: String,
    // sends the resume command to the sleeping party
    notifier: channel::Sender<T>,
    /// the minimum number of memory units required before this waiter can resume
    min_request: usize,
}

pub struct MemoryPool<T> where T: Sync + Send + 'static {
    objects: (channel::Sender<T>, channel::Receiver<T>),
    // the ones waiting for data
    pending: Arc<Mutex<Vec<PendingInfo<Reusable<T>>>>>,
    /// those who are sleeping until enough capacity is freed
    waiting: Arc<Mutex<Vec<WaitingInfo<Reusable<T>>>>>,
    run_block: Arc<Mutex<()>>,
    pending_block: Arc<Mutex<()>>,
    // recycle: (channel::Sender<Reusable<'a,T>>, channel::Receiver<Reusable<'a,T>>),
}

impl<T> MemoryPool<T> where T: Sync + Send + 'static {
    #[inline]
    pub fn new<F>(cap: usize, init: F) -> MemoryPool<T>
    where
        F: Fn() -> T,
    {
        log::trace!("mempool remains:{}", cap);
        let objects = channel::unbounded();
        for _ in 0..cap {
            let _ = objects.0.send(init());
        }
        MemoryPool {
            objects,
            pending: Arc::new(Mutex::new(Vec::new())),
            waiting: Arc::new(Mutex::new(Vec::new())),
            run_block: Arc::new(Mutex::new(())),
            pending_block: Arc::new(Mutex::new(())),
        }
    }

    #[inline]
    pub fn len(&self) -> usize {
        self.objects.1.len()
    }

    #[inline]
    pub fn is_empty(&self) -> bool {
        self.objects.1.is_empty()
    }

    #[inline]
    pub fn pending(&'static self, str: &str, sender: channel::Sender<Reusable<T>>, releasable: usize) -> (Option<Reusable<T>>, bool) {
        log::trace!("pending item:{}", str);
        let _x = self.pending_block.lock();
        let ret = if let Ok(item) = self.objects.1.try_recv() {
            log::trace!("get ok:{}", str);
            (Some(Reusable::new(&self, item)), false)
        /*
        } else if self.pending.lock().len() == 0 {
            log::trace!("get should pend:{}", str);
            self.pending.lock().push(PendingInfo {
                id: String::from(str),
                notifier: sender.clone(),
            });
            (None, false)
        */
        } else {
            let to_retry = { self.waiting.lock().len() * 60 + 2 };
            log::trace!("try again :{} with retries backoff:{}", str, to_retry);
            for _ in 0..to_retry {
                sleep(std::time::Duration::from_secs(1));
                if let Ok(item) = self.objects.1.try_recv() {
                    log::trace!("get ok:{}", str);
                    return (Some(Reusable::new(&self, item)), false);
                }
            }
            log::trace!("get should sleep :{}", str);
            self.waiting.lock().push(WaitingInfo {
                id: String::from(str),
                notifier: sender.clone(),
                min_request: releasable,
            });
            (None, true)
        };
        ret
    }

    #[inline]
    pub fn attach(&'static self, t: T) {
        let _x = self.run_block.lock();
        log::trace!("attach started<<<<<<<<<<<<<<<<");
        log::trace!("recycled an item");
        let mut wait_list = { self.waiting.lock() };
        log::trace!("check waiting list ok :{}", wait_list.len());
        if wait_list.len() > 0 && self.len() >= wait_list[0].min_request {
            log::trace!("remove ok<<<<<<<<<<<<<<< ");
            let item = wait_list.remove(0);
            log::trace!("start wakeup<<<<<<<<<<<<<<<<<<<");
            self.objects.0.send(t).unwrap();
            log::trace!("free cnts:{}, waking up {} with min req:{} now....", self.len(), item.id.clone(), item.min_request);
            for _ in 0..item.min_request + 1 {
                item.notifier.send(Reusable::new(&self, self.objects.1.recv().unwrap())).unwrap_or_else(|_| {
                    log::warn!("notifier send failed");
                });
            }
            drop(item);
        } else if self.pending.lock().len() > 0 {
            drop(wait_list);
            let pending_item = self.pending.lock().remove(0);
            log::trace!("fill pending:{}", pending_item.id);
            let _ = pending_item.notifier.send(Reusable::new(&self, t));
        } else {
            self.objects.0.send(t).unwrap();
            log::trace!("push to queue:{}", self.len());
        }
    }
}

pub struct Reusable<T> where T: Sync + Send + 'static {
    pool: &'static MemoryPool<T>,
    data: ManuallyDrop<T>,
}

impl<T> Reusable<T> where T: Sync + Send + 'static {
    #[inline]
    pub fn new(pool: &'static MemoryPool<T>, t: T) -> Self {
        Self {
            pool,
            data: ManuallyDrop::new(t),
        }
    }

    // #[inline]
    // pub fn detach(mut self) -> (&'a MemoryPool<T>, T) {
    //     let ret = unsafe { (self.pool, self.take()) };
    //     forget(self);
    //     ret
    // }

    unsafe fn take(&mut self) -> T {
        ManuallyDrop::take(&mut self.data)
    }
}

impl<T> Deref for Reusable<T> where T: Sync + Send + 'static {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.data
    }
}

impl<T> DerefMut for Reusable<T> where T: Sync + Send + 'static {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.data
    }
}

impl<T> Drop for Reusable<T> where T: Sync + Send + 'static {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            self.pool.attach(self.take());
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::{MemoryPool, Reusable};
    use std::mem::drop;
    use std::ops::DerefMut;
    use std::thread;
    use std::sync::Arc;

    // #[test]
    // fn pull() {
    //     let pool = Arc::new(MemoryPool::<Vec<u8>>::new(3, || Vec::new()));
    //     let pool2 = pool.clone();
    //     let t1 = thread::spawn(move || {
    //         let object1 = pool.pull();
    //         //println!("retain 1");
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         let object2 = pool.pull();
    //         //println!("retain 2");
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         let object3 = pool.pull();
    //         //println!("retain 3");
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         //println!("drop 1");
    //         drop(object1);
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         //println!("drop 2");
    //         drop(object2);
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         //println!("drop 3");
    //         drop(object3);
    //         thread::sleep(std::time::Duration::from_secs(1));
    //     });
    //     let t2 = thread::spawn(move || {
    //         //println!(">>>wait for 2.5s");
    //         thread::sleep(std::time::Duration::from_millis(2500));
    //         //println!(">>>try to retain 1.....");
    //         let object2 = pool2.pull();
    //         //println!(">>>retained 1");
    //         //println!(">>>try to retain 2.....");
    //         let object2 = pool2.pull();
    //         //println!(">>>retained 2");
    //         //println!(">>>try to retain 3.....");
    //         let object2 = pool2.pull();
    //         //println!(">>>retained 3");
    //
    //         thread::sleep(std::time::Duration::from_secs(1));
    //
    //         //println!(">>>dropped");
    //         drop(object2);
    //         thread::sleep(std::time::Duration::from_secs(1));
    //     });
    //     t1.join().unwrap();
    //     t2.join().unwrap();
    // }

    #[test]
    fn e2e() {
        // let pool = MemoryPool::new(10, || Vec::new());
        // let mut objects = Vec::new();
        //
        // thread::spawn(|| {
        //     for i in 0..10 {
        //         let mut object = pool.pull();
        //     }
        // });
        //
        // drop(objects);
        //
        // for i in 10..0 {
        //     let mut object = pool.objects.lock().pop().unwrap();
        //     assert_eq!(object.pop(), Some(i));
        // }
    }
}
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096)); //! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated //! reusable_buff.clear(); // clear the buff before using
random_line_split
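The row above centers on a Drop-based object pool: `pull()` hands out a `Reusable` wrapper, and dropping the wrapper sends the object back to the pool. Below is a minimal, self-contained sketch of that auto-return idea using only the standard library; a `Mutex<Vec<T>>` stands in for the crossbeam channel pair and the wait lists, and `SimplePool`/`Pulled` are illustrative names, not the module's real API.

```rust
use std::sync::{Arc, Mutex};

struct SimplePool<T> {
    items: Mutex<Vec<T>>,
}

struct Pulled<T> {
    pool: Arc<SimplePool<T>>,
    item: Option<T>, // Option so Drop can move the value back out
}

impl<T> SimplePool<T> {
    fn new(cap: usize, init: impl Fn() -> T) -> Arc<Self> {
        Arc::new(Self { items: Mutex::new((0..cap).map(|_| init()).collect()) })
    }

    // Returns None when the pool is exhausted, mirroring `pull()` above.
    fn pull(pool: &Arc<Self>) -> Option<Pulled<T>> {
        pool.items.lock().unwrap().pop().map(|item| Pulled {
            pool: Arc::clone(pool),
            item: Some(item),
        })
    }
}

impl<T> Drop for Pulled<T> {
    fn drop(&mut self) {
        // Automatic return: the object goes back when the handle is dropped.
        if let Some(item) = self.item.take() {
            self.pool.items.lock().unwrap().push(item);
        }
    }
}

fn main() {
    let pool = SimplePool::new(2, || Vec::<u8>::with_capacity(4096));
    {
        let _buf = SimplePool::pull(&pool).expect("pool has capacity");
        assert_eq!(pool.items.lock().unwrap().len(), 1);
    } // _buf is returned here
    assert_eq!(pool.items.lock().unwrap().len(), 2);
}
```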
counter.rs
use std::ffi::CString; use std::io; use std::sync::{Mutex, Once}; #[cfg(target_os = "freebsd")] use libc::EDOOFUS; #[cfg(target_os = "freebsd")] use pmc_sys::{ pmc_allocate, pmc_attach, pmc_detach, pmc_id_t, pmc_init, pmc_mode_PMC_MODE_SC, pmc_mode_PMC_MODE_TC, pmc_read, pmc_release, pmc_rw, pmc_start, pmc_stop, }; #[cfg(not(target_os = "freebsd"))] use super::stubs::*; use crate::CPU_ANY; use crate::{ error::{new_error, new_os_error, Error, ErrorKind}, signal, }; static PMC_INIT: Once = Once::new(); lazy_static! { static ref BIG_FAT_LOCK: Mutex<u32> = Mutex::new(42); } /// Configure event counter parameters. /// /// Unless specified, a counter is allocated in counting mode with a system-wide /// scope, recording events across all CPUs. /// /// ```no_run /// let config = CounterConfig::default().attach_to(vec![0]); /// /// let instr = config.allocate("inst_retired.any")?; /// let l1_hits = config.allocate("mem_load_uops_retired.l1_hit")?; /// # /// # Ok::<(), Error>(()) /// ``` #[derive(Debug, Default, Clone)] pub struct CounterBuilder { cpu: Option<i32>, pids: Option<Vec<i32>>, } impl CounterBuilder { /// Specify the CPU number that the PMC is to be allocated on. /// /// Defaults to all CPUs ([`CPU_ANY`]). pub fn set_cpu(self, cpu: i32) -> Self { Self { cpu: Some(cpu), ..self } } /// Attach a counter to the specified PID(s). /// /// When set, this causes the PMC to be allocated in process-scoped counting /// mode ([`pmc_mode_PMC_MODE_TC`] - see `man pmc`). /// /// # PID 0 /// /// PID 0 is a magic value, attaching to it causes the counter to be /// attached to the current (caller's) PID. pub fn attach_to(self, pids: impl Into<Vec<i32>>) -> Self { Self { pids: Some(pids.into()), ..self } } /// Allocate a PMC with the specified configuration, and attach to the /// target PIDs (if any). pub fn allocate(&self, event_spec: impl Into<String>) -> Result<Counter, Error> { Counter::new(event_spec, self.cpu, self.pids.clone()) } } #[derive(Debug)] struct AttachHandle { id: pmc_id_t, pid: i32, } impl Drop for AttachHandle { fn drop(&mut self) { // BUG: do not attempt to detach from pid 0 or risk live-locking the // machine. // // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=227041 // if self.pid!= 0 { unsafe { pmc_detach(self.id, self.pid) }; } } } /// A handle to a running PMC counter. /// /// Dropping this handle causes the counter to stop recording events. pub struct Running<'a> { counter: &'a mut Counter, } impl<'a> Running<'a> { /// Read the current counter value. /// /// ```no_run /// let mut counter = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let handle = counter.start()?; /// /// println!("instructions: {}", handle.read()?); /// # /// # Ok::<(), Error>(()) /// ``` pub fn read(&self) -> Result<u64, Error> { self.counter.read() } /// Set the value of the counter. pub fn set(&mut self, value: u64) -> Result<u64, Error> { self.counter.set(value) } /// Stop the counter from recording new events. pub fn stop(self) { drop(self) } } impl<'a> std::fmt::Display for Running<'a> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.counter.fmt(f) } } impl<'a> Drop for Running<'a> { fn
(&mut self) { unsafe { pmc_stop(self.counter.id) }; } } /// An allocated PMC counter. /// /// Counters are initialised using the [`CounterBuilder`] type. /// /// ```no_run /// use std::{thread, time::Duration}; /// /// let instr = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let handle = instr.start()?; /// /// // Stop the counter after 5 seconds /// thread::sleep(Duration::from_secs(5)); /// handle.stop(); /// /// println!("instructions: {}", instr.read()?); /// # /// # Ok::<(), Error>(()) /// ``` #[derive(Debug)] pub struct Counter { id: pmc_id_t, attached: Option<Vec<AttachHandle>>, } impl Counter { fn new( event_spec: impl Into<String>, cpu: Option<i32>, pids: Option<Vec<i32>>, ) -> Result<Self, Error> { // If there's any pids, request a process counter, otherwise a // system-wide counter. let pmc_mode = if pids.is_none() { pmc_mode_PMC_MODE_SC } else { pmc_mode_PMC_MODE_TC }; // It appears pmc_allocate isn't thread safe, so take a lock while // calling it. let _guard = BIG_FAT_LOCK.lock().unwrap(); init_pmc_once()?; signal::check()?; let c_spec = CString::new(event_spec.into()).map_err(|_| new_error(ErrorKind::InvalidEventSpec))?; // Allocate the PMC let mut id = 0; if unsafe { pmc_allocate( c_spec.as_ptr(), pmc_mode, 0, cpu.unwrap_or(CPU_ANY), &mut id, 0, ) }!= 0 { return match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(libc::EINVAL) => Err(new_os_error(ErrorKind::AllocInit)), _ => Err(new_os_error(ErrorKind::Unknown)), }; } // Initialise the counter so dropping it releases the PMC let mut c = Counter { id, attached: None }; // Attach to pids, if any, and collect handles so dropping them later // causes them to detach. // // The handles MUST be dropped before the Counter instance. if let Some(pids) = pids { let mut handles = vec![]; for pid in pids { if unsafe { pmc_attach(id, pid) }!= 0 { return match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(libc::EBUSY) => unreachable!(), Some(libc::EEXIST) => Err(new_os_error(ErrorKind::AlreadyAttached)), Some(libc::EPERM) => Err(new_os_error(ErrorKind::Forbidden)), Some(libc::EINVAL) | Some(libc::ESRCH) => { Err(new_os_error(ErrorKind::BadTarget)) } _ => Err(new_os_error(ErrorKind::Unknown)), }; } handles.push(AttachHandle { id, pid }) } c.attached = Some(handles) } Ok(c) } /// Start this counter. /// /// The counter stops when the returned [`Running`] handle is dropped. #[must_use = "counter only runs until handle is dropped"] pub fn start(&mut self) -> Result<Running<'_>, Error> { signal::check()?; if unsafe { pmc_start(self.id) }!= 0 { return match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(EDOOFUS) => Err(new_os_error(ErrorKind::LogFileRequired)), Some(libc::ENXIO) => Err(new_os_error(ErrorKind::BadScope)), _ => Err(new_os_error(ErrorKind::Unknown)), }; } Ok(Running { counter: self }) } /// Read the counter value. /// /// This call is valid for both running, stopped, and unused counters. /// /// ```no_run /// let mut counter = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let r1 = counter.read()?; /// let r2 = counter.read()?; /// /// // A counter that is not running does not advance /// assert!(r2 == r1); /// # /// # Ok::<(), Error>(()) /// ``` pub fn read(&self) -> Result<u64, Error> { signal::check()?; let mut value: u64 = 0; if unsafe { pmc_read(self.id, &mut value) }!= 0 { return Err(new_os_error(ErrorKind::Unknown)); } Ok(value) } /// Set an explicit counter value. 
/// /// ```no_run /// let mut counter = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let r1 = counter.set(42)?; /// // The previous value is returned when setting a new value /// assert_eq!(r1, 0); /// /// // Reading the counter returns the value set /// let r2 = counter.read()?; /// assert_eq!(r2, 42); /// # /// # Ok::<(), Error>(()) /// ``` pub fn set(&mut self, value: u64) -> Result<u64, Error> { signal::check()?; let mut old: u64 = 0; if unsafe { pmc_rw(self.id, value, &mut old) }!= 0 { let err = io::Error::last_os_error(); return match io::Error::raw_os_error(&err) { Some(libc::EBUSY) => panic!("{}", err.to_string()), _ => Err(new_os_error(ErrorKind::Unknown)), }; } Ok(old) } } impl std::fmt::Display for Counter { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.read() { Ok(v) => write!(f, "{}", v), Err(e) => write!(f, "error: {}", e), } } } impl Drop for Counter { fn drop(&mut self) { let _guard = BIG_FAT_LOCK.lock().unwrap(); // The handles MUST be dropped before the Counter instance self.attached = None; unsafe { pmc_release(self.id); } } } fn init_pmc_once() -> Result<(), Error> { let mut maybe_err = Ok(()); PMC_INIT.call_once(|| { if unsafe { pmc_init() }!= 0 { maybe_err = match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(libc::ENOENT) => Err(new_os_error(ErrorKind::Init)), Some(libc::ENXIO) => Err(new_os_error(ErrorKind::Unsupported)), Some(libc::EPROGMISMATCH) => Err(new_os_error(ErrorKind::VersionMismatch)), _ => Err(new_os_error(ErrorKind::Unknown)), }; return; } // Register the signal handler signal::watch_for(&[libc::SIGBUS, libc::SIGIO]); }); maybe_err }
drop
identifier_name
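counter.rs ties the counter's running state to a guard object: `start()` hands back a `Running` handle, and dropping the handle calls `pmc_stop`. Here is a toy, dependency-free sketch of that RAII shape; `Recorder` and `RunningGuard` are stand-ins for the real PMC-backed types, not the crate's API.

```rust
struct Recorder {
    running: bool,
    value: u64,
}

struct RunningGuard<'a> {
    rec: &'a mut Recorder,
}

impl Recorder {
    fn start(&mut self) -> RunningGuard<'_> {
        self.running = true;
        RunningGuard { rec: self }
    }
}

impl<'a> RunningGuard<'a> {
    fn read(&self) -> u64 {
        self.rec.value
    }

    fn stop(self) {
        drop(self) // an explicit stop is just an early drop
    }
}

impl<'a> Drop for RunningGuard<'a> {
    fn drop(&mut self) {
        // Mirrors the `pmc_stop` call in the real Drop impl.
        self.rec.running = false;
    }
}

fn main() {
    let mut r = Recorder { running: false, value: 0 };
    let guard = r.start();
    let _ = guard.read();
    guard.stop();
    assert!(!r.running); // stopped as soon as the handle was dropped
}
```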
counter.rs
use std::ffi::CString; use std::io; use std::sync::{Mutex, Once}; #[cfg(target_os = "freebsd")] use libc::EDOOFUS; #[cfg(target_os = "freebsd")] use pmc_sys::{ pmc_allocate, pmc_attach, pmc_detach, pmc_id_t, pmc_init, pmc_mode_PMC_MODE_SC, pmc_mode_PMC_MODE_TC, pmc_read, pmc_release, pmc_rw, pmc_start, pmc_stop, }; #[cfg(not(target_os = "freebsd"))] use super::stubs::*; use crate::CPU_ANY; use crate::{ error::{new_error, new_os_error, Error, ErrorKind}, signal, }; static PMC_INIT: Once = Once::new(); lazy_static! { static ref BIG_FAT_LOCK: Mutex<u32> = Mutex::new(42); } /// Configure event counter parameters. /// /// Unless specified, a counter is allocated in counting mode with a system-wide /// scope, recording events across all CPUs. /// /// ```no_run /// let config = CounterConfig::default().attach_to(vec![0]); /// /// let instr = config.allocate("inst_retired.any")?; /// let l1_hits = config.allocate("mem_load_uops_retired.l1_hit")?; /// # /// # Ok::<(), Error>(()) /// ``` #[derive(Debug, Default, Clone)] pub struct CounterBuilder { cpu: Option<i32>, pids: Option<Vec<i32>>, } impl CounterBuilder { /// Specify the CPU number that the PMC is to be allocated on. /// /// Defaults to all CPUs ([`CPU_ANY`]). pub fn set_cpu(self, cpu: i32) -> Self { Self { cpu: Some(cpu), ..self } } /// Attach a counter to the specified PID(s). /// /// When set, this causes the PMC to be allocated in process-scoped counting /// mode ([`pmc_mode_PMC_MODE_TC`] - see `man pmc`). /// /// # PID 0 /// /// PID 0 is a magic value, attaching to it causes the counter to be /// attached to the current (caller's) PID. pub fn attach_to(self, pids: impl Into<Vec<i32>>) -> Self { Self { pids: Some(pids.into()), ..self } } /// Allocate a PMC with the specified configuration, and attach to the /// target PIDs (if any). pub fn allocate(&self, event_spec: impl Into<String>) -> Result<Counter, Error> { Counter::new(event_spec, self.cpu, self.pids.clone()) } } #[derive(Debug)] struct AttachHandle { id: pmc_id_t, pid: i32, } impl Drop for AttachHandle { fn drop(&mut self) { // BUG: do not attempt to detach from pid 0 or risk live-locking the // machine. // // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=227041 // if self.pid!= 0 { unsafe { pmc_detach(self.id, self.pid) }; } } } /// A handle to a running PMC counter. /// /// Dropping this handle causes the counter to stop recording events. pub struct Running<'a> { counter: &'a mut Counter, } impl<'a> Running<'a> { /// Read the current counter value. /// /// ```no_run /// let mut counter = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let handle = counter.start()?; /// /// println!("instructions: {}", handle.read()?); /// # /// # Ok::<(), Error>(()) /// ``` pub fn read(&self) -> Result<u64, Error> { self.counter.read() } /// Set the value of the counter. pub fn set(&mut self, value: u64) -> Result<u64, Error> { self.counter.set(value) } /// Stop the counter from recording new events. pub fn stop(self) { drop(self) } } impl<'a> std::fmt::Display for Running<'a> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.counter.fmt(f) } } impl<'a> Drop for Running<'a> { fn drop(&mut self) { unsafe { pmc_stop(self.counter.id) }; } } /// An allocated PMC counter. /// /// Counters are initialised using the [`CounterBuilder`] type. 
/// /// ```no_run /// use std::{thread, time::Duration}; /// /// let instr = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let handle = instr.start()?; /// /// // Stop the counter after 5 seconds /// thread::sleep(Duration::from_secs(5)); /// handle.stop(); /// /// println!("instructions: {}", instr.read()?); /// # /// # Ok::<(), Error>(()) /// ``` #[derive(Debug)] pub struct Counter { id: pmc_id_t, attached: Option<Vec<AttachHandle>>, } impl Counter { fn new( event_spec: impl Into<String>, cpu: Option<i32>, pids: Option<Vec<i32>>, ) -> Result<Self, Error> { // If there's any pids, request a process counter, otherwise a // system-wide counter. let pmc_mode = if pids.is_none() { pmc_mode_PMC_MODE_SC } else { pmc_mode_PMC_MODE_TC }; // It appears pmc_allocate isn't thread safe, so take a lock while // calling it. let _guard = BIG_FAT_LOCK.lock().unwrap(); init_pmc_once()?; signal::check()?; let c_spec = CString::new(event_spec.into()).map_err(|_| new_error(ErrorKind::InvalidEventSpec))?; // Allocate the PMC let mut id = 0; if unsafe { pmc_allocate( c_spec.as_ptr(), pmc_mode, 0, cpu.unwrap_or(CPU_ANY), &mut id, 0, ) }!= 0 { return match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(libc::EINVAL) => Err(new_os_error(ErrorKind::AllocInit)), _ => Err(new_os_error(ErrorKind::Unknown)), }; } // Initialise the counter so dropping it releases the PMC let mut c = Counter { id, attached: None }; // Attach to pids, if any, and collect handles so dropping them later // causes them to detach. // // The handles MUST be dropped before the Counter instance. if let Some(pids) = pids { let mut handles = vec![]; for pid in pids { if unsafe { pmc_attach(id, pid) }!= 0
handles.push(AttachHandle { id, pid }) } c.attached = Some(handles) } Ok(c) } /// Start this counter. /// /// The counter stops when the returned [`Running`] handle is dropped. #[must_use = "counter only runs until handle is dropped"] pub fn start(&mut self) -> Result<Running<'_>, Error> { signal::check()?; if unsafe { pmc_start(self.id) }!= 0 { return match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(EDOOFUS) => Err(new_os_error(ErrorKind::LogFileRequired)), Some(libc::ENXIO) => Err(new_os_error(ErrorKind::BadScope)), _ => Err(new_os_error(ErrorKind::Unknown)), }; } Ok(Running { counter: self }) } /// Read the counter value. /// /// This call is valid for both running, stopped, and unused counters. /// /// ```no_run /// let mut counter = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let r1 = counter.read()?; /// let r2 = counter.read()?; /// /// // A counter that is not running does not advance /// assert!(r2 == r1); /// # /// # Ok::<(), Error>(()) /// ``` pub fn read(&self) -> Result<u64, Error> { signal::check()?; let mut value: u64 = 0; if unsafe { pmc_read(self.id, &mut value) }!= 0 { return Err(new_os_error(ErrorKind::Unknown)); } Ok(value) } /// Set an explicit counter value. /// /// ```no_run /// let mut counter = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let r1 = counter.set(42)?; /// // The previous value is returned when setting a new value /// assert_eq!(r1, 0); /// /// // Reading the counter returns the value set /// let r2 = counter.read()?; /// assert_eq!(r2, 42); /// # /// # Ok::<(), Error>(()) /// ``` pub fn set(&mut self, value: u64) -> Result<u64, Error> { signal::check()?; let mut old: u64 = 0; if unsafe { pmc_rw(self.id, value, &mut old) }!= 0 { let err = io::Error::last_os_error(); return match io::Error::raw_os_error(&err) { Some(libc::EBUSY) => panic!("{}", err.to_string()), _ => Err(new_os_error(ErrorKind::Unknown)), }; } Ok(old) } } impl std::fmt::Display for Counter { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.read() { Ok(v) => write!(f, "{}", v), Err(e) => write!(f, "error: {}", e), } } } impl Drop for Counter { fn drop(&mut self) { let _guard = BIG_FAT_LOCK.lock().unwrap(); // The handles MUST be dropped before the Counter instance self.attached = None; unsafe { pmc_release(self.id); } } } fn init_pmc_once() -> Result<(), Error> { let mut maybe_err = Ok(()); PMC_INIT.call_once(|| { if unsafe { pmc_init() }!= 0 { maybe_err = match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(libc::ENOENT) => Err(new_os_error(ErrorKind::Init)), Some(libc::ENXIO) => Err(new_os_error(ErrorKind::Unsupported)), Some(libc::EPROGMISMATCH) => Err(new_os_error(ErrorKind::VersionMismatch)), _ => Err(new_os_error(ErrorKind::Unknown)), }; return; } // Register the signal handler signal::watch_for(&[libc::SIGBUS, libc::SIGIO]); }); maybe_err }
{ return match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(libc::EBUSY) => unreachable!(), Some(libc::EEXIST) => Err(new_os_error(ErrorKind::AlreadyAttached)), Some(libc::EPERM) => Err(new_os_error(ErrorKind::Forbidden)), Some(libc::EINVAL) | Some(libc::ESRCH) => { Err(new_os_error(ErrorKind::BadTarget)) } _ => Err(new_os_error(ErrorKind::Unknown)), }; }
conditional_block
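The `conditional_block` above is exactly the errno-classification match used around `pmc_attach`: call into libc, then translate `raw_os_error` codes into the crate's error kinds. A standalone sketch of that pattern follows; `MyErrorKind` is a hypothetical stand-in for the crate's `ErrorKind`, and it assumes only the `libc` crate, which counter.rs already depends on.

```rust
use std::io;

#[derive(Debug)]
enum MyErrorKind {
    AlreadyAttached,
    Forbidden,
    BadTarget,
    Unknown,
}

fn classify_last_os_error() -> MyErrorKind {
    match io::Error::last_os_error().raw_os_error() {
        Some(libc::EEXIST) => MyErrorKind::AlreadyAttached,
        Some(libc::EPERM) => MyErrorKind::Forbidden,
        Some(libc::EINVAL) | Some(libc::ESRCH) => MyErrorKind::BadTarget,
        _ => MyErrorKind::Unknown,
    }
}

fn main() {
    // Force a known errno by making a syscall that must fail: reading fd -1
    // sets EBADF, which falls through to Unknown in this sketch.
    let rc = unsafe { libc::read(-1, std::ptr::null_mut(), 0) };
    assert_eq!(rc, -1);
    println!("{:?}", classify_last_os_error());
}
```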
counter.rs
use std::ffi::CString; use std::io; use std::sync::{Mutex, Once}; #[cfg(target_os = "freebsd")] use libc::EDOOFUS; #[cfg(target_os = "freebsd")] use pmc_sys::{ pmc_allocate, pmc_attach, pmc_detach, pmc_id_t, pmc_init, pmc_mode_PMC_MODE_SC, pmc_mode_PMC_MODE_TC, pmc_read, pmc_release, pmc_rw, pmc_start, pmc_stop, }; #[cfg(not(target_os = "freebsd"))] use super::stubs::*; use crate::CPU_ANY; use crate::{ error::{new_error, new_os_error, Error, ErrorKind}, signal, }; static PMC_INIT: Once = Once::new(); lazy_static! { static ref BIG_FAT_LOCK: Mutex<u32> = Mutex::new(42); } /// Configure event counter parameters. /// /// Unless specified, a counter is allocated in counting mode with a system-wide /// scope, recording events across all CPUs. /// /// ```no_run /// let config = CounterConfig::default().attach_to(vec![0]); /// /// let instr = config.allocate("inst_retired.any")?; /// let l1_hits = config.allocate("mem_load_uops_retired.l1_hit")?; /// # /// # Ok::<(), Error>(()) /// ``` #[derive(Debug, Default, Clone)] pub struct CounterBuilder { cpu: Option<i32>, pids: Option<Vec<i32>>, } impl CounterBuilder { /// Specify the CPU number that the PMC is to be allocated on. /// /// Defaults to all CPUs ([`CPU_ANY`]). pub fn set_cpu(self, cpu: i32) -> Self { Self { cpu: Some(cpu), ..self } } /// Attach a counter to the specified PID(s). /// /// When set, this causes the PMC to be allocated in process-scoped counting /// mode ([`pmc_mode_PMC_MODE_TC`] - see `man pmc`). /// /// # PID 0 /// /// PID 0 is a magic value, attaching to it causes the counter to be /// attached to the current (caller's) PID. pub fn attach_to(self, pids: impl Into<Vec<i32>>) -> Self { Self { pids: Some(pids.into()), ..self } } /// Allocate a PMC with the specified configuration, and attach to the /// target PIDs (if any). pub fn allocate(&self, event_spec: impl Into<String>) -> Result<Counter, Error> { Counter::new(event_spec, self.cpu, self.pids.clone()) } } #[derive(Debug)] struct AttachHandle { id: pmc_id_t, pid: i32, } impl Drop for AttachHandle { fn drop(&mut self) { // BUG: do not attempt to detach from pid 0 or risk live-locking the // machine. // // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=227041 // if self.pid!= 0 { unsafe { pmc_detach(self.id, self.pid) }; } } } /// A handle to a running PMC counter. /// /// Dropping this handle causes the counter to stop recording events. pub struct Running<'a> { counter: &'a mut Counter, } impl<'a> Running<'a> { /// Read the current counter value. /// /// ```no_run /// let mut counter = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let handle = counter.start()?; /// /// println!("instructions: {}", handle.read()?); /// # /// # Ok::<(), Error>(()) /// ``` pub fn read(&self) -> Result<u64, Error> { self.counter.read() } /// Set the value of the counter. pub fn set(&mut self, value: u64) -> Result<u64, Error> { self.counter.set(value) } /// Stop the counter from recording new events. pub fn stop(self) { drop(self) } } impl<'a> std::fmt::Display for Running<'a> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.counter.fmt(f) } } impl<'a> Drop for Running<'a> { fn drop(&mut self) { unsafe { pmc_stop(self.counter.id) }; } } /// An allocated PMC counter. /// /// Counters are initialised using the [`CounterBuilder`] type. 
/// /// ```no_run /// use std::{thread, time::Duration}; /// /// let instr = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let handle = instr.start()?; /// /// // Stop the counter after 5 seconds /// thread::sleep(Duration::from_secs(5)); /// handle.stop(); /// /// println!("instructions: {}", instr.read()?); /// # /// # Ok::<(), Error>(()) /// ``` #[derive(Debug)] pub struct Counter { id: pmc_id_t, attached: Option<Vec<AttachHandle>>, } impl Counter { fn new( event_spec: impl Into<String>, cpu: Option<i32>, pids: Option<Vec<i32>>, ) -> Result<Self, Error> { // If there's any pids, request a process counter, otherwise a // system-wide counter. let pmc_mode = if pids.is_none() { pmc_mode_PMC_MODE_SC } else { pmc_mode_PMC_MODE_TC }; // It appears pmc_allocate isn't thread safe, so take a lock while // calling it. let _guard = BIG_FAT_LOCK.lock().unwrap(); init_pmc_once()?; signal::check()?;
// Allocate the PMC let mut id = 0; if unsafe { pmc_allocate( c_spec.as_ptr(), pmc_mode, 0, cpu.unwrap_or(CPU_ANY), &mut id, 0, ) }!= 0 { return match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(libc::EINVAL) => Err(new_os_error(ErrorKind::AllocInit)), _ => Err(new_os_error(ErrorKind::Unknown)), }; } // Initialise the counter so dropping it releases the PMC let mut c = Counter { id, attached: None }; // Attach to pids, if any, and collect handles so dropping them later // causes them to detach. // // The handles MUST be dropped before the Counter instance. if let Some(pids) = pids { let mut handles = vec![]; for pid in pids { if unsafe { pmc_attach(id, pid) }!= 0 { return match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(libc::EBUSY) => unreachable!(), Some(libc::EEXIST) => Err(new_os_error(ErrorKind::AlreadyAttached)), Some(libc::EPERM) => Err(new_os_error(ErrorKind::Forbidden)), Some(libc::EINVAL) | Some(libc::ESRCH) => { Err(new_os_error(ErrorKind::BadTarget)) } _ => Err(new_os_error(ErrorKind::Unknown)), }; } handles.push(AttachHandle { id, pid }) } c.attached = Some(handles) } Ok(c) } /// Start this counter. /// /// The counter stops when the returned [`Running`] handle is dropped. #[must_use = "counter only runs until handle is dropped"] pub fn start(&mut self) -> Result<Running<'_>, Error> { signal::check()?; if unsafe { pmc_start(self.id) }!= 0 { return match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(EDOOFUS) => Err(new_os_error(ErrorKind::LogFileRequired)), Some(libc::ENXIO) => Err(new_os_error(ErrorKind::BadScope)), _ => Err(new_os_error(ErrorKind::Unknown)), }; } Ok(Running { counter: self }) } /// Read the counter value. /// /// This call is valid for both running, stopped, and unused counters. /// /// ```no_run /// let mut counter = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let r1 = counter.read()?; /// let r2 = counter.read()?; /// /// // A counter that is not running does not advance /// assert!(r2 == r1); /// # /// # Ok::<(), Error>(()) /// ``` pub fn read(&self) -> Result<u64, Error> { signal::check()?; let mut value: u64 = 0; if unsafe { pmc_read(self.id, &mut value) }!= 0 { return Err(new_os_error(ErrorKind::Unknown)); } Ok(value) } /// Set an explicit counter value. 
/// /// ```no_run /// let mut counter = CounterConfig::default() /// .attach_to(vec![0]) /// .allocate("inst_retired.any")?; /// /// let r1 = counter.set(42)?; /// // The previous value is returned when setting a new value /// assert_eq!(r1, 0); /// /// // Reading the counter returns the value set /// let r2 = counter.read()?; /// assert_eq!(r2, 42); /// # /// # Ok::<(), Error>(()) /// ``` pub fn set(&mut self, value: u64) -> Result<u64, Error> { signal::check()?; let mut old: u64 = 0; if unsafe { pmc_rw(self.id, value, &mut old) }!= 0 { let err = io::Error::last_os_error(); return match io::Error::raw_os_error(&err) { Some(libc::EBUSY) => panic!("{}", err.to_string()), _ => Err(new_os_error(ErrorKind::Unknown)), }; } Ok(old) } } impl std::fmt::Display for Counter { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.read() { Ok(v) => write!(f, "{}", v), Err(e) => write!(f, "error: {}", e), } } } impl Drop for Counter { fn drop(&mut self) { let _guard = BIG_FAT_LOCK.lock().unwrap(); // The handles MUST be dropped before the Counter instance self.attached = None; unsafe { pmc_release(self.id); } } } fn init_pmc_once() -> Result<(), Error> { let mut maybe_err = Ok(()); PMC_INIT.call_once(|| { if unsafe { pmc_init() }!= 0 { maybe_err = match io::Error::raw_os_error(&io::Error::last_os_error()) { Some(libc::ENOENT) => Err(new_os_error(ErrorKind::Init)), Some(libc::ENXIO) => Err(new_os_error(ErrorKind::Unsupported)), Some(libc::EPROGMISMATCH) => Err(new_os_error(ErrorKind::VersionMismatch)), _ => Err(new_os_error(ErrorKind::Unknown)), }; return; } // Register the signal handler signal::watch_for(&[libc::SIGBUS, libc::SIGIO]); }); maybe_err }
let c_spec = CString::new(event_spec.into()).map_err(|_| new_error(ErrorKind::InvalidEventSpec))?;
random_line_split
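counter.rs runs `pmc_init` exactly once via `std::sync::Once`, smuggling any failure out of the closure through a local. A sketch of that shape, with a stand-in `fake_init` in place of `pmc_init`; note the pattern's built-in caveat that only the first caller can ever observe the error.

```rust
use std::sync::Once;

static INIT: Once = Once::new();

// Stand-in for the real pmc_init call.
fn fake_init() -> Result<(), String> {
    Ok(())
}

fn init_once() -> Result<(), String> {
    let mut maybe_err: Result<(), String> = Ok(());
    INIT.call_once(|| {
        if let Err(e) = fake_init() {
            // The closure cannot return a value, so capture the error here.
            maybe_err = Err(e);
        }
    });
    // Caveat of this pattern: later callers see Ok(()) even if the first
    // (and only) run of the closure failed, because it never reruns.
    maybe_err
}

fn main() {
    assert!(init_once().is_ok());
    assert!(init_once().is_ok()); // closure is skipped on the second call
}
```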
mod.rs
use rstd::prelude::*; use codec::{Encode, Decode}; use support::{ StorageValue, StorageMap, decl_event, decl_storage, decl_module, ensure, traits::{ Currency, ReservableCurrency, OnFreeBalanceZero, OnUnbalanced, WithdrawReason, ExistenceRequirement, Imbalance, Get, }, dispatch::Result, }; use sr_primitives::{ transaction_validity::{ TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError, TransactionValidity, }, traits::{ Zero, CheckedAdd, CheckedSub, Saturating, SignedExtension, SaturatedConversion, Convert, }, weights::{DispatchInfo, SimpleDispatchInfo, Weight}, }; use system::{OnNewAccount, ensure_signed}; use crate::non_transfer_asset::SustainableCurrency; /// Trait for activity pub trait ActivityInterface<AccountId, Balance> { fn admire(sender: &AccountId, target: &AccountId, cap: Balance) -> Result; } /// The module's configuration trait. pub trait Trait: system::Trait { /// Currency type for this module. type Currency: ReservableCurrency<Self::AccountId>; /// Energy type for this module type EnergyCurrency: SustainableCurrency<Self::AccountId, Moment=Self::BlockNumber>; /// Action point type for this module type ActivityCurrency: Currency<Self::AccountId>; /// Reputation point type for this module type ReputationCurrency: Currency<Self::AccountId>; /// Handler for the unbalanced reduction when taking transaction fees. type TransactionPayment: OnUnbalanced<NegativeImbalanceOf<Self>>; /// The overarching event type. type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>; /// The fee to be paid for making a transaction; the base. type TransactionBaseFee: Get<BalanceOf<Self>>; /// The fee to be paid for making a transaction; the per-byte portion. type TransactionByteFee: Get<BalanceOf<Self>>; /// The base Energy amount of activated account type EnergyBaseAmount: Get<EnergyOf<Self>>; /// Convert a weight value into a deductible fee based on the currency type. type WeightToFee: Convert<Weight, BalanceOf<Self>>; /// Convert a fee value to energy point type FeeToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>; /// Convert a charging value to energy point type ChargingToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>; /// Convert an energy point to fee value type EnergyToFee: Convert<EnergyOf<Self>, BalanceOf<Self>>; /// Convert an energy point to locking block number type EnergyToLocking: Convert<EnergyOf<Self>, <Self as system::Trait>::BlockNumber>; /// Convert an energy point to action point type EnergyToActionPoint: Convert<EnergyOf<Self>, ActionPointOf<Self>>; /// Convert an action point to reputation type ActionPointToReputation: Convert<ActionPointOf<Self>, ReputationOf<Self>>; } // Balance zone pub type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::Balance; type NegativeImbalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::NegativeImbalance; // Energy zone pub type EnergyOf<T> = <<T as Trait>::EnergyCurrency as Currency<<T as system::Trait>::AccountId>>::Balance; // Action zone pub type ActionPointOf<T> = <<T as Trait>::ActivityCurrency as Currency<<T as system::Trait>::AccountId>>::Balance; // Reputation zone pub type ReputationOf<T> = <<T as Trait>::ReputationCurrency as Currency<<T as system::Trait>::AccountId>>::Balance; // This module's storage items. decl_storage! 
{ trait Store for Module<T: Trait> as Activities { /// Map from all extend pub Charged get(charged): map T::AccountId => BalanceOf<T>; } } decl_event!( pub enum Event<T> where AccountId = <T as system::Trait>::AccountId, Balance = BalanceOf<T>, Energy = EnergyOf<T>, Reputation = ReputationOf<T> { // Fee payment FeePayed(AccountId, Energy, Balance), EnergyRecovered(AccountId, Energy), // Reputation part ReputationReward(AccountId, Reputation), ReputationSlash(AccountId, Reputation), } ); // The module's dispatchable functions. decl_module! { /// The module declaration. pub struct Module<T: Trait> for enum Call where origin: T::Origin { // Initializing events fn deposit_event() = default; /// Bond to increase Energy #[weight = SimpleDispatchInfo::FixedNormal(1_000_000)] pub fn charge( origin, #[compact] value: BalanceOf<T> ) { let who = ensure_signed(origin)?; Self::charge_for_energy(&who, value)?; } /// UnBond to decrease Energy #[weight = SimpleDispatchInfo::FixedNormal(1_000_000)] pub fn discharge( origin, #[compact] value: BalanceOf<T> ) { let who = ensure_signed(origin)?; Self::discharge_for_energy(&who, value)?; } } } // The module's main implement impl<T: Trait> Module<T> { // PUBLIC IMMUTABLES pub fn available_energy(who: &T::AccountId) -> EnergyOf<T> { T::EnergyCurrency::available_free_balance(who) } // PRIVATE MUTABLES fn charge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result { // ensure reserve if!T::Currency::can_reserve(who, value) { return Err("not enough free funds"); } // check current_charged let current_charged = <Charged<T>>::get(who); let new_charged = current_charged.checked_add(&value).ok_or("account has charged overflow")?; let energy_to_charge = T::ChargingToEnergy::convert(value); let current_energy = T::EnergyCurrency::free_balance(who); current_energy.checked_add(&energy_to_charge).ok_or("Overflow energy amount")?; // MUTABLES T::Currency::reserve(who, value)?; T::EnergyCurrency::deposit_into_existing(who, energy_to_charge)?; <Charged<T>>::insert(who, new_charged); Ok(()) } fn discharge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result { // check current_charged let current_charged = <Charged<T>>::get(who); let new_charged = current_charged.checked_sub(&value).ok_or("account has too few charged funds")?; let energy_to_discharge = T::ChargingToEnergy::convert(value); let current_energy = T::EnergyCurrency::free_balance(who); current_energy.checked_sub(&energy_to_discharge).ok_or("account has too few energy")?; // MUTABLES T::EnergyCurrency::withdraw(who, energy_to_discharge, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?; T::Currency::unreserve(who, value); <Charged<T>>::insert(who, new_charged); Ok(()) } } impl<T: Trait> OnNewAccount<T::AccountId> for Module<T> { // Implementation of the config type managing the creation of new accounts. 
fn on_new_account(who: &T::AccountId) { T::EnergyCurrency::deposit_creating(who, T::EnergyBaseAmount::get()); } } impl<T: Trait> OnFreeBalanceZero<T::AccountId> for Module<T> { fn on_free_balance_zero(who: &T::AccountId) { let dust = <Charged<T>>::take(who); if!dust.is_zero() { T::Currency::unreserve(who, dust); } T::EnergyCurrency::slash(who, T::EnergyCurrency::total_balance(who)); } } impl<T: Trait> ActivityInterface<T::AccountId, ActionPointOf<T>> for Module<T> { // do admire fn admire(sender: &T::AccountId, target: &T::AccountId, cap: ActionPointOf<T>) -> Result { let earned_rp = T::ActionPointToReputation::convert(cap.clone()); ensure!(!earned_rp.is_zero(), "action point too low "); T::ActivityCurrency::withdraw(sender, cap, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?; T::ReputationCurrency::deposit_into_existing(target, earned_rp).unwrap(); Ok(()) } } /// Require the transactor pay for themselves and maybe include a tip to gain additional priority /// in the queue. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct TakeFees<T: Trait>(#[codec(compact)] BalanceOf<T>); impl<T: Trait> TakeFees<T> { /// utility constructor. Used only in client/factory code. pub fn from(fee: BalanceOf<T>) -> Self { Self(fee) } /// Compute the final fee value for a particular transaction. /// /// The final fee is composed of: /// - _length-fee_: This is the amount paid merely to pay for size of the transaction. /// - _weight-fee_: This amount is computed based on the weight of the transaction. Unlike /// size-fee, this is not input dependent and reflects the _complexity_ of the execution /// and the time it consumes. /// - (optional) _tip_: if included in the transaction, it will be added on top. Only signed /// transactions can have a tip. fn compute_fee(len: usize, info: DispatchInfo, tip: BalanceOf<T>) -> BalanceOf<T> { let len_fee = if info.pay_length_fee() { let len = <BalanceOf<T> as From<u32>>::from(len as u32); let base = T::TransactionBaseFee::get(); let per_byte = T::TransactionByteFee::get(); base.saturating_add(per_byte.saturating_mul(len)) } else { Zero::zero() }; let weight_fee = { // cap the weight to the maximum defined in runtime, otherwise it will be the `Bounded` // maximum of its data type, which is not desired. let capped_weight = info.weight.min(<T as system::Trait>::MaximumBlockWeight::get()); let weight_update = <system::Module<T>>::next_weight_multiplier(); let adjusted_weight = weight_update.apply_to(capped_weight); T::WeightToFee::convert(adjusted_weight) }; len_fee.saturating_add(weight_fee).saturating_add(tip) } } #[cfg(feature = "std")] impl<T: Trait> rstd::fmt::Debug for TakeFees<T> { fn fmt(&self, f: &mut rstd::fmt::Formatter) -> rstd::fmt::Result { self.0.fmt(f) } } impl<T: Trait> SignedExtension for TakeFees<T> where BalanceOf<T>: core::marker::Send + core::marker::Sync { type AccountId = <T as system::Trait>::AccountId; type Call = T::Call; type AdditionalSigned = (); type Pre = (); fn additional_signed(&self) -> rstd::result::Result<(), TransactionValidityError> { Ok(()) } fn validate( &self, who: &Self::AccountId, _call: &Self::Call, info: DispatchInfo, len: usize, ) -> TransactionValidity { let fee = Self::compute_fee(len, info, self.0); // pay fees. 
        // pay fees: first use energy, then fall back to balance for the remainder
        let required_energy = T::FeeToEnergy::convert(fee);
        let available_energy = T::EnergyCurrency::available_free_balance(who);
        let using_energy = required_energy.min(available_energy);
        let mut using_fee = BalanceOf::<T>::zero();
        if using_energy < required_energy {
            using_fee = T::EnergyToFee::convert(required_energy - using_energy);
        }

        let now = <system::Module<T>>::block_number();
        let locking_block = T::EnergyToLocking::convert(using_energy);
        // lock energy and collect any energy that has unlocked in the meantime
        let unlocked_energy = match T::EnergyCurrency::use_and_lock_free_balance(who, using_energy.clone(), now + locking_block) {
            Ok(result) => result,
            Err(_) => return InvalidTransaction::Payment.into(),
        };
        // dispatch EnergyRecovered
        if !unlocked_energy.is_zero() {
            <Module<T>>::deposit_event(RawEvent::EnergyRecovered(who.clone(), unlocked_energy));
        }

        let imbalance = match T::Currency::withdraw(
            who,
            using_fee.clone(),
            WithdrawReason::TransactionPayment,
            ExistenceRequirement::KeepAlive,
        ) {
            Ok(imbalance) => imbalance,
            Err(_) => return InvalidTransaction::Payment.into(),
        };
        T::TransactionPayment::on_unbalanced(imbalance);

        // increase action points
        if !using_energy.is_zero() {
            let earned_ap = T::EnergyToActionPoint::convert(using_energy.clone());
            if !earned_ap.is_zero() {
                T::ActivityCurrency::deposit_into_existing(who, earned_ap).unwrap();
            }
        }

        // Send event
        <Module<T>>::deposit_event(RawEvent::FeePayed(who.clone(), using_energy, using_fee));

        let mut r = ValidTransaction::default();
        // NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here,
        // which will be a bit more than setting the priority to tip. For now, this is enough.
        r.priority = fee.saturated_into::<TransactionPriority>();
        Ok(r)
    }
}
//! # Activity Module //! #![cfg_attr(not(feature = "std"), no_std)]
random_line_split
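`charge_for_energy` in the row above follows a check-then-mutate discipline: every `checked_add`/`checked_sub` runs before any state changes, so a failure leaves the account untouched. A sketch of that shape, with plain `u64`s standing in for the runtime's Balance and Energy types and a 1:1 charging conversion assumed in place of `ChargingToEnergy`.

```rust
struct Account {
    free: u64,
    charged: u64,
    energy: u64,
}

fn charge(acct: &mut Account, value: u64) -> Result<(), &'static str> {
    // Phase 1: validate everything up front.
    if acct.free < value {
        return Err("not enough free funds");
    }
    let new_charged = acct.charged.checked_add(value).ok_or("charged overflow")?;
    // 1 unit of balance buys 1 unit of energy in this sketch; the real
    // module goes through a ChargingToEnergy conversion instead.
    let new_energy = acct.energy.checked_add(value).ok_or("energy overflow")?;

    // Phase 2: mutate, now that nothing can fail.
    acct.free -= value;
    acct.charged = new_charged;
    acct.energy = new_energy;
    Ok(())
}

fn main() {
    let mut a = Account { free: 100, charged: 0, energy: 0 };
    charge(&mut a, 40).unwrap();
    assert_eq!((a.free, a.charged, a.energy), (60, 40, 40));
    assert!(charge(&mut a, 1_000).is_err()); // rejected, state unchanged
    assert_eq!(a.free, 60);
}
```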
mod.rs
//! # Activity Module //! #![cfg_attr(not(feature = "std"), no_std)] use rstd::prelude::*; use codec::{Encode, Decode}; use support::{ StorageValue, StorageMap, decl_event, decl_storage, decl_module, ensure, traits::{ Currency, ReservableCurrency, OnFreeBalanceZero, OnUnbalanced, WithdrawReason, ExistenceRequirement, Imbalance, Get, }, dispatch::Result, }; use sr_primitives::{ transaction_validity::{ TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError, TransactionValidity, }, traits::{ Zero, CheckedAdd, CheckedSub, Saturating, SignedExtension, SaturatedConversion, Convert, }, weights::{DispatchInfo, SimpleDispatchInfo, Weight}, }; use system::{OnNewAccount, ensure_signed}; use crate::non_transfer_asset::SustainableCurrency; /// Trait for activity pub trait ActivityInterface<AccountId, Balance> { fn admire(sender: &AccountId, target: &AccountId, cap: Balance) -> Result; } /// The module's configuration trait. pub trait Trait: system::Trait { /// Currency type for this module. type Currency: ReservableCurrency<Self::AccountId>; /// Energy type for this module type EnergyCurrency: SustainableCurrency<Self::AccountId, Moment=Self::BlockNumber>; /// Action point type for this module type ActivityCurrency: Currency<Self::AccountId>; /// Reputation point type for this module type ReputationCurrency: Currency<Self::AccountId>; /// Handler for the unbalanced reduction when taking transaction fees. type TransactionPayment: OnUnbalanced<NegativeImbalanceOf<Self>>; /// The overarching event type. type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>; /// The fee to be paid for making a transaction; the base. type TransactionBaseFee: Get<BalanceOf<Self>>; /// The fee to be paid for making a transaction; the per-byte portion. type TransactionByteFee: Get<BalanceOf<Self>>; /// The base Energy amount of activated account type EnergyBaseAmount: Get<EnergyOf<Self>>; /// Convert a weight value into a deductible fee based on the currency type. type WeightToFee: Convert<Weight, BalanceOf<Self>>; /// Convert a fee value to energy point type FeeToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>; /// Convert a charging value to energy point type ChargingToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>; /// Convert an energy point to fee value type EnergyToFee: Convert<EnergyOf<Self>, BalanceOf<Self>>; /// Convert an energy point to locking block number type EnergyToLocking: Convert<EnergyOf<Self>, <Self as system::Trait>::BlockNumber>; /// Convert an energy point to action point type EnergyToActionPoint: Convert<EnergyOf<Self>, ActionPointOf<Self>>; /// Convert an action point to reputation type ActionPointToReputation: Convert<ActionPointOf<Self>, ReputationOf<Self>>; } // Balance zone pub type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::Balance; type NegativeImbalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::NegativeImbalance; // Energy zone pub type EnergyOf<T> = <<T as Trait>::EnergyCurrency as Currency<<T as system::Trait>::AccountId>>::Balance; // Action zone pub type ActionPointOf<T> = <<T as Trait>::ActivityCurrency as Currency<<T as system::Trait>::AccountId>>::Balance; // Reputation zone pub type ReputationOf<T> = <<T as Trait>::ReputationCurrency as Currency<<T as system::Trait>::AccountId>>::Balance; // This module's storage items. decl_storage! 
{ trait Store for Module<T: Trait> as Activities { /// Map from all extend pub Charged get(charged): map T::AccountId => BalanceOf<T>; } } decl_event!( pub enum Event<T> where AccountId = <T as system::Trait>::AccountId, Balance = BalanceOf<T>, Energy = EnergyOf<T>, Reputation = ReputationOf<T> { // Fee payment FeePayed(AccountId, Energy, Balance), EnergyRecovered(AccountId, Energy), // Reputation part ReputationReward(AccountId, Reputation), ReputationSlash(AccountId, Reputation), } ); // The module's dispatchable functions. decl_module! { /// The module declaration. pub struct Module<T: Trait> for enum Call where origin: T::Origin { // Initializing events fn deposit_event() = default; /// Bond to increase Energy #[weight = SimpleDispatchInfo::FixedNormal(1_000_000)] pub fn charge( origin, #[compact] value: BalanceOf<T> ) { let who = ensure_signed(origin)?; Self::charge_for_energy(&who, value)?; } /// UnBond to decrease Energy #[weight = SimpleDispatchInfo::FixedNormal(1_000_000)] pub fn discharge( origin, #[compact] value: BalanceOf<T> ) { let who = ensure_signed(origin)?; Self::discharge_for_energy(&who, value)?; } } } // The module's main implement impl<T: Trait> Module<T> { // PUBLIC IMMUTABLES pub fn available_energy(who: &T::AccountId) -> EnergyOf<T> { T::EnergyCurrency::available_free_balance(who) } // PRIVATE MUTABLES fn charge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result { // ensure reserve if!T::Currency::can_reserve(who, value) { return Err("not enough free funds"); } // check current_charged let current_charged = <Charged<T>>::get(who); let new_charged = current_charged.checked_add(&value).ok_or("account has charged overflow")?; let energy_to_charge = T::ChargingToEnergy::convert(value); let current_energy = T::EnergyCurrency::free_balance(who); current_energy.checked_add(&energy_to_charge).ok_or("Overflow energy amount")?; // MUTABLES T::Currency::reserve(who, value)?; T::EnergyCurrency::deposit_into_existing(who, energy_to_charge)?; <Charged<T>>::insert(who, new_charged); Ok(()) } fn discharge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result { // check current_charged let current_charged = <Charged<T>>::get(who); let new_charged = current_charged.checked_sub(&value).ok_or("account has too few charged funds")?; let energy_to_discharge = T::ChargingToEnergy::convert(value); let current_energy = T::EnergyCurrency::free_balance(who); current_energy.checked_sub(&energy_to_discharge).ok_or("account has too few energy")?; // MUTABLES T::EnergyCurrency::withdraw(who, energy_to_discharge, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?; T::Currency::unreserve(who, value); <Charged<T>>::insert(who, new_charged); Ok(()) } } impl<T: Trait> OnNewAccount<T::AccountId> for Module<T> { // Implementation of the config type managing the creation of new accounts. 
fn on_new_account(who: &T::AccountId) { T::EnergyCurrency::deposit_creating(who, T::EnergyBaseAmount::get()); } } impl<T: Trait> OnFreeBalanceZero<T::AccountId> for Module<T> { fn on_free_balance_zero(who: &T::AccountId) { let dust = <Charged<T>>::take(who); if!dust.is_zero() { T::Currency::unreserve(who, dust); } T::EnergyCurrency::slash(who, T::EnergyCurrency::total_balance(who)); } } impl<T: Trait> ActivityInterface<T::AccountId, ActionPointOf<T>> for Module<T> { // do admire fn admire(sender: &T::AccountId, target: &T::AccountId, cap: ActionPointOf<T>) -> Result { let earned_rp = T::ActionPointToReputation::convert(cap.clone()); ensure!(!earned_rp.is_zero(), "action point too low "); T::ActivityCurrency::withdraw(sender, cap, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?; T::ReputationCurrency::deposit_into_existing(target, earned_rp).unwrap(); Ok(()) } } /// Require the transactor pay for themselves and maybe include a tip to gain additional priority /// in the queue. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct
<T: Trait>(#[codec(compact)] BalanceOf<T>); impl<T: Trait> TakeFees<T> { /// utility constructor. Used only in client/factory code. pub fn from(fee: BalanceOf<T>) -> Self { Self(fee) } /// Compute the final fee value for a particular transaction. /// /// The final fee is composed of: /// - _length-fee_: This is the amount paid merely to pay for size of the transaction. /// - _weight-fee_: This amount is computed based on the weight of the transaction. Unlike /// size-fee, this is not input dependent and reflects the _complexity_ of the execution /// and the time it consumes. /// - (optional) _tip_: if included in the transaction, it will be added on top. Only signed /// transactions can have a tip. fn compute_fee(len: usize, info: DispatchInfo, tip: BalanceOf<T>) -> BalanceOf<T> { let len_fee = if info.pay_length_fee() { let len = <BalanceOf<T> as From<u32>>::from(len as u32); let base = T::TransactionBaseFee::get(); let per_byte = T::TransactionByteFee::get(); base.saturating_add(per_byte.saturating_mul(len)) } else { Zero::zero() }; let weight_fee = { // cap the weight to the maximum defined in runtime, otherwise it will be the `Bounded` // maximum of its data type, which is not desired. let capped_weight = info.weight.min(<T as system::Trait>::MaximumBlockWeight::get()); let weight_update = <system::Module<T>>::next_weight_multiplier(); let adjusted_weight = weight_update.apply_to(capped_weight); T::WeightToFee::convert(adjusted_weight) }; len_fee.saturating_add(weight_fee).saturating_add(tip) } } #[cfg(feature = "std")] impl<T: Trait> rstd::fmt::Debug for TakeFees<T> { fn fmt(&self, f: &mut rstd::fmt::Formatter) -> rstd::fmt::Result { self.0.fmt(f) } } impl<T: Trait> SignedExtension for TakeFees<T> where BalanceOf<T>: core::marker::Send + core::marker::Sync { type AccountId = <T as system::Trait>::AccountId; type Call = T::Call; type AdditionalSigned = (); type Pre = (); fn additional_signed(&self) -> rstd::result::Result<(), TransactionValidityError> { Ok(()) } fn validate( &self, who: &Self::AccountId, _call: &Self::Call, info: DispatchInfo, len: usize, ) -> TransactionValidity { let fee = Self::compute_fee(len, info, self.0); // pay fees. 
        // pay fees: first use energy, then fall back to balance for the remainder
        let required_energy = T::FeeToEnergy::convert(fee);
        let available_energy = T::EnergyCurrency::available_free_balance(who);
        let using_energy = required_energy.min(available_energy);
        let mut using_fee = BalanceOf::<T>::zero();
        if using_energy < required_energy {
            using_fee = T::EnergyToFee::convert(required_energy - using_energy);
        }

        let now = <system::Module<T>>::block_number();
        let locking_block = T::EnergyToLocking::convert(using_energy);
        // lock energy and collect any energy that has unlocked in the meantime
        let unlocked_energy = match T::EnergyCurrency::use_and_lock_free_balance(who, using_energy.clone(), now + locking_block) {
            Ok(result) => result,
            Err(_) => return InvalidTransaction::Payment.into(),
        };
        // dispatch EnergyRecovered
        if !unlocked_energy.is_zero() {
            <Module<T>>::deposit_event(RawEvent::EnergyRecovered(who.clone(), unlocked_energy));
        }

        let imbalance = match T::Currency::withdraw(
            who,
            using_fee.clone(),
            WithdrawReason::TransactionPayment,
            ExistenceRequirement::KeepAlive,
        ) {
            Ok(imbalance) => imbalance,
            Err(_) => return InvalidTransaction::Payment.into(),
        };
        T::TransactionPayment::on_unbalanced(imbalance);

        // increase action points
        if !using_energy.is_zero() {
            let earned_ap = T::EnergyToActionPoint::convert(using_energy.clone());
            if !earned_ap.is_zero() {
                T::ActivityCurrency::deposit_into_existing(who, earned_ap).unwrap();
            }
        }

        // Send event
        <Module<T>>::deposit_event(RawEvent::FeePayed(who.clone(), using_energy, using_fee));

        let mut r = ValidTransaction::default();
        // NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here,
        // which will be a bit more than setting the priority to tip. For now, this is enough.
        r.priority = fee.saturated_into::<TransactionPriority>();
        Ok(r)
    }
}
TakeFees
identifier_name
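The doc comment on `compute_fee` in the row above breaks the final fee into a length fee, a weight fee, and an optional tip. A sketch of that formula with saturating arithmetic, so oversized inputs clamp instead of overflowing; the constants and the `weight_to_fee` conversion are illustrative, not the runtime's real parameters.

```rust
const BASE_FEE: u64 = 100;
const PER_BYTE_FEE: u64 = 2;
const MAX_BLOCK_WEIGHT: u64 = 1_000_000;

// Stand-in for the WeightToFee conversion type.
fn weight_to_fee(weight: u64) -> u64 {
    weight / 10
}

fn compute_fee(len: usize, weight: u64, tip: u64) -> u64 {
    // length fee: base + per_byte * len
    let len_fee = BASE_FEE.saturating_add(PER_BYTE_FEE.saturating_mul(len as u64));
    // Cap the weight so a bogus dispatch weight cannot blow up the fee.
    let weight_fee = weight_to_fee(weight.min(MAX_BLOCK_WEIGHT));
    len_fee.saturating_add(weight_fee).saturating_add(tip)
}

fn main() {
    // 200-byte tx, weight 50_000, tip 7:
    // 100 + 2*200 + 50_000/10 + 7 = 5_507
    assert_eq!(compute_fee(200, 50_000, 7), 5_507);
    // A weight above the cap is clamped to MAX_BLOCK_WEIGHT.
    assert_eq!(compute_fee(0, u64::MAX, 0), 100 + 100_000);
}
```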
mod.rs
//! # Activity Module //! #![cfg_attr(not(feature = "std"), no_std)] use rstd::prelude::*; use codec::{Encode, Decode}; use support::{ StorageValue, StorageMap, decl_event, decl_storage, decl_module, ensure, traits::{ Currency, ReservableCurrency, OnFreeBalanceZero, OnUnbalanced, WithdrawReason, ExistenceRequirement, Imbalance, Get, }, dispatch::Result, }; use sr_primitives::{ transaction_validity::{ TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError, TransactionValidity, }, traits::{ Zero, CheckedAdd, CheckedSub, Saturating, SignedExtension, SaturatedConversion, Convert, }, weights::{DispatchInfo, SimpleDispatchInfo, Weight}, }; use system::{OnNewAccount, ensure_signed}; use crate::non_transfer_asset::SustainableCurrency; /// Trait for activity pub trait ActivityInterface<AccountId, Balance> { fn admire(sender: &AccountId, target: &AccountId, cap: Balance) -> Result; } /// The module's configuration trait. pub trait Trait: system::Trait { /// Currency type for this module. type Currency: ReservableCurrency<Self::AccountId>; /// Energy type for this module type EnergyCurrency: SustainableCurrency<Self::AccountId, Moment=Self::BlockNumber>; /// Action point type for this module type ActivityCurrency: Currency<Self::AccountId>; /// Reputation point type for this module type ReputationCurrency: Currency<Self::AccountId>; /// Handler for the unbalanced reduction when taking transaction fees. type TransactionPayment: OnUnbalanced<NegativeImbalanceOf<Self>>; /// The overarching event type. type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>; /// The fee to be paid for making a transaction; the base. type TransactionBaseFee: Get<BalanceOf<Self>>; /// The fee to be paid for making a transaction; the per-byte portion. type TransactionByteFee: Get<BalanceOf<Self>>; /// The base Energy amount of activated account type EnergyBaseAmount: Get<EnergyOf<Self>>; /// Convert a weight value into a deductible fee based on the currency type. type WeightToFee: Convert<Weight, BalanceOf<Self>>; /// Convert a fee value to energy point type FeeToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>; /// Convert a charging value to energy point type ChargingToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>; /// Convert an energy point to fee value type EnergyToFee: Convert<EnergyOf<Self>, BalanceOf<Self>>; /// Convert an energy point to locking block number type EnergyToLocking: Convert<EnergyOf<Self>, <Self as system::Trait>::BlockNumber>; /// Convert an energy point to action point type EnergyToActionPoint: Convert<EnergyOf<Self>, ActionPointOf<Self>>; /// Convert an action point to reputation type ActionPointToReputation: Convert<ActionPointOf<Self>, ReputationOf<Self>>; } // Balance zone pub type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::Balance; type NegativeImbalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::NegativeImbalance; // Energy zone pub type EnergyOf<T> = <<T as Trait>::EnergyCurrency as Currency<<T as system::Trait>::AccountId>>::Balance; // Action zone pub type ActionPointOf<T> = <<T as Trait>::ActivityCurrency as Currency<<T as system::Trait>::AccountId>>::Balance; // Reputation zone pub type ReputationOf<T> = <<T as Trait>::ReputationCurrency as Currency<<T as system::Trait>::AccountId>>::Balance; // This module's storage items. decl_storage! 
{ trait Store for Module<T: Trait> as Activities { /// Map from all extend pub Charged get(charged): map T::AccountId => BalanceOf<T>; } } decl_event!( pub enum Event<T> where AccountId = <T as system::Trait>::AccountId, Balance = BalanceOf<T>, Energy = EnergyOf<T>, Reputation = ReputationOf<T> { // Fee payment FeePayed(AccountId, Energy, Balance), EnergyRecovered(AccountId, Energy), // Reputation part ReputationReward(AccountId, Reputation), ReputationSlash(AccountId, Reputation), } ); // The module's dispatchable functions. decl_module! { /// The module declaration. pub struct Module<T: Trait> for enum Call where origin: T::Origin { // Initializing events fn deposit_event() = default; /// Bond to increase Energy #[weight = SimpleDispatchInfo::FixedNormal(1_000_000)] pub fn charge( origin, #[compact] value: BalanceOf<T> ) { let who = ensure_signed(origin)?; Self::charge_for_energy(&who, value)?; } /// UnBond to decrease Energy #[weight = SimpleDispatchInfo::FixedNormal(1_000_000)] pub fn discharge( origin, #[compact] value: BalanceOf<T> ) { let who = ensure_signed(origin)?; Self::discharge_for_energy(&who, value)?; } } } // The module's main implement impl<T: Trait> Module<T> { // PUBLIC IMMUTABLES pub fn available_energy(who: &T::AccountId) -> EnergyOf<T>
// PRIVATE MUTABLES fn charge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result { // ensure reserve if !T::Currency::can_reserve(who, value) { return Err("not enough free funds"); } // check current_charged let current_charged = <Charged<T>>::get(who); let new_charged = current_charged.checked_add(&value).ok_or("account has charged overflow")?; let energy_to_charge = T::ChargingToEnergy::convert(value); let current_energy = T::EnergyCurrency::free_balance(who); current_energy.checked_add(&energy_to_charge).ok_or("Overflow energy amount")?; // MUTABLES T::Currency::reserve(who, value)?; T::EnergyCurrency::deposit_into_existing(who, energy_to_charge)?; <Charged<T>>::insert(who, new_charged); Ok(()) } fn discharge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result { // check current_charged let current_charged = <Charged<T>>::get(who); let new_charged = current_charged.checked_sub(&value).ok_or("account has too few charged funds")?; let energy_to_discharge = T::ChargingToEnergy::convert(value); let current_energy = T::EnergyCurrency::free_balance(who); current_energy.checked_sub(&energy_to_discharge).ok_or("account has too few energy")?; // MUTABLES T::EnergyCurrency::withdraw(who, energy_to_discharge, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?; T::Currency::unreserve(who, value); <Charged<T>>::insert(who, new_charged); Ok(()) } } impl<T: Trait> OnNewAccount<T::AccountId> for Module<T> { // Implementation of the config type managing the creation of new accounts. fn on_new_account(who: &T::AccountId) { T::EnergyCurrency::deposit_creating(who, T::EnergyBaseAmount::get()); } } impl<T: Trait> OnFreeBalanceZero<T::AccountId> for Module<T> { fn on_free_balance_zero(who: &T::AccountId) { let dust = <Charged<T>>::take(who); if !dust.is_zero() { T::Currency::unreserve(who, dust); } T::EnergyCurrency::slash(who, T::EnergyCurrency::total_balance(who)); } } impl<T: Trait> ActivityInterface<T::AccountId, ActionPointOf<T>> for Module<T> { // do admire fn admire(sender: &T::AccountId, target: &T::AccountId, cap: ActionPointOf<T>) -> Result { let earned_rp = T::ActionPointToReputation::convert(cap.clone()); ensure!(!earned_rp.is_zero(), "action point too low"); T::ActivityCurrency::withdraw(sender, cap, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?; T::ReputationCurrency::deposit_into_existing(target, earned_rp).unwrap(); Ok(()) } } /// Require the transactor pay for themselves and maybe include a tip to gain additional priority /// in the queue. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct TakeFees<T: Trait>(#[codec(compact)] BalanceOf<T>); impl<T: Trait> TakeFees<T> { /// Utility constructor. Used only in client/factory code. pub fn from(fee: BalanceOf<T>) -> Self { Self(fee) } /// Compute the final fee value for a particular transaction. /// /// The final fee is composed of: /// - _length-fee_: This is the amount paid merely to pay for size of the transaction. /// - _weight-fee_: This amount is computed based on the weight of the transaction. Unlike /// size-fee, this is not input dependent and reflects the _complexity_ of the execution /// and the time it consumes. /// - (optional) _tip_: if included in the transaction, it will be added on top. Only signed /// transactions can have a tip.
fn compute_fee(len: usize, info: DispatchInfo, tip: BalanceOf<T>) -> BalanceOf<T> { let len_fee = if info.pay_length_fee() { let len = <BalanceOf<T> as From<u32>>::from(len as u32); let base = T::TransactionBaseFee::get(); let per_byte = T::TransactionByteFee::get(); base.saturating_add(per_byte.saturating_mul(len)) } else { Zero::zero() }; let weight_fee = { // cap the weight to the maximum defined in runtime, otherwise it will be the `Bounded` // maximum of its data type, which is not desired. let capped_weight = info.weight.min(<T as system::Trait>::MaximumBlockWeight::get()); let weight_update = <system::Module<T>>::next_weight_multiplier(); let adjusted_weight = weight_update.apply_to(capped_weight); T::WeightToFee::convert(adjusted_weight) }; len_fee.saturating_add(weight_fee).saturating_add(tip) } } #[cfg(feature = "std")] impl<T: Trait> rstd::fmt::Debug for TakeFees<T> { fn fmt(&self, f: &mut rstd::fmt::Formatter) -> rstd::fmt::Result { self.0.fmt(f) } } impl<T: Trait> SignedExtension for TakeFees<T> where BalanceOf<T>: core::marker::Send + core::marker::Sync { type AccountId = <T as system::Trait>::AccountId; type Call = T::Call; type AdditionalSigned = (); type Pre = (); fn additional_signed(&self) -> rstd::result::Result<(), TransactionValidityError> { Ok(()) } fn validate( &self, who: &Self::AccountId, _call: &Self::Call, info: DispatchInfo, len: usize, ) -> TransactionValidity { let fee = Self::compute_fee(len, info, self.0); // pay fees: first use energy, second use balance let required_energy = T::FeeToEnergy::convert(fee); let available_energy = T::EnergyCurrency::available_free_balance(who); let using_energy = required_energy.min(available_energy); let mut using_fee = BalanceOf::<T>::zero(); if using_energy < required_energy { using_fee = T::EnergyToFee::convert(required_energy - using_energy); } let now = <system::Module<T>>::block_number(); let locking_block = T::EnergyToLocking::convert(using_energy); // lock energy and get unlocked energy let unlocked_energy = match T::EnergyCurrency::use_and_lock_free_balance(who, using_energy.clone(), now + locking_block) { Ok(result) => result, Err(_) => return InvalidTransaction::Payment.into(), }; // dispatch EnergyRecovered if !unlocked_energy.is_zero() { <Module<T>>::deposit_event(RawEvent::EnergyRecovered(who.clone(), unlocked_energy)); } let imbalance = match T::Currency::withdraw( who, using_fee.clone(), WithdrawReason::TransactionPayment, ExistenceRequirement::KeepAlive, ) { Ok(imbalance) => imbalance, Err(_) => return InvalidTransaction::Payment.into(), }; T::TransactionPayment::on_unbalanced(imbalance); // increase action point if !using_energy.is_zero() { let earned_ap = T::EnergyToActionPoint::convert(using_energy.clone()); if !earned_ap.is_zero() { T::ActivityCurrency::deposit_into_existing(who, earned_ap).unwrap(); } } // Send event <Module<T>>::deposit_event(RawEvent::FeePayed(who.clone(), using_energy, using_fee)); let mut r = ValidTransaction::default(); // NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here, which // will be a bit more than setting the priority to tip. For now, this is enough. r.priority = fee.saturated_into::<TransactionPriority>(); Ok(r) } }
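The validate flow above composes the fee from three parts (length fee, capped weight fee, tip) and then pays it energy-first, falling back to balance for any shortfall. Below is a minimal standalone sketch of that arithmetic over plain u64 values; the constants and the 1:1 fee/energy conversion are illustrative assumptions, not the runtime's configured types.

// Hedged sketch of the fee split in TakeFees::validate; not the pallet's real types.
fn split_fee(len_bytes: u64, weight_fee: u64, tip: u64, available_energy: u64) -> (u64, u64) {
    let base: u64 = 100; // stand-in for TransactionBaseFee
    let per_byte: u64 = 1; // stand-in for TransactionByteFee
    let fee = base
        .saturating_add(per_byte.saturating_mul(len_bytes))
        .saturating_add(weight_fee)
        .saturating_add(tip);
    // Energy pays first (FeeToEnergy assumed 1:1 here); balance covers the rest.
    let using_energy = fee.min(available_energy);
    let using_fee = fee - using_energy; // EnergyToFee, also assumed 1:1
    (using_energy, using_fee)
}

// split_fee(10, 5, 0, 80) == (80, 35): fee = 100 + 10 + 5 = 115,
// 80 is paid in energy and the remaining 35 is withdrawn from the balance.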
{ T::EnergyCurrency::available_free_balance(who) }
identifier_body
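Both charge_for_energy and discharge_for_energy above follow a check-then-mutate discipline: every sum and difference is validated with checked_add/checked_sub before any storage write happens. A self-contained sketch of the same invariant over a plain map; the Ledger alias and u64 keys are illustrative stand-ins for the Charged storage map, not the pallet's types.

use std::collections::HashMap;

type Ledger = HashMap<u64, u64>; // hypothetical stand-in for Charged

// Mirrors charge_for_energy: reject overflow before mutating anything.
fn charge(ledger: &mut Ledger, who: u64, value: u64) -> Result<(), &'static str> {
    let current = ledger.get(&who).copied().unwrap_or(0);
    let new_charged = current.checked_add(value).ok_or("account has charged overflow")?;
    ledger.insert(who, new_charged); // state changes only after all checks pass
    Ok(())
}

// Mirrors discharge_for_energy: the symmetric guard with checked_sub.
fn discharge(ledger: &mut Ledger, who: u64, value: u64) -> Result<(), &'static str> {
    let current = ledger.get(&who).copied().unwrap_or(0);
    let new_charged = current.checked_sub(value).ok_or("account has too few charged funds")?;
    ledger.insert(who, new_charged);
    Ok(())
}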
lib.rs
} } } impl From<StoreError> for ProcessError { fn from(err: StoreError) -> Self { match err { StoreError::MissingDigest(s, d) => Self::MissingDigest(s, d), StoreError::Unclassified(s) => Self::Unclassified(s), } } } impl From<String> for ProcessError { fn from(err: String) -> Self { Self::Unclassified(err) } } #[derive( PartialOrd, Ord, Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq, Hash, Serialize, Deserialize, )] #[allow(non_camel_case_types)] pub enum Platform { Macos_x86_64, Macos_arm64, Linux_x86_64, Linux_arm64, } impl Platform { pub fn current() -> Result<Platform, String> { let platform_info = uname::uname().map_err(|_| "Failed to get local platform info!".to_string())?; match platform_info { uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "linux" && machine.to_lowercase() == "x86_64" => { Ok(Platform::Linux_x86_64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "linux" && (machine.to_lowercase() == "arm64" || machine.to_lowercase() == "aarch64") => { Ok(Platform::Linux_arm64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "darwin" && machine.to_lowercase() == "arm64" => { Ok(Platform::Macos_arm64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "darwin" && machine.to_lowercase() == "x86_64" => { Ok(Platform::Macos_x86_64) } uname::Info { ref sysname, ref machine, .. } => Err(format!( "Found unknown system/arch name pair {sysname} {machine}" )), } } } impl From<Platform> for String { fn from(platform: Platform) -> String { match platform { Platform::Linux_x86_64 => "linux_x86_64".to_string(), Platform::Linux_arm64 => "linux_arm64".to_string(), Platform::Macos_arm64 => "macos_arm64".to_string(), Platform::Macos_x86_64 => "macos_x86_64".to_string(), } } } impl TryFrom<String> for Platform { type Error = String; fn try_from(variant_candidate: String) -> Result<Self, Self::Error> { match variant_candidate.as_ref() { "macos_arm64" => Ok(Platform::Macos_arm64), "macos_x86_64" => Ok(Platform::Macos_x86_64), "linux_x86_64" => Ok(Platform::Linux_x86_64), "linux_arm64" => Ok(Platform::Linux_arm64), other => Err(format!("Unknown platform {other:?} encountered in parsing")), } } } #[derive(Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq, Hash, Serialize)] pub enum ProcessCacheScope { // Cached in all locations, regardless of success or failure. Always, // Cached in all locations, but only if the process exits successfully. Successful, // Cached only in memory (i.e. memoized in pantsd), but never persistently, regardless of // success vs. failure. PerRestartAlways, // Cached only in memory (i.e. memoized in pantsd), but never persistently, and only if // successful. PerRestartSuccessful, // Will run once per Session, i.e. once per run of Pants. This happens because the engine // de-duplicates identical work; the process is neither memoized in memory nor cached to disk. 
PerSession, } impl TryFrom<String> for ProcessCacheScope { type Error = String; fn try_from(variant_candidate: String) -> Result<Self, Self::Error> { match variant_candidate.to_lowercase().as_ref() { "always" => Ok(ProcessCacheScope::Always), "successful" => Ok(ProcessCacheScope::Successful), "per_restart_always" => Ok(ProcessCacheScope::PerRestartAlways), "per_restart_successful" => Ok(ProcessCacheScope::PerRestartSuccessful), "per_session" => Ok(ProcessCacheScope::PerSession), other => Err(format!("Unknown Process cache scope: {other:?}")), } } } fn serialize_level<S: serde::Serializer>(level: &log::Level, s: S) -> Result<S::Ok, S::Error> { s.serialize_str(&level.to_string()) } /// Input Digests for a process execution. /// /// The `complete` and `nailgun` Digests are the computed union of various inputs. /// /// TODO: See `crate::local::prepare_workdir` regarding validation of overlapping inputs. #[derive(Clone, Debug, DeepSizeOf, Eq, Hash, PartialEq, Serialize)] pub struct InputDigests { /// All of the input Digests, merged and relativized. Runners without the ability to consume the /// Digests individually should directly consume this value. pub complete: DirectoryDigest, /// The merged Digest of any `use_nailgun`-relevant Digests. pub nailgun: DirectoryDigest, /// The input files for the process execution, which will be materialized as mutable inputs in a /// sandbox for the process. /// /// TODO: Rename to `inputs` for symmetry with `immutable_inputs`. pub input_files: DirectoryDigest, /// Immutable input digests to make available in the input root. /// /// These digests are intended for inputs that will be reused between multiple Process /// invocations, without being mutated. This might be useful to provide the tools being executed, /// but can also be used for tool inputs such as compilation artifacts. /// /// The digests will be mounted at the relative path represented by the `RelativePath` keys. /// The executor may choose how to make the digests available, including by just merging /// the digest normally into the input root, creating a symlink to a persistent cache, /// or bind mounting the directory read-only into a persistent cache. Consequently, the mount /// point of each input must not overlap the `input_files`, even for directory entries. /// /// Assumes the build action does not modify the Digest as made available. This may be /// enforced by an executor, for example by bind mounting the directory read-only. pub immutable_inputs: BTreeMap<RelativePath, DirectoryDigest>, /// If non-empty, use nailgun in supported runners, using the specified `immutable_inputs` keys /// as server inputs. All other keys (and the input_files) will be client inputs. pub use_nailgun: BTreeSet<RelativePath>, } impl InputDigests { pub async fn new( store: &Store, input_files: DirectoryDigest, immutable_inputs: BTreeMap<RelativePath, DirectoryDigest>, use_nailgun: BTreeSet<RelativePath>, ) -> Result<Self, StoreError> { // Collect all digests into `complete`. let mut complete_digests = try_join_all( immutable_inputs .iter() .map(|(path, digest)| store.add_prefix(digest.clone(), path)) .collect::<Vec<_>>(), ) .await?; // And collect only the subset of the Digests which impact nailgun into `nailgun`. 
let nailgun_digests = immutable_inputs .keys() .zip(complete_digests.iter()) .filter_map(|(path, digest)| { if use_nailgun.contains(path) { Some(digest.clone()) } else { None } }) .collect::<Vec<_>>(); complete_digests.push(input_files.clone()); let (complete, nailgun) = try_join!(store.merge(complete_digests), store.merge(nailgun_digests),)?; Ok(Self { complete, nailgun, input_files, immutable_inputs, use_nailgun, }) } pub async fn new_from_merged(store: &Store, from: Vec<InputDigests>) -> Result<Self, StoreError> { let mut merged_immutable_inputs = BTreeMap::new(); for input_digests in from.iter() { let size_before = merged_immutable_inputs.len(); let immutable_inputs = &input_digests.immutable_inputs; merged_immutable_inputs.append(&mut immutable_inputs.clone()); if size_before + immutable_inputs.len() != merged_immutable_inputs.len() { return Err( format!( "Tried to merge two-or-more immutable inputs at the same path with different values! \ The collision involved one of the entries in: {immutable_inputs:?}" ) .into(), ); } } let complete_digests = from .iter() .map(|input_digests| input_digests.complete.clone()) .collect(); let nailgun_digests = from .iter() .map(|input_digests| input_digests.nailgun.clone()) .collect(); let input_files_digests = from .iter() .map(|input_digests| input_digests.input_files.clone()) .collect(); let (complete, nailgun, input_files) = try_join!( store.merge(complete_digests), store.merge(nailgun_digests), store.merge(input_files_digests), )?; Ok(Self { complete, nailgun, input_files, immutable_inputs: merged_immutable_inputs, use_nailgun: Itertools::concat( from .iter() .map(|input_digests| input_digests.use_nailgun.clone()), ) .into_iter() .collect(), }) } pub fn with_input_files(input_files: DirectoryDigest) -> Self { Self { complete: input_files.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files, immutable_inputs: BTreeMap::new(), use_nailgun: BTreeSet::new(), } } /// Split the InputDigests into client and server subsets. /// /// TODO: The server subset will have an accurate `complete` Digest, but the client will not. /// This is currently safe because the nailgun client code does not consume that field, but it /// would be good to find a better factoring. pub fn nailgun_client_and_server(&self) -> (InputDigests, InputDigests) { let (server, client) = self .immutable_inputs .clone() .into_iter() .partition(|(path, _digest)| self.use_nailgun.contains(path)); ( // Client. InputDigests { // TODO: See method doc. complete: EMPTY_DIRECTORY_DIGEST.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: self.input_files.clone(), immutable_inputs: client, use_nailgun: BTreeSet::new(), }, // Server. InputDigests { complete: self.nailgun.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: EMPTY_DIRECTORY_DIGEST.clone(), immutable_inputs: server, use_nailgun: BTreeSet::new(), }, ) } } impl Default for InputDigests { fn default() -> Self { Self { complete: EMPTY_DIRECTORY_DIGEST.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: EMPTY_DIRECTORY_DIGEST.clone(), immutable_inputs: BTreeMap::new(), use_nailgun: BTreeSet::new(), } } } #[derive(DeepSizeOf, Debug, Clone, Hash, PartialEq, Eq, Serialize)] pub enum ProcessExecutionStrategy { Local, /// Stores the platform_properties. RemoteExecution(Vec<(String, String)>), /// Stores the image name.
Docker(String), } impl ProcessExecutionStrategy { /// What to insert into the Command proto so that we don't incorrectly cache /// Docker vs remote execution vs local execution. pub fn cache_value(&self) -> String { match self { Self::Local => "local_execution".to_string(), Self::RemoteExecution(_) => "remote_execution".to_string(), // NB: this image will include the container ID, thanks to // https://github.com/pantsbuild/pants/pull/17101. Self::Docker(image) => format!("docker_execution: {image}"), } } } /// /// A process to be executed. /// /// When executing a `Process` using the `local::CommandRunner`, any `{chroot}` placeholders in the /// environment variables are replaced with the temporary sandbox path. /// #[derive(DeepSizeOf, Derivative, Clone, Debug, Eq, Serialize)] #[derivative(PartialEq, Hash)] pub struct Process { /// /// The arguments to execute. /// /// The first argument should be an absolute or relative path to the binary to execute. /// /// No PATH lookup will be performed unless a PATH environment variable is specified. /// /// No shell expansion will take place. /// pub argv: Vec<String>, /// /// The environment variables to set for the execution. /// /// No other environment variables will be set (except possibly for an empty PATH variable). /// pub env: BTreeMap<String, String>, /// /// A relative path to a directory existing in the `input_files` digest to execute the process /// from. Defaults to the `input_files` root. /// pub working_directory: Option<RelativePath>, /// /// All of the input digests for the process. /// pub input_digests: InputDigests, pub output_files: BTreeSet<RelativePath>, pub output_directories: BTreeSet<RelativePath>, pub timeout: Option<std::time::Duration>, /// If not None, then a bounded::CommandRunner executing this Process will set an environment /// variable with this name containing a unique execution slot number. pub execution_slot_variable: Option<String>, /// If non-zero, the amount of parallelism that this process is capable of given its inputs. This /// value does not directly set the number of cores allocated to the process: that is computed /// based on availability, and provided as a template value in the arguments of the process. /// /// When set, a `{pants_concurrency}` variable will be templated into the `argv` of the process. /// /// Processes which set this value may be preempted (i.e. canceled and restarted) for a short /// period after starting if available resources have changed (because other processes have /// started or finished). pub concurrency_available: usize, #[derivative(PartialEq = "ignore", Hash = "ignore")] pub description: String, // NB: We serialize with a function to avoid adding a serde dep to the logging crate. #[serde(serialize_with = "serialize_level")] pub level: log::Level, /// /// Declares that this process uses the given named caches (which might have associated config /// in the future) at the associated relative paths within its workspace. Cache names must /// contain only lowercase ascii characters or underscores. /// /// Caches are exposed to processes within their workspaces at the relative paths represented /// by the values of the dict. A process may optionally check for the existence of the relevant /// directory, and disable use of that cache if it has not been created by the executor /// (indicating a lack of support for this feature). 
/// /// These caches are globally shared and so must be concurrency safe: a consumer of the cache /// must never assume that it has exclusive access to the provided directory. /// pub append_only_caches: BTreeMap<CacheName, RelativePath>, /// /// If present, a symlink will be created at .jdk which points to this directory for local /// execution, or a system-installed JDK (ignoring the value of the present Some) for remote /// execution. /// /// This is some technical debt we should clean up; /// see <https://github.com/pantsbuild/pants/issues/6416>. /// pub jdk_home: Option<PathBuf>, pub platform: Platform, pub cache_scope: ProcessCacheScope, pub execution_strategy: ProcessExecutionStrategy, pub remote_cache_speculation_delay: std::time::Duration, } impl Process { /// /// Constructs a Process with default values for most fields, after which the builder pattern can /// be used to set values. /// /// We use the more ergonomic (but possibly slightly slower) "move self for each builder method" /// pattern, so this method is only enabled for test usage: production usage should construct the /// Process struct wholesale. We can reconsider this if we end up with more production callsites /// that require partial options. /// #[cfg(test)] pub fn new(argv: Vec<String>) -> Process { Process { argv, env: BTreeMap::new(), working_directory: None, input_digests: InputDigests::default(), output_files: BTreeSet::new(), output_directories: BTreeSet::new(), timeout: None, description: "".to_string(), level: log::Level::Info, append_only_caches: BTreeMap::new(), jdk_home: None, platform: Platform::current().unwrap(), execution_slot_variable: None, concurrency_available: 0, cache_scope: ProcessCacheScope::Successful, execution_strategy: ProcessExecutionStrategy::Local, remote_cache_speculation_delay: std::time::Duration::from_millis(0), } } /// /// Replaces the environment for this process. /// pub fn env(mut self, env: BTreeMap<String, String>) -> Process { self.env = env; self } /// /// Replaces the working_directory for this process. /// pub fn working_directory(mut self, working_directory: Option<RelativePath>) -> Process { self.working_directory = working_directory; self } /// /// Replaces the output files for this process. /// pub fn output_files(mut self, output_files: BTreeSet<RelativePath>) -> Process { self.output_files = output_files; self } /// /// Replaces the output directories for this process. /// pub fn output_directories(mut self, output_directories: BTreeSet<RelativePath>) -> Process { self.output_directories = output_directories; self } /// /// Replaces the append only caches for this process. /// pub fn append_only_caches( mut self, append_only_caches: BTreeMap<CacheName, RelativePath>, ) -> Process { self.append_only_caches = append_only_caches; self } /// /// Set the execution strategy to Docker, with the specified image. /// pub fn docker(mut self, image: String) -> Process { self.execution_strategy = ProcessExecutionStrategy::Docker(image); self } /// /// Set the execution strategy to remote execution with the provided platform properties.
/// pub fn remote_execution_platform_properties( mut self, properties: Vec<(String, String)>, ) -> Process { self.execution_strategy = ProcessExecutionStrategy::RemoteExecution(properties); self } pub fn remote_cache_speculation_delay(mut self, delay: std::time::Duration) -> Process { self.remote_cache_speculation_delay = delay; self } pub fn cache_scope(mut self, cache_scope: ProcessCacheScope) -> Process { self.cache_scope = cache_scope; self } } /// /// The result of running a process. /// #[derive(DeepSizeOf, Derivative, Clone, Debug, Eq)] #[derivative(PartialEq, Hash)] pub struct FallibleProcessResultWithPlatform { pub stdout_digest: Digest, pub stderr_digest: Digest, pub exit_code: i32, pub output_directory: DirectoryDigest, pub platform: Platform, #[derivative(PartialEq = "ignore", Hash = "ignore")] pub metadata: ProcessResultMetadata, } /// Metadata for a ProcessResult corresponding to the REAPI `ExecutedActionMetadata` proto. This /// conversion is lossy, but the interesting parts are preserved. #[derive(Clone, Debug, DeepSizeOf, Eq, PartialEq)] pub struct ProcessResultMetadata { /// The time from starting to completion, including preparing the chroot and cleanup. /// Corresponds to `worker_start_timestamp` and `worker_completed_timestamp` from /// `ExecutedActionMetadata`. /// /// NB: This is optional because the REAPI does not guarantee that it is returned. pub total_elapsed: Option<Duration>, /// The source of the result. pub source: ProcessResultSource, /// The RunId of the Session in which the `ProcessResultSource` was accurate. In further runs /// within the same process, the source of the process implicitly becomes memoization. pub source_run_id: RunId, } impl ProcessResultMetadata { pub fn new( total_elapsed: Option<Duration>, source: ProcessResultSource, source_run_id: RunId, ) -> Self { Self { total_elapsed, source, source_run_id, } } pub fn new_from_metadata( metadata: ExecutedActionMetadata, source: ProcessResultSource, source_run_id: RunId, ) -> Self { let total_elapsed = match ( metadata.worker_start_timestamp, metadata.worker_completed_timestamp, ) { (Some(started), Some(completed)) => TimeSpan::from_start_and_end(&started, &completed, "") .map(|span| span.duration) .ok(), _ => None, }; Self { total_elapsed, source, source_run_id, } } /// How much faster a cache hit was than running the process again. /// /// This includes the overhead of setting up and cleaning up the process for execution, and it /// should include all overhead for the cache lookup. /// /// If the cache hit was slower than the original process, we return 0. Note that the cache hit /// may still have been faster than rerunning the process a second time, e.g. if speculation /// is used and the cache hit completed before the rerun; still, we cannot know how long the /// second run would have taken, so the best we can do is report 0. /// /// If the original process's execution time was not recorded, we return None because we /// cannot make a meaningful comparison. 
pub fn time_saved_from_cache( &self, cache_lookup: std::time::Duration, ) -> Option<std::time::Duration> { self.total_elapsed.and_then(|original_process| { let original_process: std::time::Duration = original_process.into(); original_process .checked_sub(cache_lookup) .or_else(|| Some(std::time::Duration::new(0, 0))) }) } } impl From<ProcessResultMetadata> for ExecutedActionMetadata { fn from(metadata: ProcessResultMetadata) -> ExecutedActionMetadata { let (total_start, total_end) = match metadata.total_elapsed { Some(elapsed) => { // Because we do not have the precise start time, we hardcode to starting at UNIX_EPOCH. We // only care about accurately preserving the duration. let start = prost_types::Timestamp { seconds: 0, nanos: 0, }; let end = prost_types::Timestamp { seconds: elapsed.secs as i64, nanos: elapsed.nanos as i32, }; (Some(start), Some(end)) } None => (None, None), }; ExecutedActionMetadata { worker_start_timestamp: total_start, worker_completed_timestamp: total_end, ..ExecutedActionMetadata::default() } } } #[derive(Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq)] pub enum ProcessResultSource { RanLocally, RanRemotely, HitLocally, HitRemotely, } impl From<ProcessResultSource> for &'static str { fn from(prs: ProcessResultSource) -> &'static str { match prs { ProcessResultSource::RanLocally => "ran_locally", ProcessResultSource::RanRemotely => "ran_remotely", ProcessResultSource::HitLocally => "hit_locally", ProcessResultSource::HitRemotely => "hit_remotely", } } } #[derive(Clone, Copy, Debug, PartialEq, Eq, strum_macros::EnumString)] #[strum(serialize_all = "snake_case")] pub enum CacheContentBehavior { Fetch, Validate, Defer, } /// /// Optionally validate that all digests in the result are loadable, returning false if any are not. /// /// If content loading is deferred, a Digest which is discovered to be missing later on during /// execution will cause backtracking. /// pub(crate) async fn check_cache_content( response: &FallibleProcessResultWithPlatform, store: &Store, cache_content_behavior: CacheContentBehavior, ) -> Result<bool, StoreError> { match cache_content_behavior { CacheContentBehavior::Fetch => { let response = response.clone(); let fetch_result = in_workunit!("eager_fetch_action_cache", Level::Trace, |_workunit| store .ensure_downloaded( HashSet::from([response.stdout_digest, response.stderr_digest]), HashSet::from([response.output_directory]) )) .await; match fetch_result { Err(StoreError::MissingDigest { .. }) => Ok(false), Ok(_) => Ok(true), Err(e) => Err(e), } } CacheContentBehavior::Validate =>
{ let directory_digests = vec![response.output_directory.clone()]; let file_digests = vec![response.stdout_digest, response.stderr_digest]; in_workunit!( "eager_validate_action_cache", Level::Trace, |_workunit| async move { store .exists_recursive(directory_digests, file_digests) .await } ) .await }
conditional_block
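time_saved_from_cache above reports the saving clamped at zero when the cache hit was slower than the original run, and None when the original elapsed time was never recorded. The behaviour reduces to a few lines over std Durations; this sketch is detached from the surrounding ProcessResultMetadata type.

use std::time::Duration;

// Clamp-at-zero saving computation, mirroring time_saved_from_cache.
fn time_saved(total_elapsed: Option<Duration>, cache_lookup: Duration) -> Option<Duration> {
    total_elapsed.map(|orig| orig.checked_sub(cache_lookup).unwrap_or(Duration::ZERO))
}

// time_saved(Some(Duration::from_millis(500)), Duration::from_millis(200))
//     == Some(Duration::from_millis(300))
// time_saved(Some(Duration::from_millis(100)), Duration::from_millis(200))
//     == Some(Duration::ZERO) // a slower hit reports zero, never a negative saving
// time_saved(None, Duration::from_millis(200)) == None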
lib.rs
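InputDigests::new_from_merged, which appears in the rows on either side of this marker, detects colliding immutable-input paths purely by length accounting: if appending a map grows the merged result by less than that map's size, some key already existed (note the check fires on any shared key, whether or not the values differ). The check in isolation, using String keys as stand-ins for RelativePath and u32 values for digests:

use std::collections::BTreeMap;

fn merge_unique(maps: Vec<BTreeMap<String, u32>>) -> Result<BTreeMap<String, u32>, String> {
    let mut merged = BTreeMap::new();
    for mut m in maps {
        let size_before = merged.len();
        let incoming = m.len();
        merged.append(&mut m); // append() drains `m` and overwrites duplicate keys
        // A shortfall in growth means two input maps shared a key.
        if size_before + incoming != merged.len() {
            return Err("immutable inputs collide at the same path".to_string());
        }
    }
    Ok(merged)
}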
test)] mod cache_tests; pub mod switched; pub mod children; pub mod docker; #[cfg(test)] mod docker_tests; pub mod local; #[cfg(test)] mod local_tests; pub mod nailgun; pub mod named_caches; pub mod remote; #[cfg(test)] pub mod remote_tests; pub mod remote_cache; #[cfg(test)] mod remote_cache_tests; extern crate uname; pub use crate::children::ManagedChild; pub use crate::named_caches::{CacheName, NamedCaches}; pub use crate::remote_cache::RemoteCacheWarningsBehavior; use crate::remote::EntireExecuteRequest; #[derive(Clone, Debug, PartialEq, Eq)] pub enum ProcessError { /// A Digest was not present in either of the local or remote Stores. MissingDigest(String, Digest), /// All other error types. Unclassified(String), } impl ProcessError { pub fn enrich(self, prefix: &str) -> Self { match self { Self::MissingDigest(s, d) => Self::MissingDigest(format!("{prefix}: {s}"), d), Self::Unclassified(s) => Self::Unclassified(format!("{prefix}: {s}")), } } } impl Display for ProcessError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::MissingDigest(s, d) => { write!(f, "{s}: {d:?}") } Self::Unclassified(s) => write!(f, "{s}"), } } } impl From<StoreError> for ProcessError { fn from(err: StoreError) -> Self { match err { StoreError::MissingDigest(s, d) => Self::MissingDigest(s, d), StoreError::Unclassified(s) => Self::Unclassified(s), } } } impl From<String> for ProcessError { fn from(err: String) -> Self { Self::Unclassified(err) } } #[derive( PartialOrd, Ord, Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq, Hash, Serialize, Deserialize, )] #[allow(non_camel_case_types)] pub enum Platform { Macos_x86_64, Macos_arm64, Linux_x86_64, Linux_arm64, } impl Platform { pub fn current() -> Result<Platform, String> { let platform_info = uname::uname().map_err(|_| "Failed to get local platform info!".to_string())?; match platform_info { uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "linux" && machine.to_lowercase() == "x86_64" => { Ok(Platform::Linux_x86_64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "linux" && (machine.to_lowercase() == "arm64" || machine.to_lowercase() == "aarch64") => { Ok(Platform::Linux_arm64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "darwin" && machine.to_lowercase() == "arm64" => { Ok(Platform::Macos_arm64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "darwin" && machine.to_lowercase() == "x86_64" => { Ok(Platform::Macos_x86_64) } uname::Info { ref sysname, ref machine, .. 
} => Err(format!( "Found unknown system/arch name pair {sysname} {machine}" )), } } } impl From<Platform> for String { fn from(platform: Platform) -> String { match platform { Platform::Linux_x86_64 => "linux_x86_64".to_string(), Platform::Linux_arm64 => "linux_arm64".to_string(), Platform::Macos_arm64 => "macos_arm64".to_string(), Platform::Macos_x86_64 => "macos_x86_64".to_string(), } } } impl TryFrom<String> for Platform { type Error = String; fn try_from(variant_candidate: String) -> Result<Self, Self::Error> { match variant_candidate.as_ref() { "macos_arm64" => Ok(Platform::Macos_arm64), "macos_x86_64" => Ok(Platform::Macos_x86_64), "linux_x86_64" => Ok(Platform::Linux_x86_64), "linux_arm64" => Ok(Platform::Linux_arm64), other => Err(format!("Unknown platform {other:?} encountered in parsing")), } } } #[derive(Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq, Hash, Serialize)] pub enum ProcessCacheScope { // Cached in all locations, regardless of success or failure. Always, // Cached in all locations, but only if the process exits successfully. Successful, // Cached only in memory (i.e. memoized in pantsd), but never persistently, regardless of // success vs. failure. PerRestartAlways, // Cached only in memory (i.e. memoized in pantsd), but never persistently, and only if // successful. PerRestartSuccessful, // Will run once per Session, i.e. once per run of Pants. This happens because the engine // de-duplicates identical work; the process is neither memoized in memory nor cached to disk. PerSession, } impl TryFrom<String> for ProcessCacheScope { type Error = String; fn try_from(variant_candidate: String) -> Result<Self, Self::Error> { match variant_candidate.to_lowercase().as_ref() { "always" => Ok(ProcessCacheScope::Always), "successful" => Ok(ProcessCacheScope::Successful), "per_restart_always" => Ok(ProcessCacheScope::PerRestartAlways), "per_restart_successful" => Ok(ProcessCacheScope::PerRestartSuccessful), "per_session" => Ok(ProcessCacheScope::PerSession), other => Err(format!("Unknown Process cache scope: {other:?}")), } } } fn serialize_level<S: serde::Serializer>(level: &log::Level, s: S) -> Result<S::Ok, S::Error> { s.serialize_str(&level.to_string()) } /// Input Digests for a process execution. /// /// The `complete` and `nailgun` Digests are the computed union of various inputs. /// /// TODO: See `crate::local::prepare_workdir` regarding validation of overlapping inputs. #[derive(Clone, Debug, DeepSizeOf, Eq, Hash, PartialEq, Serialize)] pub struct InputDigests { /// All of the input Digests, merged and relativized. Runners without the ability to consume the /// Digests individually should directly consume this value. pub complete: DirectoryDigest, /// The merged Digest of any `use_nailgun`-relevant Digests. pub nailgun: DirectoryDigest, /// The input files for the process execution, which will be materialized as mutable inputs in a /// sandbox for the process. /// /// TODO: Rename to `inputs` for symmetry with `immutable_inputs`. pub input_files: DirectoryDigest, /// Immutable input digests to make available in the input root. /// /// These digests are intended for inputs that will be reused between multiple Process /// invocations, without being mutated. This might be useful to provide the tools being executed, /// but can also be used for tool inputs such as compilation artifacts. /// /// The digests will be mounted at the relative path represented by the `RelativePath` keys. 
/// The executor may choose how to make the digests available, including by just merging /// the digest normally into the input root, creating a symlink to a persistent cache, /// or bind mounting the directory read-only into a persistent cache. Consequently, the mount /// point of each input must not overlap the `input_files`, even for directory entries. /// /// Assumes the build action does not modify the Digest as made available. This may be /// enforced by an executor, for example by bind mounting the directory read-only. pub immutable_inputs: BTreeMap<RelativePath, DirectoryDigest>, /// If non-empty, use nailgun in supported runners, using the specified `immutable_inputs` keys /// as server inputs. All other keys (and the input_files) will be client inputs. pub use_nailgun: BTreeSet<RelativePath>, } impl InputDigests { pub async fn new( store: &Store, input_files: DirectoryDigest, immutable_inputs: BTreeMap<RelativePath, DirectoryDigest>, use_nailgun: BTreeSet<RelativePath>, ) -> Result<Self, StoreError> { // Collect all digests into `complete`. let mut complete_digests = try_join_all( immutable_inputs .iter() .map(|(path, digest)| store.add_prefix(digest.clone(), path)) .collect::<Vec<_>>(), ) .await?; // And collect only the subset of the Digests which impact nailgun into `nailgun`. let nailgun_digests = immutable_inputs .keys() .zip(complete_digests.iter()) .filter_map(|(path, digest)| { if use_nailgun.contains(path) { Some(digest.clone()) } else { None } }) .collect::<Vec<_>>(); complete_digests.push(input_files.clone()); let (complete, nailgun) = try_join!(store.merge(complete_digests), store.merge(nailgun_digests),)?; Ok(Self { complete, nailgun, input_files, immutable_inputs, use_nailgun, }) } pub async fn new_from_merged(store: &Store, from: Vec<InputDigests>) -> Result<Self, StoreError> { let mut merged_immutable_inputs = BTreeMap::new(); for input_digests in from.iter() { let size_before = merged_immutable_inputs.len(); let immutable_inputs = &input_digests.immutable_inputs; merged_immutable_inputs.append(&mut immutable_inputs.clone()); if size_before + immutable_inputs.len() != merged_immutable_inputs.len() { return Err( format!( "Tried to merge two-or-more immutable inputs at the same path with different values! \ The collision involved one of the entries in: {immutable_inputs:?}" ) .into(), ); } } let complete_digests = from .iter() .map(|input_digests| input_digests.complete.clone()) .collect(); let nailgun_digests = from .iter() .map(|input_digests| input_digests.nailgun.clone()) .collect(); let input_files_digests = from .iter() .map(|input_digests| input_digests.input_files.clone()) .collect(); let (complete, nailgun, input_files) = try_join!( store.merge(complete_digests), store.merge(nailgun_digests), store.merge(input_files_digests), )?; Ok(Self { complete, nailgun, input_files, immutable_inputs: merged_immutable_inputs, use_nailgun: Itertools::concat( from .iter() .map(|input_digests| input_digests.use_nailgun.clone()), ) .into_iter() .collect(), }) } pub fn with_input_files(input_files: DirectoryDigest) -> Self { Self { complete: input_files.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files, immutable_inputs: BTreeMap::new(), use_nailgun: BTreeSet::new(), } } /// Split the InputDigests into client and server subsets. /// /// TODO: The server subset will have an accurate `complete` Digest, but the client will not.
/// This is currently safe because the nailgun client code does not consume that field, but it /// would be good to find a better factoring. pub fn nailgun_client_and_server(&self) -> (InputDigests, InputDigests) { let (server, client) = self .immutable_inputs .clone() .into_iter() .partition(|(path, _digest)| self.use_nailgun.contains(path)); ( // Client. InputDigests { // TODO: See method doc. complete: EMPTY_DIRECTORY_DIGEST.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: self.input_files.clone(), immutable_inputs: client, use_nailgun: BTreeSet::new(), }, // Server. InputDigests { complete: self.nailgun.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: EMPTY_DIRECTORY_DIGEST.clone(), immutable_inputs: server, use_nailgun: BTreeSet::new(), }, ) } } impl Default for InputDigests { fn default() -> Self { Self { complete: EMPTY_DIRECTORY_DIGEST.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: EMPTY_DIRECTORY_DIGEST.clone(), immutable_inputs: BTreeMap::new(), use_nailgun: BTreeSet::new(), } } } #[derive(DeepSizeOf, Debug, Clone, Hash, PartialEq, Eq, Serialize)] pub enum ProcessExecutionStrategy { Local, /// Stores the platform_properties. RemoteExecution(Vec<(String, String)>), /// Stores the image name. Docker(String), } impl ProcessExecutionStrategy { /// What to insert into the Command proto so that we don't incorrectly cache /// Docker vs remote execution vs local execution. pub fn cache_value(&self) -> String { match self { Self::Local => "local_execution".to_string(), Self::RemoteExecution(_) => "remote_execution".to_string(), // NB: this image will include the container ID, thanks to // https://github.com/pantsbuild/pants/pull/17101. Self::Docker(image) => format!("docker_execution: {image}"), } } } /// /// A process to be executed. /// /// When executing a `Process` using the `local::CommandRunner`, any `{chroot}` placeholders in the /// environment variables are replaced with the temporary sandbox path. /// #[derive(DeepSizeOf, Derivative, Clone, Debug, Eq, Serialize)] #[derivative(PartialEq, Hash)] pub struct Process { /// /// The arguments to execute. /// /// The first argument should be an absolute or relative path to the binary to execute. /// /// No PATH lookup will be performed unless a PATH environment variable is specified. /// /// No shell expansion will take place. /// pub argv: Vec<String>, /// /// The environment variables to set for the execution. /// /// No other environment variables will be set (except possibly for an empty PATH variable). ///
/// from. Defaults to the `input_files` root. /// pub working_directory: Option<RelativePath>, /// /// All of the input digests for the process. /// pub input_digests: InputDigests, pub output_files: BTreeSet<RelativePath>, pub output_directories: BTreeSet<RelativePath>, pub timeout: Option<std::time::Duration>, /// If not None, then a bounded::CommandRunner executing this Process will set an environment /// variable with this name containing a unique execution slot number. pub execution_slot_variable: Option<String>, /// If non-zero, the amount of parallelism that this process is capable of given its inputs. This /// value does not directly set the number of cores allocated to the process: that is computed /// based on availability, and provided as a template value in the arguments of the process. /// /// When set, a `{pants_concurrency}` variable will be templated into the `argv` of the process. /// /// Processes which set this value may be preempted (i.e. canceled and restarted) for a short /// period after starting if available resources have changed (because other processes have /// started or finished). pub concurrency_available: usize, #[derivative(PartialEq = "ignore", Hash = "ignore")] pub description: String, // NB: We serialize with a function to avoid adding a serde dep to the logging crate. #[serde(serialize_with = "serialize_level")] pub level: log::Level, /// /// Declares that this process uses the given named caches (which might have associated config /// in the future) at the associated relative paths within its workspace. Cache names must /// contain only lowercase ascii characters or underscores. /// /// Caches are exposed to processes within their workspaces at the relative paths represented /// by the values of the dict. A process may optionally check for the existence of the relevant /// directory, and disable use of that cache if it has not been created by the executor /// (indicating a lack of support for this feature). /// /// These caches are globally shared and so must be concurrency safe: a consumer of the cache /// must never assume that it has exclusive access to the provided directory. /// pub append_only_caches: BTreeMap<CacheName, RelativePath>, /// /// If present, a symlink will be created at .jdk which points to this directory for local /// execution, or a system-installed JDK (ignoring the value of the present Some) for remote /// execution. /// /// This is some technical debt we should clean up; /// see <https://github.com/pantsbuild/pants/issues/6416>. /// pub jdk_home: Option<PathBuf>, pub platform: Platform, pub cache_scope: ProcessCacheScope, pub execution_strategy: ProcessExecutionStrategy, pub remote_cache_speculation_delay: std::time::Duration, } impl Process { /// /// Constructs a Process with default values for most fields, after which the builder pattern can /// be used to set values. /// /// We use the more ergonomic (but possibly slightly slower) "move self for each builder method" /// pattern, so this method is only enabled for test usage: production usage should construct the /// Process struct wholesale. We can reconsider this if we end up with more production callsites /// that require partial options.
/// #[cfg(test)] pub fn new(argv: Vec<String>) -> Process { Process { argv, env: BTreeMap::new(), working_directory: None, input_digests: InputDigests::default(), output_files: BTreeSet::new(), output_directories: BTreeSet::new(), timeout: None, description: "".to_string(), level: log::Level::Info, append_only_caches: BTreeMap::new(), jdk_home: None, platform: Platform::current().unwrap(), execution_slot_variable: None, concurrency_available: 0, cache_scope: ProcessCacheScope::Successful, execution_strategy: ProcessExecutionStrategy::Local, remote_cache_speculation_delay: std::time::Duration::from_millis(0), } } /// /// Replaces the environment for this process. /// pub fn env(mut self, env: BTreeMap<String, String>) -> Process { self.env = env; self } /// /// Replaces the working_directory for this process. /// pub fn working_directory(mut self, working_directory: Option<RelativePath>) -> Process { self.working_directory = working_directory; self } /// /// Replaces the output files for this process. /// pub fn output_files(mut self, output_files: BTreeSet<RelativePath>) -> Process { self.output_files = output_files; self } /// /// Replaces the output directories for this process. /// pub fn output_directories(mut self, output_directories: BTreeSet<RelativePath>) -> Process { self.output_directories = output_directories; self } /// /// Replaces the append only caches for this process. /// pub fn append_only_caches( mut self, append_only_caches: BTreeMap<CacheName, RelativePath>, ) -> Process { self.append_only_caches = append_only_caches; self } /// /// Set the execution strategy to Docker, with the specified image. /// pub fn docker(mut self, image: String) -> Process { self.execution_strategy = ProcessExecutionStrategy::Docker(image); self } /// /// Set the execution strategy to remote execution with the provided platform properties. /// pub fn remote_execution_platform_properties( mut self, properties: Vec<(String, String)>, ) -> Process { self.execution_strategy = ProcessExecutionStrategy::RemoteExecution(properties); self } pub fn remote_cache_speculation_delay(mut self, delay: std::time::Duration) -> Process { self.remote_cache_speculation_delay = delay; self } pub fn cache_scope(mut self, cache_scope: ProcessCacheScope) -> Process { self.cache_scope = cache_scope; self } } /// /// The result of running a process. /// #[derive(DeepSizeOf, Derivative, Clone, Debug, Eq)] #[derivative(PartialEq, Hash)] pub struct FallibleProcessResultWithPlatform { pub stdout_digest: Digest, pub stderr_digest: Digest, pub exit_code: i32, pub output_directory: DirectoryDigest, pub platform: Platform, #[derivative(PartialEq = "ignore", Hash = "ignore")] pub metadata: ProcessResultMetadata, } /// Metadata for a ProcessResult corresponding to the REAPI `ExecutedActionMetadata` proto. This /// conversion is lossy, but the interesting parts are preserved. #[derive(Clone, Debug, DeepSizeOf, Eq, PartialEq)] pub struct ProcessResultMetadata { /// The time from starting to completion, including preparing the chroot and cleanup. /// Corresponds to `worker_start_timestamp` and `worker_completed_timestamp` from /// `ExecutedActionMetadata`. /// /// NB: This is optional because the REAPI does not guarantee that it is returned. pub total_elapsed: Option<Duration>, /// The source of the result. pub source: ProcessResultSource, /// The RunId of the Session in which the `ProcessResultSource` was accurate. In further runs /// within the same process, the source of the process implicitly becomes memoization. 
pub source_run_id: RunId, } impl ProcessResultMetadata { pub fn new( total_elapsed: Option<Duration>, source: ProcessResultSource, source_run_id: RunId, ) -> Self { Self { total_elapsed, source, source_run_id, } } pub fn new_from_metadata( metadata: ExecutedActionMetadata, source: ProcessResultSource, source_run_id: RunId, ) -> Self { let total_elapsed = match ( metadata.worker_start_timestamp, metadata.worker_completed_timestamp, ) { (Some(started), Some(completed)) => TimeSpan::from_start_and_end(&started, &completed, "") .map(|span| span.duration) .ok(), _ => None, }; Self { total_elapsed, source, source_run_id, } } /// How much faster a cache hit was than running the process again. /// /// This includes the overhead of setting up and cleaning up the process for execution, and it /// should include all overhead for the cache lookup. /// /// If the cache hit was slower than the original process, we return 0. Note that the cache hit /// may still have been faster than rerunning the process a second time, e.g. if speculation /// is used and the cache hit completed before the rerun; still, we cannot know how long the /// second run would have taken, so the best we can do is report 0. /// /// If the original process's execution time was not recorded, we return None because we /// cannot make a meaningful comparison. pub fn time_saved_from_cache( &self, cache_lookup: std::time::Duration, ) -> Option<std::time::Duration> { self.total_elapsed.and_then(|original_process| { let original_process: std::time::Duration = original_process.into(); original_process .checked_sub(cache_lookup) .or_else(|| Some(std::time::Duration::new(0, 0))) }) } } impl From<ProcessResultMetadata> for ExecutedActionMetadata { fn from(metadata: ProcessResultMetadata) -> ExecutedActionMetadata { let (total_start, total_end) = match metadata.total_elapsed { Some(elapsed) => { // Because we do not have the precise start time, we hardcode to starting at UNIX_EPOCH. We // only care about accurately preserving the duration. let start = prost_types::Timestamp { seconds: 0, nanos: 0, }; let end = prost_types::Timestamp { seconds: elapsed.secs as i64, nanos: elapsed.nanos as i32, }; (Some(start), Some(end)) } None => (None, None), }; ExecutedActionMetadata { worker_start_timestamp: total_start, worker_completed_timestamp: total_end, ..ExecutedActionMetadata::default() } } } #[derive(Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq)] pub enum ProcessResultSource { RanLocally, RanRemotely, HitLocally, HitRemotely, } impl From<ProcessResultSource> for &'static str { fn from(prs: ProcessResultSource) -> &'static str { match prs { ProcessResultSource::RanLocally => "ran_locally", ProcessResultSource::RanRemotely => "ran_remotely", ProcessResultSource::HitLocally => "hit_locally", ProcessResultSource::HitRemotely => "hit_remotely", } } } #[derive(Clone, Copy, Debug, PartialEq, Eq, strum_macros::EnumString)] #[strum(serialize_all = "snake_case")] pub enum CacheContentBehavior { Fetch, Validate, Defer, } /// /// Optionally validate that all digests in the result are loadable, returning false if any are not. /// /// If content loading is deferred, a Digest which is discovered to be missing later on during /// execution will cause backtracking. /// pub(crate) async fn check_cache_content( response: &FallibleProcessResultWithPlatform
pub env: BTreeMap<String, String>, /// /// A relative path to a directory existing in the `input_files` digest to execute the process
random_line_split
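Platform and ProcessCacheScope in the preceding rows round-trip through the lowercase snake_case strings shown in their TryFrom/From impls, with ProcessCacheScope::try_from lowercasing its input first. A test-style sketch, assuming the two types from the surrounding file are in scope:

#[cfg(test)]
mod roundtrip_sketch {
    use super::{Platform, ProcessCacheScope};
    use std::convert::TryFrom;

    #[test]
    fn parses_and_formats() {
        // Platform <-> String round trip via the impls shown above.
        let p = Platform::try_from("linux_arm64".to_string()).unwrap();
        assert_eq!(String::from(p), "linux_arm64");

        // ProcessCacheScope lowercases first, so mixed case still parses.
        let scope = ProcessCacheScope::try_from("Per_Restart_Always".to_string()).unwrap();
        assert_eq!(scope, ProcessCacheScope::PerRestartAlways);

        // Unknown variants come back as descriptive Err strings, not panics.
        assert!(Platform::try_from("windows_x86_64".to_string()).is_err());
    }
}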
lib.rs
est)] mod cache_tests; pub mod switched; pub mod children; pub mod docker; #[cfg(test)] mod docker_tests; pub mod local; #[cfg(test)] mod local_tests; pub mod nailgun; pub mod named_caches; pub mod remote; #[cfg(test)] pub mod remote_tests; pub mod remote_cache; #[cfg(test)] mod remote_cache_tests; extern crate uname; pub use crate::children::ManagedChild; pub use crate::named_caches::{CacheName, NamedCaches}; pub use crate::remote_cache::RemoteCacheWarningsBehavior; use crate::remote::EntireExecuteRequest; #[derive(Clone, Debug, PartialEq, Eq)] pub enum ProcessError { /// A Digest was not present in either of the local or remote Stores. MissingDigest(String, Digest), /// All other error types. Unclassified(String), } impl ProcessError { pub fn enrich(self, prefix: &str) -> Self { match self { Self::MissingDigest(s, d) => Self::MissingDigest(format!("{prefix}: {s}"), d), Self::Unclassified(s) => Self::Unclassified(format!("{prefix}: {s}")), } } } impl Display for ProcessError { fn
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::MissingDigest(s, d) => { write!(f, "{s}: {d:?}") } Self::Unclassified(s) => write!(f, "{s}"), } } } impl From<StoreError> for ProcessError { fn from(err: StoreError) -> Self { match err { StoreError::MissingDigest(s, d) => Self::MissingDigest(s, d), StoreError::Unclassified(s) => Self::Unclassified(s), } } } impl From<String> for ProcessError { fn from(err: String) -> Self { Self::Unclassified(err) } } #[derive( PartialOrd, Ord, Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq, Hash, Serialize, Deserialize, )] #[allow(non_camel_case_types)] pub enum Platform { Macos_x86_64, Macos_arm64, Linux_x86_64, Linux_arm64, } impl Platform { pub fn current() -> Result<Platform, String> { let platform_info = uname::uname().map_err(|_| "Failed to get local platform info!".to_string())?; match platform_info { uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "linux" && machine.to_lowercase() == "x86_64" => { Ok(Platform::Linux_x86_64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "linux" && (machine.to_lowercase() == "arm64" || machine.to_lowercase() == "aarch64") => { Ok(Platform::Linux_arm64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "darwin" && machine.to_lowercase() == "arm64" => { Ok(Platform::Macos_arm64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "darwin" && machine.to_lowercase() == "x86_64" => { Ok(Platform::Macos_x86_64) } uname::Info { ref sysname, ref machine, .. } => Err(format!( "Found unknown system/arch name pair {sysname} {machine}" )), } } } impl From<Platform> for String { fn from(platform: Platform) -> String { match platform { Platform::Linux_x86_64 => "linux_x86_64".to_string(), Platform::Linux_arm64 => "linux_arm64".to_string(), Platform::Macos_arm64 => "macos_arm64".to_string(), Platform::Macos_x86_64 => "macos_x86_64".to_string(), } } } impl TryFrom<String> for Platform { type Error = String; fn try_from(variant_candidate: String) -> Result<Self, Self::Error> { match variant_candidate.as_ref() { "macos_arm64" => Ok(Platform::Macos_arm64), "macos_x86_64" => Ok(Platform::Macos_x86_64), "linux_x86_64" => Ok(Platform::Linux_x86_64), "linux_arm64" => Ok(Platform::Linux_arm64), other => Err(format!("Unknown platform {other:?} encountered in parsing")), } } } #[derive(Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq, Hash, Serialize)] pub enum ProcessCacheScope { // Cached in all locations, regardless of success or failure. Always, // Cached in all locations, but only if the process exits successfully. Successful, // Cached only in memory (i.e. memoized in pantsd), but never persistently, regardless of // success vs. failure. PerRestartAlways, // Cached only in memory (i.e. memoized in pantsd), but never persistently, and only if // successful. PerRestartSuccessful, // Will run once per Session, i.e. once per run of Pants. This happens because the engine // de-duplicates identical work; the process is neither memoized in memory nor cached to disk. 
PerSession, } impl TryFrom<String> for ProcessCacheScope { type Error = String; fn try_from(variant_candidate: String) -> Result<Self, Self::Error> { match variant_candidate.to_lowercase().as_ref() { "always" => Ok(ProcessCacheScope::Always), "successful" => Ok(ProcessCacheScope::Successful), "per_restart_always" => Ok(ProcessCacheScope::PerRestartAlways), "per_restart_successful" => Ok(ProcessCacheScope::PerRestartSuccessful), "per_session" => Ok(ProcessCacheScope::PerSession), other => Err(format!("Unknown Process cache scope: {other:?}")), } } } fn serialize_level<S: serde::Serializer>(level: &log::Level, s: S) -> Result<S::Ok, S::Error> { s.serialize_str(&level.to_string()) } /// Input Digests for a process execution. /// /// The `complete` and `nailgun` Digests are the computed union of various inputs. /// /// TODO: See `crate::local::prepare_workdir` regarding validation of overlapping inputs. #[derive(Clone, Debug, DeepSizeOf, Eq, Hash, PartialEq, Serialize)] pub struct InputDigests { /// All of the input Digests, merged and relativized. Runners without the ability to consume the /// Digests individually should directly consume this value. pub complete: DirectoryDigest, /// The merged Digest of any `use_nailgun`-relevant Digests. pub nailgun: DirectoryDigest, /// The input files for the process execution, which will be materialized as mutable inputs in a /// sandbox for the process. /// /// TODO: Rename to `inputs` for symmetry with `immutable_inputs`. pub input_files: DirectoryDigest, /// Immutable input digests to make available in the input root. /// /// These digests are intended for inputs that will be reused between multiple Process /// invocations, without being mutated. This might be useful to provide the tools being executed, /// but can also be used for tool inputs such as compilation artifacts. /// /// The digests will be mounted at the relative path represented by the `RelativePath` keys. /// The executor may choose how to make the digests available, including by just merging /// the digest normally into the input root, creating a symlink to a persistent cache, /// or bind mounting the directory read-only into a persistent cache. Consequently, the mount /// point of each input must not overlap the `input_files`, even for directory entries. /// /// Assumes the build action does not modify the Digest as made available. This may be /// enforced by an executor, for example by bind mounting the directory read-only. pub immutable_inputs: BTreeMap<RelativePath, DirectoryDigest>, /// If non-empty, use nailgun in supported runners, using the specified `immutable_inputs` keys /// as server inputs. All other keys (and the input_files) will be client inputs. pub use_nailgun: BTreeSet<RelativePath>, } impl InputDigests { pub async fn new( store: &Store, input_files: DirectoryDigest, immutable_inputs: BTreeMap<RelativePath, DirectoryDigest>, use_nailgun: BTreeSet<RelativePath>, ) -> Result<Self, StoreError> { // Collect all digests into `complete`. let mut complete_digests = try_join_all( immutable_inputs .iter() .map(|(path, digest)| store.add_prefix(digest.clone(), path)) .collect::<Vec<_>>(), ) .await?; // And collect only the subset of the Digests which impact nailgun into `nailgun`. 
let nailgun_digests = immutable_inputs .keys() .zip(complete_digests.iter()) .filter_map(|(path, digest)| { if use_nailgun.contains(path) { Some(digest.clone()) } else { None } }) .collect::<Vec<_>>(); complete_digests.push(input_files.clone()); let (complete, nailgun) = try_join!(store.merge(complete_digests), store.merge(nailgun_digests),)?; Ok(Self { complete, nailgun, input_files, immutable_inputs, use_nailgun, }) } pub async fn new_from_merged(store: &Store, from: Vec<InputDigests>) -> Result<Self, StoreError> { let mut merged_immutable_inputs = BTreeMap::new(); for input_digests in from.iter() { let size_before = merged_immutable_inputs.len(); let immutable_inputs = &input_digests.immutable_inputs; merged_immutable_inputs.append(&mut immutable_inputs.clone()); if size_before + immutable_inputs.len() != merged_immutable_inputs.len() { return Err( format!( "Tried to merge two-or-more immutable inputs at the same path with different values! \ The collision involved one of the entries in: {immutable_inputs:?}" ) .into(), ); } } let complete_digests = from .iter() .map(|input_digests| input_digests.complete.clone()) .collect(); let nailgun_digests = from .iter() .map(|input_digests| input_digests.nailgun.clone()) .collect(); let input_files_digests = from .iter() .map(|input_digests| input_digests.input_files.clone()) .collect(); let (complete, nailgun, input_files) = try_join!( store.merge(complete_digests), store.merge(nailgun_digests), store.merge(input_files_digests), )?; Ok(Self { complete, nailgun, input_files, immutable_inputs: merged_immutable_inputs, use_nailgun: Itertools::concat( from .iter() .map(|input_digests| input_digests.use_nailgun.clone()), ) .into_iter() .collect(), }) } pub fn with_input_files(input_files: DirectoryDigest) -> Self { Self { complete: input_files.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files, immutable_inputs: BTreeMap::new(), use_nailgun: BTreeSet::new(), } } /// Split the InputDigests into client and server subsets. /// /// TODO: The server subset will have an accurate `complete` Digest, but the client will not. /// This is currently safe because the nailgun client code does not consume that field, but it /// would be good to find a better factoring. pub fn nailgun_client_and_server(&self) -> (InputDigests, InputDigests) { let (server, client) = self .immutable_inputs .clone() .into_iter() .partition(|(path, _digest)| self.use_nailgun.contains(path)); ( // Client. InputDigests { // TODO: See method doc. complete: EMPTY_DIRECTORY_DIGEST.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: self.input_files.clone(), immutable_inputs: client, use_nailgun: BTreeSet::new(), }, // Server. InputDigests { complete: self.nailgun.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: EMPTY_DIRECTORY_DIGEST.clone(), immutable_inputs: server, use_nailgun: BTreeSet::new(), }, ) } } impl Default for InputDigests { fn default() -> Self { Self { complete: EMPTY_DIRECTORY_DIGEST.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: EMPTY_DIRECTORY_DIGEST.clone(), immutable_inputs: BTreeMap::new(), use_nailgun: BTreeSet::new(), } } } #[derive(DeepSizeOf, Debug, Clone, Hash, PartialEq, Eq, Serialize)] pub enum ProcessExecutionStrategy { Local, /// Stores the platform_properties. RemoteExecution(Vec<(String, String)>), /// Stores the image name.
Docker(String), } impl ProcessExecutionStrategy { /// What to insert into the Command proto so that we don't incorrectly cache /// Docker vs remote execution vs local execution. pub fn cache_value(&self) -> String { match self { Self::Local => "local_execution".to_string(), Self::RemoteExecution(_) => "remote_execution".to_string(), // NB: this image will include the container ID, thanks to // https://github.com/pantsbuild/pants/pull/17101. Self::Docker(image) => format!("docker_execution: {image}"), } } } /// /// A process to be executed. /// /// When executing a `Process` using the `local::CommandRunner`, any `{chroot}` placeholders in the /// environment variables are replaced with the temporary sandbox path. /// #[derive(DeepSizeOf, Derivative, Clone, Debug, Eq, Serialize)] #[derivative(PartialEq, Hash)] pub struct Process { /// /// The arguments to execute. /// /// The first argument should be an absolute or relative path to the binary to execute. /// /// No PATH lookup will be performed unless a PATH environment variable is specified. /// /// No shell expansion will take place. /// pub argv: Vec<String>, /// /// The environment variables to set for the execution. /// /// No other environment variables will be set (except possibly for an empty PATH variable). /// pub env: BTreeMap<String, String>, /// /// A relative path to a directory existing in the `input_files` digest to execute the process /// from. Defaults to the `input_files` root. /// pub working_directory: Option<RelativePath>, /// /// All of the input digests for the process. /// pub input_digests: InputDigests, pub output_files: BTreeSet<RelativePath>, pub output_directories: BTreeSet<RelativePath>, pub timeout: Option<std::time::Duration>, /// If not None, then a bounded::CommandRunner executing this Process will set an environment /// variable with this name containing a unique execution slot number. pub execution_slot_variable: Option<String>, /// If non-zero, the amount of parallelism that this process is capable of given its inputs. This /// value does not directly set the number of cores allocated to the process: that is computed /// based on availability, and provided as a template value in the arguments of the process. /// /// When set, a `{pants_concurrency}` variable will be templated into the `argv` of the process. /// /// Processes which set this value may be preempted (i.e. canceled and restarted) for a short /// period after starting if available resources have changed (because other processes have /// started or finished). pub concurrency_available: usize, #[derivative(PartialEq = "ignore", Hash = "ignore")] pub description: String, // NB: We serialize with a function to avoid adding a serde dep to the logging crate. #[serde(serialize_with = "serialize_level")] pub level: log::Level, /// /// Declares that this process uses the given named caches (which might have associated config /// in the future) at the associated relative paths within its workspace. Cache names must /// contain only lowercase ascii characters or underscores. /// /// Caches are exposed to processes within their workspaces at the relative paths represented /// by the values of the dict. A process may optionally check for the existence of the relevant /// directory, and disable use of that cache if it has not been created by the executor /// (indicating a lack of support for this feature). 
/// /// These caches are globally shared and so must be concurrency safe: a consumer of the cache /// must never assume that it has exclusive access to the provided directory. /// pub append_only_caches: BTreeMap<CacheName, RelativePath>, /// /// If present, a symlink will be created at .jdk which points to this directory for local /// execution, or a system-installed JDK (ignoring the value of the present Some) for remote /// execution. /// /// This is some technical debt we should clean up; /// see <https://github.com/pantsbuild/pants/issues/6416>. /// pub jdk_home: Option<PathBuf>, pub platform: Platform, pub cache_scope: ProcessCacheScope, pub execution_strategy: ProcessExecutionStrategy, pub remote_cache_speculation_delay: std::time::Duration, } impl Process { /// /// Constructs a Process with default values for most fields, after which the builder pattern can /// be used to set values. /// /// We use the more ergonomic (but possibly slightly slower) "move self for each builder method" /// pattern, so this method is only enabled for test usage: production usage should construct the /// Process struct wholesale. We can reconsider this if we end up with more production callsites /// that require partial options. /// #[cfg(test)] pub fn new(argv: Vec<String>) -> Process { Process { argv, env: BTreeMap::new(), working_directory: None, input_digests: InputDigests::default(), output_files: BTreeSet::new(), output_directories: BTreeSet::new(), timeout: None, description: "".to_string(), level: log::Level::Info, append_only_caches: BTreeMap::new(), jdk_home: None, platform: Platform::current().unwrap(), execution_slot_variable: None, concurrency_available: 0, cache_scope: ProcessCacheScope::Successful, execution_strategy: ProcessExecutionStrategy::Local, remote_cache_speculation_delay: std::time::Duration::from_millis(0), } } /// /// Replaces the environment for this process. /// pub fn env(mut self, env: BTreeMap<String, String>) -> Process { self.env = env; self } /// /// Replaces the working_directory for this process. /// pub fn working_directory(mut self, working_directory: Option<RelativePath>) -> Process { self.working_directory = working_directory; self } /// /// Replaces the output files for this process. /// pub fn output_files(mut self, output_files: BTreeSet<RelativePath>) -> Process { self.output_files = output_files; self } /// /// Replaces the output directories for this process. /// pub fn output_directories(mut self, output_directories: BTreeSet<RelativePath>) -> Process { self.output_directories = output_directories; self } /// /// Replaces the append only caches for this process. /// pub fn append_only_caches( mut self, append_only_caches: BTreeMap<CacheName, RelativePath>, ) -> Process { self.append_only_caches = append_only_caches; self } /// /// Set the execution strategy to Docker, with the specified image. /// pub fn docker(mut self, image: String) -> Process { self.execution_strategy = ProcessExecutionStrategy::Docker(image); self } /// /// Set the execution strategy to remote execution with the provided platform properties.
/// pub fn remote_execution_platform_properties( mut self, properties: Vec<(String, String)>, ) -> Process { self.execution_strategy = ProcessExecutionStrategy::RemoteExecution(properties); self } pub fn remote_cache_speculation_delay(mut self, delay: std::time::Duration) -> Process { self.remote_cache_speculation_delay = delay; self } pub fn cache_scope(mut self, cache_scope: ProcessCacheScope) -> Process { self.cache_scope = cache_scope; self } } /// /// The result of running a process. /// #[derive(DeepSizeOf, Derivative, Clone, Debug, Eq)] #[derivative(PartialEq, Hash)] pub struct FallibleProcessResultWithPlatform { pub stdout_digest: Digest, pub stderr_digest: Digest, pub exit_code: i32, pub output_directory: DirectoryDigest, pub platform: Platform, #[derivative(PartialEq = "ignore", Hash = "ignore")] pub metadata: ProcessResultMetadata, } /// Metadata for a ProcessResult corresponding to the REAPI `ExecutedActionMetadata` proto. This /// conversion is lossy, but the interesting parts are preserved. #[derive(Clone, Debug, DeepSizeOf, Eq, PartialEq)] pub struct ProcessResultMetadata { /// The time from starting to completion, including preparing the chroot and cleanup. /// Corresponds to `worker_start_timestamp` and `worker_completed_timestamp` from /// `ExecutedActionMetadata`. /// /// NB: This is optional because the REAPI does not guarantee that it is returned. pub total_elapsed: Option<Duration>, /// The source of the result. pub source: ProcessResultSource, /// The RunId of the Session in which the `ProcessResultSource` was accurate. In further runs /// within the same process, the source of the process implicitly becomes memoization. pub source_run_id: RunId, } impl ProcessResultMetadata { pub fn new( total_elapsed: Option<Duration>, source: ProcessResultSource, source_run_id: RunId, ) -> Self { Self { total_elapsed, source, source_run_id, } } pub fn new_from_metadata( metadata: ExecutedActionMetadata, source: ProcessResultSource, source_run_id: RunId, ) -> Self { let total_elapsed = match ( metadata.worker_start_timestamp, metadata.worker_completed_timestamp, ) { (Some(started), Some(completed)) => TimeSpan::from_start_and_end(&started, &completed, "") .map(|span| span.duration) .ok(), _ => None, }; Self { total_elapsed, source, source_run_id, } } /// How much faster a cache hit was than running the process again. /// /// This includes the overhead of setting up and cleaning up the process for execution, and it /// should include all overhead for the cache lookup. /// /// If the cache hit was slower than the original process, we return 0. Note that the cache hit /// may still have been faster than rerunning the process a second time, e.g. if speculation /// is used and the cache hit completed before the rerun; still, we cannot know how long the /// second run would have taken, so the best we can do is report 0. /// /// If the original process's execution time was not recorded, we return None because we /// cannot make a meaningful comparison. 
pub fn time_saved_from_cache( &self, cache_lookup: std::time::Duration, ) -> Option<std::time::Duration> { self.total_elapsed.and_then(|original_process| { let original_process: std::time::Duration = original_process.into(); original_process .checked_sub(cache_lookup) .or_else(|| Some(std::time::Duration::new(0, 0))) }) } } impl From<ProcessResultMetadata> for ExecutedActionMetadata { fn from(metadata: ProcessResultMetadata) -> ExecutedActionMetadata { let (total_start, total_end) = match metadata.total_elapsed { Some(elapsed) => { // Because we do not have the precise start time, we hardcode to starting at UNIX_EPOCH. We // only care about accurately preserving the duration. let start = prost_types::Timestamp { seconds: 0, nanos: 0, }; let end = prost_types::Timestamp { seconds: elapsed.secs as i64, nanos: elapsed.nanos as i32, }; (Some(start), Some(end)) } None => (None, None), }; ExecutedActionMetadata { worker_start_timestamp: total_start, worker_completed_timestamp: total_end, ..ExecutedActionMetadata::default() } } } #[derive(Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq)] pub enum ProcessResultSource { RanLocally, RanRemotely, HitLocally, HitRemotely, } impl From<ProcessResultSource> for &'static str { fn from(prs: ProcessResultSource) -> &'static str { match prs { ProcessResultSource::RanLocally => "ran_locally", ProcessResultSource::RanRemotely => "ran_remotely", ProcessResultSource::HitLocally => "hit_locally", ProcessResultSource::HitRemotely => "hit_remotely", } } } #[derive(Clone, Copy, Debug, PartialEq, Eq, strum_macros::EnumString)] #[strum(serialize_all = "snake_case")] pub enum CacheContentBehavior { Fetch, Validate, Defer, } /// /// Optionally validate that all digests in the result are loadable, returning false if any are not. /// /// If content loading is deferred, a Digest which is discovered to be missing later on during /// execution will cause backtracking. /// pub(crate) async fn check_cache_content( response: &FallibleProcessResult
fmt
identifier_name
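The `lib.rs` row above centers on a reusable error-handling pattern: an enum that keeps a structured variant (`MissingDigest`, which carries the digest so callers can backtrack) alongside a catch-all string variant, with `enrich` prefixing context onto the message without losing the variant. Below is a minimal, self-contained sketch of that pattern; the `Digest` tuple struct here is a stand-in for the crate's real digest type, which is not defined in this excerpt.

```rust
use std::fmt;

// Stand-in for the crate's `hashing::Digest`; only here so the sketch compiles.
#[derive(Clone, Debug, PartialEq, Eq)]
struct Digest(String);

#[derive(Clone, Debug, PartialEq, Eq)]
enum ProcessError {
    /// A Digest was not present in either of the local or remote Stores.
    MissingDigest(String, Digest),
    /// All other error types.
    Unclassified(String),
}

impl ProcessError {
    // Prefix the human-readable part of the error while keeping the variant
    // (and any structured payload) intact.
    fn enrich(self, prefix: &str) -> Self {
        match self {
            Self::MissingDigest(s, d) => Self::MissingDigest(format!("{prefix}: {s}"), d),
            Self::Unclassified(s) => Self::Unclassified(format!("{prefix}: {s}")),
        }
    }
}

impl fmt::Display for ProcessError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::MissingDigest(s, d) => write!(f, "{s}: {d:?}"),
            Self::Unclassified(s) => write!(f, "{s}"),
        }
    }
}

fn main() {
    let err = ProcessError::Unclassified("timed out".to_string()).enrich("running `cc`");
    assert_eq!(err.to_string(), "running `cc`: timed out");
    println!("{err}");
}
```

Because `enrich` consumes and returns `self`, context can be layered on at each call site without an explicit wrapper type.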
lib.rs
est)] mod cache_tests; pub mod switched; pub mod children; pub mod docker; #[cfg(test)] mod docker_tests; pub mod local; #[cfg(test)] mod local_tests; pub mod nailgun; pub mod named_caches; pub mod remote; #[cfg(test)] pub mod remote_tests; pub mod remote_cache; #[cfg(test)] mod remote_cache_tests; extern crate uname; pub use crate::children::ManagedChild; pub use crate::named_caches::{CacheName, NamedCaches}; pub use crate::remote_cache::RemoteCacheWarningsBehavior; use crate::remote::EntireExecuteRequest; #[derive(Clone, Debug, PartialEq, Eq)] pub enum ProcessError { /// A Digest was not present in either of the local or remote Stores. MissingDigest(String, Digest), /// All other error types. Unclassified(String), } impl ProcessError { pub fn enrich(self, prefix: &str) -> Self { match self { Self::MissingDigest(s, d) => Self::MissingDigest(format!("{prefix}: {s}"), d), Self::Unclassified(s) => Self::Unclassified(format!("{prefix}: {s}")), } } } impl Display for ProcessError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::MissingDigest(s, d) => { write!(f, "{s}: {d:?}") } Self::Unclassified(s) => write!(f, "{s}"), } } } impl From<StoreError> for ProcessError { fn from(err: StoreError) -> Self { match err { StoreError::MissingDigest(s, d) => Self::MissingDigest(s, d), StoreError::Unclassified(s) => Self::Unclassified(s), } } } impl From<String> for ProcessError { fn from(err: String) -> Self { Self::Unclassified(err) } } #[derive( PartialOrd, Ord, Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq, Hash, Serialize, Deserialize, )] #[allow(non_camel_case_types)] pub enum Platform { Macos_x86_64, Macos_arm64, Linux_x86_64, Linux_arm64, } impl Platform { pub fn current() -> Result<Platform, String> { let platform_info = uname::uname().map_err(|_| "Failed to get local platform info!".to_string())?; match platform_info { uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "linux" && machine.to_lowercase() == "x86_64" => { Ok(Platform::Linux_x86_64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "linux" && (machine.to_lowercase() == "arm64" || machine.to_lowercase() == "aarch64") => { Ok(Platform::Linux_arm64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "darwin" && machine.to_lowercase() == "arm64" => { Ok(Platform::Macos_arm64) } uname::Info { ref sysname, ref machine, .. } if sysname.to_lowercase() == "darwin" && machine.to_lowercase() == "x86_64" => { Ok(Platform::Macos_x86_64) } uname::Info { ref sysname, ref machine, .. 
} => Err(format!( "Found unknown system/arch name pair {sysname} {machine}" )), } } } impl From<Platform> for String { fn from(platform: Platform) -> String { match platform { Platform::Linux_x86_64 => "linux_x86_64".to_string(), Platform::Linux_arm64 => "linux_arm64".to_string(), Platform::Macos_arm64 => "macos_arm64".to_string(), Platform::Macos_x86_64 => "macos_x86_64".to_string(), } } } impl TryFrom<String> for Platform { type Error = String; fn try_from(variant_candidate: String) -> Result<Self, Self::Error> { match variant_candidate.as_ref() { "macos_arm64" => Ok(Platform::Macos_arm64), "macos_x86_64" => Ok(Platform::Macos_x86_64), "linux_x86_64" => Ok(Platform::Linux_x86_64), "linux_arm64" => Ok(Platform::Linux_arm64), other => Err(format!("Unknown platform {other:?} encountered in parsing")), } } } #[derive(Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq, Hash, Serialize)] pub enum ProcessCacheScope { // Cached in all locations, regardless of success or failure. Always, // Cached in all locations, but only if the process exits successfully. Successful, // Cached only in memory (i.e. memoized in pantsd), but never persistently, regardless of // success vs. failure. PerRestartAlways, // Cached only in memory (i.e. memoized in pantsd), but never persistently, and only if // successful. PerRestartSuccessful, // Will run once per Session, i.e. once per run of Pants. This happens because the engine // de-duplicates identical work; the process is neither memoized in memory nor cached to disk. PerSession, } impl TryFrom<String> for ProcessCacheScope { type Error = String; fn try_from(variant_candidate: String) -> Result<Self, Self::Error> { match variant_candidate.to_lowercase().as_ref() { "always" => Ok(ProcessCacheScope::Always), "successful" => Ok(ProcessCacheScope::Successful), "per_restart_always" => Ok(ProcessCacheScope::PerRestartAlways), "per_restart_successful" => Ok(ProcessCacheScope::PerRestartSuccessful), "per_session" => Ok(ProcessCacheScope::PerSession), other => Err(format!("Unknown Process cache scope: {other:?}")), } } } fn serialize_level<S: serde::Serializer>(level: &log::Level, s: S) -> Result<S::Ok, S::Error> { s.serialize_str(&level.to_string()) } /// Input Digests for a process execution. /// /// The `complete` and `nailgun` Digests are the computed union of various inputs. /// /// TODO: See `crate::local::prepare_workdir` regarding validation of overlapping inputs. #[derive(Clone, Debug, DeepSizeOf, Eq, Hash, PartialEq, Serialize)] pub struct InputDigests { /// All of the input Digests, merged and relativized. Runners without the ability to consume the /// Digests individually should directly consume this value. pub complete: DirectoryDigest, /// The merged Digest of any `use_nailgun`-relevant Digests. pub nailgun: DirectoryDigest, /// The input files for the process execution, which will be materialized as mutable inputs in a /// sandbox for the process. /// /// TODO: Rename to `inputs` for symmetry with `immutable_inputs`. pub input_files: DirectoryDigest, /// Immutable input digests to make available in the input root. /// /// These digests are intended for inputs that will be reused between multiple Process /// invocations, without being mutated. This might be useful to provide the tools being executed, /// but can also be used for tool inputs such as compilation artifacts. /// /// The digests will be mounted at the relative path represented by the `RelativePath` keys. 
/// The executor may choose how to make the digests available, including by just merging /// the digest normally into the input root, creating a symlink to a persistent cache, /// or bind mounting the directory read-only into a persistent cache. Consequently, the mount /// point of each input must not overlap the `input_files`, even for directory entries. /// /// Assumes the build action does not modify the Digest as made available. This may be /// enforced by an executor, for example by bind mounting the directory read-only. pub immutable_inputs: BTreeMap<RelativePath, DirectoryDigest>, /// If non-empty, use nailgun in supported runners, using the specified `immutable_inputs` keys /// as server inputs. All other keys (and the input_files) will be client inputs. pub use_nailgun: BTreeSet<RelativePath>, } impl InputDigests { pub async fn new( store: &Store, input_files: DirectoryDigest, immutable_inputs: BTreeMap<RelativePath, DirectoryDigest>, use_nailgun: BTreeSet<RelativePath>, ) -> Result<Self, StoreError> { // Collect all digests into `complete`. let mut complete_digests = try_join_all( immutable_inputs .iter() .map(|(path, digest)| store.add_prefix(digest.clone(), path)) .collect::<Vec<_>>(), ) .await?; // And collect only the subset of the Digests which impact nailgun into `nailgun`. let nailgun_digests = immutable_inputs .keys() .zip(complete_digests.iter()) .filter_map(|(path, digest)| { if use_nailgun.contains(path) { Some(digest.clone()) } else { None } }) .collect::<Vec<_>>(); complete_digests.push(input_files.clone()); let (complete, nailgun) = try_join!(store.merge(complete_digests), store.merge(nailgun_digests),)?; Ok(Self { complete, nailgun, input_files, immutable_inputs, use_nailgun, }) } pub async fn new_from_merged(store: &Store, from: Vec<InputDigests>) -> Result<Self, StoreError> { let mut merged_immutable_inputs = BTreeMap::new(); for input_digests in from.iter() { let size_before = merged_immutable_inputs.len(); let immutable_inputs = &input_digests.immutable_inputs; merged_immutable_inputs.append(&mut immutable_inputs.clone()); if size_before + immutable_inputs.len() != merged_immutable_inputs.len() { return Err( format!( "Tried to merge two-or-more immutable inputs at the same path with different values! \ The collision involved one of the entries in: {immutable_inputs:?}" ) .into(), ); } } let complete_digests = from .iter() .map(|input_digests| input_digests.complete.clone()) .collect(); let nailgun_digests = from .iter() .map(|input_digests| input_digests.nailgun.clone()) .collect(); let input_files_digests = from .iter() .map(|input_digests| input_digests.input_files.clone()) .collect(); let (complete, nailgun, input_files) = try_join!( store.merge(complete_digests), store.merge(nailgun_digests), store.merge(input_files_digests), )?; Ok(Self { complete, nailgun, input_files, immutable_inputs: merged_immutable_inputs, use_nailgun: Itertools::concat( from .iter() .map(|input_digests| input_digests.use_nailgun.clone()), ) .into_iter() .collect(), }) } pub fn with_input_files(input_files: DirectoryDigest) -> Self { Self { complete: input_files.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files, immutable_inputs: BTreeMap::new(), use_nailgun: BTreeSet::new(), } } /// Split the InputDigests into client and server subsets. /// /// TODO: The server subset will have an accurate `complete` Digest, but the client will not.
/// This is currently safe because the nailgun client code does not consume that field, but it /// would be good to find a better factoring. pub fn nailgun_client_and_server(&self) -> (InputDigests, InputDigests) { let (server, client) = self .immutable_inputs .clone() .into_iter() .partition(|(path, _digest)| self.use_nailgun.contains(path)); ( // Client. InputDigests { // TODO: See method doc. complete: EMPTY_DIRECTORY_DIGEST.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: self.input_files.clone(), immutable_inputs: client, use_nailgun: BTreeSet::new(), }, // Server. InputDigests { complete: self.nailgun.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: EMPTY_DIRECTORY_DIGEST.clone(), immutable_inputs: server, use_nailgun: BTreeSet::new(), }, ) } } impl Default for InputDigests { fn default() -> Self { Self { complete: EMPTY_DIRECTORY_DIGEST.clone(), nailgun: EMPTY_DIRECTORY_DIGEST.clone(), input_files: EMPTY_DIRECTORY_DIGEST.clone(), immutable_inputs: BTreeMap::new(), use_nailgun: BTreeSet::new(), } } } #[derive(DeepSizeOf, Debug, Clone, Hash, PartialEq, Eq, Serialize)] pub enum ProcessExecutionStrategy { Local, /// Stores the platform_properties. RemoteExecution(Vec<(String, String)>), /// Stores the image name. Docker(String), } impl ProcessExecutionStrategy { /// What to insert into the Command proto so that we don't incorrectly cache /// Docker vs remote execution vs local execution. pub fn cache_value(&self) -> String { match self { Self::Local => "local_execution".to_string(), Self::RemoteExecution(_) => "remote_execution".to_string(), // NB: this image will include the container ID, thanks to // https://github.com/pantsbuild/pants/pull/17101. Self::Docker(image) => format!("docker_execution: {image}"), } } } /// /// A process to be executed. /// /// When executing a `Process` using the `local::CommandRunner`, any `{chroot}` placeholders in the /// environment variables are replaced with the temporary sandbox path. /// #[derive(DeepSizeOf, Derivative, Clone, Debug, Eq, Serialize)] #[derivative(PartialEq, Hash)] pub struct Process { /// /// The arguments to execute. /// /// The first argument should be an absolute or relative path to the binary to execute. /// /// No PATH lookup will be performed unless a PATH environment variable is specified. /// /// No shell expansion will take place. /// pub argv: Vec<String>, /// /// The environment variables to set for the execution. /// /// No other environment variables will be set (except possibly for an empty PATH variable). /// pub env: BTreeMap<String, String>, /// /// A relative path to a directory existing in the `input_files` digest to execute the process /// from. Defaults to the `input_files` root. /// pub working_directory: Option<RelativePath>, /// /// All of the input digests for the process. /// pub input_digests: InputDigests, pub output_files: BTreeSet<RelativePath>, pub output_directories: BTreeSet<RelativePath>, pub timeout: Option<std::time::Duration>, /// If not None, then a bounded::CommandRunner executing this Process will set an environment /// variable with this name containing a unique execution slot number. pub execution_slot_variable: Option<String>, /// If non-zero, the amount of parallelism that this process is capable of given its inputs. This /// value does not directly set the number of cores allocated to the process: that is computed /// based on availability, and provided as a template value in the arguments of the process. 
/// /// When set, a `{pants_concurrency}` variable will be templated into the `argv` of the process. /// /// Processes which set this value may be preempted (i.e. canceled and restarted) for a short /// period after starting if available resources have changed (because other processes have /// started or finished). pub concurrency_available: usize, #[derivative(PartialEq = "ignore", Hash = "ignore")] pub description: String, // NB: We serialize with a function to avoid adding a serde dep to the logging crate. #[serde(serialize_with = "serialize_level")] pub level: log::Level, /// /// Declares that this process uses the given named caches (which might have associated config /// in the future) at the associated relative paths within its workspace. Cache names must /// contain only lowercase ascii characters or underscores. /// /// Caches are exposed to processes within their workspaces at the relative paths represented /// by the values of the dict. A process may optionally check for the existence of the relevant /// directory, and disable use of that cache if it has not been created by the executor /// (indicating a lack of support for this feature). /// /// These caches are globally shared and so must be concurrency safe: a consumer of the cache /// must never assume that it has exclusive access to the provided directory. /// pub append_only_caches: BTreeMap<CacheName, RelativePath>, /// /// If present, a symlink will be created at .jdk which points to this directory for local /// execution, or a system-installed JDK (ignoring the value of the present Some) for remote /// execution. /// /// This is some technical debt we should clean up; /// see <https://github.com/pantsbuild/pants/issues/6416>. /// pub jdk_home: Option<PathBuf>, pub platform: Platform, pub cache_scope: ProcessCacheScope, pub execution_strategy: ProcessExecutionStrategy, pub remote_cache_speculation_delay: std::time::Duration, } impl Process { /// /// Constructs a Process with default values for most fields, after which the builder pattern can /// be used to set values. /// /// We use the more ergonomic (but possibly slightly slower) "move self for each builder method" /// pattern, so this method is only enabled for test usage: production usage should construct the /// Process struct wholesale. We can reconsider this if we end up with more production callsites /// that require partial options. /// #[cfg(test)] pub fn new(argv: Vec<String>) -> Process { Process { argv, env: BTreeMap::new(), working_directory: None, input_digests: InputDigests::default(), output_files: BTreeSet::new(), output_directories: BTreeSet::new(), timeout: None, description: "".to_string(), level: log::Level::Info, append_only_caches: BTreeMap::new(), jdk_home: None, platform: Platform::current().unwrap(), execution_slot_variable: None, concurrency_available: 0, cache_scope: ProcessCacheScope::Successful, execution_strategy: ProcessExecutionStrategy::Local, remote_cache_speculation_delay: std::time::Duration::from_millis(0), } } /// /// Replaces the environment for this process. /// pub fn env(mut self, env: BTreeMap<String, String>) -> Process { self.env = env; self } /// /// Replaces the working_directory for this process. /// pub fn working_directory(mut self, working_directory: Option<RelativePath>) -> Process { self.working_directory = working_directory; self } /// /// Replaces the output files for this process.
/// pub fn output_files(mut self, output_files: BTreeSet<RelativePath>) -> Process { self.output_files = output_files; self } /// /// Replaces the output directories for this process. /// pub fn output_directories(mut self, output_directories: BTreeSet<RelativePath>) -> Process { self.output_directories = output_directories; self } /// /// Replaces the append only caches for this process. /// pub fn append_only_caches( mut self, append_only_caches: BTreeMap<CacheName, RelativePath>, ) -> Process
/// /// Set the execution strategy to Docker, with the specified image. /// pub fn docker(mut self, image: String) -> Process { self.execution_strategy = ProcessExecutionStrategy::Docker(image); self } /// /// Set the execution strategy to remote execution with the provided platform properties. /// pub fn remote_execution_platform_properties( mut self, properties: Vec<(String, String)>, ) -> Process { self.execution_strategy = ProcessExecutionStrategy::RemoteExecution(properties); self } pub fn remote_cache_speculation_delay(mut self, delay: std::time::Duration) -> Process { self.remote_cache_speculation_delay = delay; self } pub fn cache_scope(mut self, cache_scope: ProcessCacheScope) -> Process { self.cache_scope = cache_scope; self } } /// /// The result of running a process. /// #[derive(DeepSizeOf, Derivative, Clone, Debug, Eq)] #[derivative(PartialEq, Hash)] pub struct FallibleProcessResultWithPlatform { pub stdout_digest: Digest, pub stderr_digest: Digest, pub exit_code: i32, pub output_directory: DirectoryDigest, pub platform: Platform, #[derivative(PartialEq = "ignore", Hash = "ignore")] pub metadata: ProcessResultMetadata, } /// Metadata for a ProcessResult corresponding to the REAPI `ExecutedActionMetadata` proto. This /// conversion is lossy, but the interesting parts are preserved. #[derive(Clone, Debug, DeepSizeOf, Eq, PartialEq)] pub struct ProcessResultMetadata { /// The time from starting to completion, including preparing the chroot and cleanup. /// Corresponds to `worker_start_timestamp` and `worker_completed_timestamp` from /// `ExecutedActionMetadata`. /// /// NB: This is optional because the REAPI does not guarantee that it is returned. pub total_elapsed: Option<Duration>, /// The source of the result. pub source: ProcessResultSource, /// The RunId of the Session in which the `ProcessResultSource` was accurate. In further runs /// within the same process, the source of the process implicitly becomes memoization. pub source_run_id: RunId, } impl ProcessResultMetadata { pub fn new( total_elapsed: Option<Duration>, source: ProcessResultSource, source_run_id: RunId, ) -> Self { Self { total_elapsed, source, source_run_id, } } pub fn new_from_metadata( metadata: ExecutedActionMetadata, source: ProcessResultSource, source_run_id: RunId, ) -> Self { let total_elapsed = match ( metadata.worker_start_timestamp, metadata.worker_completed_timestamp, ) { (Some(started), Some(completed)) => TimeSpan::from_start_and_end(&started, &completed, "") .map(|span| span.duration) .ok(), _ => None, }; Self { total_elapsed, source, source_run_id, } } /// How much faster a cache hit was than running the process again. /// /// This includes the overhead of setting up and cleaning up the process for execution, and it /// should include all overhead for the cache lookup. /// /// If the cache hit was slower than the original process, we return 0. Note that the cache hit /// may still have been faster than rerunning the process a second time, e.g. if speculation /// is used and the cache hit completed before the rerun; still, we cannot know how long the /// second run would have taken, so the best we can do is report 0. /// /// If the original process's execution time was not recorded, we return None because we /// cannot make a meaningful comparison. 
pub fn time_saved_from_cache( &self, cache_lookup: std::time::Duration, ) -> Option<std::time::Duration> { self.total_elapsed.and_then(|original_process| { let original_process: std::time::Duration = original_process.into(); original_process .checked_sub(cache_lookup) .or_else(|| Some(std::time::Duration::new(0, 0))) }) } } impl From<ProcessResultMetadata> for ExecutedActionMetadata { fn from(metadata: ProcessResultMetadata) -> ExecutedActionMetadata { let (total_start, total_end) = match metadata.total_elapsed { Some(elapsed) => { // Because we do not have the precise start time, we hardcode to starting at UNIX_EPOCH. We // only care about accurately preserving the duration. let start = prost_types::Timestamp { seconds: 0, nanos: 0, }; let end = prost_types::Timestamp { seconds: elapsed.secs as i64, nanos: elapsed.nanos as i32, }; (Some(start), Some(end)) } None => (None, None), }; ExecutedActionMetadata { worker_start_timestamp: total_start, worker_completed_timestamp: total_end, ..ExecutedActionMetadata::default() } } } #[derive(Clone, Copy, Debug, DeepSizeOf, Eq, PartialEq)] pub enum ProcessResultSource { RanLocally, RanRemotely, HitLocally, HitRemotely, } impl From<ProcessResultSource> for &'static str { fn from(prs: ProcessResultSource) -> &'static str { match prs { ProcessResultSource::RanLocally => "ran_locally", ProcessResultSource::RanRemotely => "ran_remotely", ProcessResultSource::HitLocally => "hit_locally", ProcessResultSource::HitRemotely => "hit_remotely", } } } #[derive(Clone, Copy, Debug, PartialEq, Eq, strum_macros::EnumString)] #[strum(serialize_all = "snake_case")] pub enum CacheContentBehavior { Fetch, Validate, Defer, } /// /// Optionally validate that all digests in the result are loadable, returning false if any are not. /// /// If content loading is deferred, a Digest which is discovered to be missing later on during /// execution will cause backtracking. /// pub(crate) async fn check_cache_content( response: &FallibleProcessResult
{ self.append_only_caches = append_only_caches; self }
identifier_body
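The `identifier_body` middle above restores one of the `Process` builder methods, all of which follow the "move self for each builder method" pattern that the `Process::new` doc comment describes. Here is a compilable sketch of that pattern using a stripped-down stand-in for `Process` with only a few fields; the real struct and its `RelativePath`/`CacheName` types are not reproduced.

```rust
use std::collections::BTreeMap;

// Simplified stand-in for the `Process` struct above; only the fields this sketch touches.
#[derive(Debug, Default)]
struct Process {
    argv: Vec<String>,
    env: BTreeMap<String, String>,
    description: String,
}

impl Process {
    fn new(argv: Vec<String>) -> Self {
        Process { argv, ..Default::default() }
    }

    // "Move self" builder: each method consumes the value and returns it,
    // so calls chain without a separate builder type.
    fn env(mut self, env: BTreeMap<String, String>) -> Self {
        self.env = env;
        self
    }

    fn description(mut self, description: String) -> Self {
        self.description = description;
        self
    }
}

fn main() {
    let env = BTreeMap::from([("PATH".to_string(), "/usr/bin".to_string())]);
    let process = Process::new(vec!["/usr/bin/env".to_string()])
        .env(env)
        .description("print the environment".to_string());
    assert_eq!(process.argv[0], "/usr/bin/env");
    assert_eq!(process.env.len(), 1);
    assert_eq!(process.description, "print the environment");
}
```

Consuming and returning `self` keeps chaining ergonomic at the cost of a move per call, which is why the source gates these helpers behind `#[cfg(test)]`.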
main.rs
#![no_std] #![no_main] #![feature(global_asm, asm, naked_functions)] #![feature(panic_info_message)] #[macro_use] extern crate bitflags; use volatile::Volatile; use lcd::*; use gpu::FramebufferConfig; use core::ptr::{read_volatile, write_volatile}; use core::{str, fmt, cmp, mem}; use core::fmt::{Write, UpperHex, Binary}; use common::input::GamePad; use common::util::reg::*; use common::Console; use common::mem::arm11::*; use num_traits::PrimInt; mod lcd; mod gpu; mod panic; mod mpcore; mod boot11; mod exceptions; const SCREEN_TOP_WIDTH: usize = 400; const SCREEN_BOTTOM_WIDTH: usize = 320; const SCREEN_HEIGHT: usize = 240; const SCREEN_TOP_FBSIZE: usize = (3 * SCREEN_TOP_WIDTH * SCREEN_HEIGHT); const SCREEN_BOTTOM_FBSIZE: usize = (3 * SCREEN_BOTTOM_WIDTH * SCREEN_HEIGHT); global_asm!(r#" .section .text.start .global _start .align 4 .arm _start: cpsid aif, #0x13 ldr r0, =0x24000000 mov sp, r0 blx _rust_start .pool "#); const FERRIS: &[u8] = include_bytes!("../../ferris.data"); #[no_mangle] pub unsafe extern "C" fn _rust_start() -> ! { exceptions::install_handlers(); // mpcore::enable_scu(); // mpcore::enable_smp_mode(); // mpcore::disable_interrupts(); // mpcore::clean_and_invalidate_data_cache(); // if mpcore::cpu_id() == 0 { // boot11::start_cpu(1, _start); // loop {} // } common::start(); busy_sleep(1000); let fb_top = core::slice::from_raw_parts_mut::<[u8; 3]>(0x18000000 as *mut _, SCREEN_TOP_FBSIZE / 3); init_screens(fb_top); { for (pixel, ferris_pixel) in fb_top.iter_mut().zip(FERRIS.chunks(3)) { write_volatile(&mut pixel[0], ferris_pixel[2]); write_volatile(&mut pixel[1], ferris_pixel[1]); write_volatile(&mut pixel[2], ferris_pixel[0]); } } let ref mut console = Console::new(fb_top, 400, 240); let mut pad = GamePad::new(); let mut bg_color = U32HexEditor::new(0); let mut fg_color = U32HexEditor::new(0xFFFFFF00); let mut fg_selected = false; loop { console.go_to(0, 0); let base = AXI_WRAM.end - 0x60; print_addr::<u32>(console, base + 0x10, "svc vector instr"); print_addr::<u32>(console, base + 0x10, "svc vector addr"); print_addr_bin::<u16>(console, 0x10146000, "pad"); writeln!(console, "cpsr = 0b{:032b}", mpcore::cpu_status_reg()).ok(); static mut N: u32 = 0; writeln!(console, "frame {}", N).ok(); N = N.wrapping_add(1); if !pad.l() && pad.y_once() { fg_selected = !fg_selected; } console.set_bg(u32_to_rgb(bg_color.value())); console.set_fg(u32_to_rgb(fg_color.value())); { if !fg_selected { bg_color.manipulate(&pad); } write!(console, "bg_color = ").ok(); bg_color.render_with_cursor(console, !fg_selected); writeln!(console, "").ok(); } { if fg_selected { fg_color.manipulate(&pad); } write!(console, "fg_color = ").ok(); fg_color.render_with_cursor(console, fg_selected); writeln!(console, "").ok(); } // trigger svc if pad.l() && pad.a() { asm!("svc 42"); } // trigger data abort if pad.l() && pad.b() { RW::<usize>::new(0).write(42); } // trigger prefetch abort if pad.l() && pad.y() { asm!("bkpt"); } // trigger undefined instruction if pad.l() && pad.x() { asm!(".word 0xFFFFFFFF"); } pad.poll(); } } struct U32HexEditor { cursor_pos: usize, value: u32, } impl U32HexEditor { const fn new(value: u32) -> Self { Self { cursor_pos: 0, value, } } fn cursor_left(&mut self) { self.cursor_pos += 1; self.cursor_pos %= 8; } fn cursor_right(&mut self) { self.cursor_pos += 8 - 1; self.cursor_pos %= 8; } fn increment(&mut self) { self.modify(|digit| { *digit += 1; *digit %= 16; }) } fn decrement(&mut self) { self.modify(|digit| { *digit += 16 - 1; *digit %= 16; }) } fn modify(&mut self, f: impl FnOnce(&mut
u32)) { let pos = self.cursor_pos * 4; // Extract digit let mut digit = (self.value >> pos) & 0xF; f(&mut digit); digit &= 0xF; // Clear digit self.value &= !(0xF << pos); // Insert digit self.value |= digit << pos; } fn render(&self, console: &mut Console) { self.render_with_cursor(console, true); } fn render_with_cursor(&self, console: &mut Console, with_cursor: bool) { write!(console, "0x").ok(); for cursor_pos in (0..8).rev() { let pos = cursor_pos * 4; let digit = (self.value >> pos) & 0xF; if with_cursor && cursor_pos == self.cursor_pos { console.swap_colors(); } write!(console, "{:X}", digit).ok(); if with_cursor && cursor_pos == self.cursor_pos { console.swap_colors(); } } } fn manipulate(&mut self, pad: &GamePad) { if pad.left_once() { self.cursor_left(); } if pad.right_once() { self.cursor_right(); } if pad.up_once()
if pad.down_once() { self.decrement(); } } fn set_value(&mut self, value: u32) { self.value = value; } fn value(&self) -> u32 { self.value } } pub unsafe fn init_screens(top_fb: &mut [[u8; 3]]) { let brightness_level = 0xFEFE; (*(0x10141200 as *mut Volatile<u32>)).write(0x1007F); (*(0x10202204 as *mut Volatile<u32>)).write(0x01000000); //set LCD fill black to hide potential garbage -- NFIRM does it before firmlaunching (*(0x10202A04 as *mut Volatile<u32>)).write(0x01000000); (*(0x10202014 as *mut Volatile<u32>)).write(0x00000001); (*(0x1020200C as *mut Volatile<u32>)).update(|v| *v &= 0xFFFEFFFE); (*(0x10202240 as *mut Volatile<u32>)).write(brightness_level); (*(0x10202A40 as *mut Volatile<u32>)).write(brightness_level); (*(0x10202244 as *mut Volatile<u32>)).write(0x1023E); (*(0x10202A44 as *mut Volatile<u32>)).write(0x1023E); //Top screen let mut top_fb_conf = gpu::FramebufferConfig::top(); top_fb_conf.set_pixel_clock(0x1c2); top_fb_conf.set_hblank_timer(0xd1); top_fb_conf.reg(0x08).write(0x1c1); top_fb_conf.reg(0x0c).write(0x1c1); top_fb_conf.set_window_x_start(0); top_fb_conf.set_window_x_end(0xcf); top_fb_conf.set_window_y_start(0xd1); top_fb_conf.reg(0x1c).write(0x01c501c1); top_fb_conf.set_window_y_end(0x10000); top_fb_conf.set_vblank_timer(0x19d); top_fb_conf.reg(0x28).write(0x2); top_fb_conf.reg(0x2c).write(0x192); top_fb_conf.set_vtotal(0x192); top_fb_conf.set_vdisp(0x192); top_fb_conf.set_vertical_data_offset(0x1); top_fb_conf.reg(0x3c).write(0x2); top_fb_conf.reg(0x40).write(0x01960192); top_fb_conf.reg(0x44).write(0); top_fb_conf.reg(0x48).write(0); top_fb_conf.reg(0x5C).write(0x00f00190); top_fb_conf.reg(0x60).write(0x01c100d1); top_fb_conf.reg(0x64).write(0x01920002); top_fb_conf.set_buffer0(top_fb.as_ptr() as _); top_fb_conf.set_buffer1(top_fb.as_ptr() as _); top_fb_conf.set_buffer_format(0x80341); top_fb_conf.reg(0x74).write(0x10501); top_fb_conf.set_shown_buffer(0); top_fb_conf.set_alt_buffer0(top_fb.as_ptr() as _); top_fb_conf.set_alt_buffer1(top_fb.as_ptr() as _); top_fb_conf.set_buffer_stride(0x2D0); top_fb_conf.reg(0x9C).write(0); // Set up color LUT top_fb_conf.set_color_lut_index(0); for i in 0..=255 { top_fb_conf.set_color_lut_color(0x10101 * i); } setup_framebuffers(top_fb.as_ptr() as _); } unsafe fn setup_framebuffers(addr: u32) { (*(0x10202204 as *mut Volatile<u32>)).write(0x01000000); //set LCD fill black to hide potential garbage -- NFIRM does it before firmlaunching (*(0x10202A04 as *mut Volatile<u32>)).write(0x01000000); let mut top_fb_conf = gpu::FramebufferConfig::top(); top_fb_conf.reg(0x68).write(addr); top_fb_conf.reg(0x6c).write(addr); top_fb_conf.reg(0x94).write(addr); top_fb_conf.reg(0x98).write(addr); // (*(0x10400568 as *mut Volatile<u32>)).write((u32)fbs[0].bottom); // (*(0x1040056c as *mut Volatile<u32>)).write((u32)fbs[1].bottom); //Set framebuffer format, framebuffer select and stride top_fb_conf.reg(0x70).write(0x80341); top_fb_conf.reg(0x78).write(0); top_fb_conf.reg(0x90).write(0x2D0); (*(0x10400570 as *mut Volatile<u32>)).write(0x80301); (*(0x10400578 as *mut Volatile<u32>)).write(0); (*(0x10400590 as *mut Volatile<u32>)).write(0x2D0); (*(0x10202204 as *mut Volatile<u32>)).write(0x00000000); //unset LCD fill (*(0x10202A04 as *mut Volatile<u32>)).write(0x00000000); } unsafe fn print_addr<T: PrimInt + UpperHex>(console: &mut Console, addr: usize, label: &'static str) { writeln!(console, "[0x{addr:08X}] = 0x{value:0width$X} {label}", addr = addr, value = RO::<T>::new(addr).read(), width = 2 * mem::size_of::<T>(), label = label, ).ok(); }
unsafe fn print_addr_bin<T: PrimInt + Binary>(console: &mut Console, addr: usize, label: &'static str) { writeln!(console, "[0x{addr:08X}] = 0b{value:0width$b} {label}", addr = addr, value = RO::<T>::new(addr).read(), width = 8 * mem::size_of::<T>(), label = label, ).ok(); } fn busy_sleep(iterations: usize) { let n = 42; for _ in 0..15 * iterations { unsafe { read_volatile(&n); } } } fn u32_to_rgb(n: u32) -> [u8; 3] { let c = n.to_be_bytes(); [c[0], c[1], c[2]] }
{ self.increment(); }
conditional_block
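The `conditional_block` middle above (`{ self.increment(); }`) plugs into `U32HexEditor::manipulate`, and both `increment` and `decrement` bottom out in `modify`, which edits one hex digit of a `u32` in place. The nibble arithmetic (extract, transform, mask, clear, insert) is easy to get wrong, so here it is as a free function with a worked check; the name `modify_digit` is mine, not the source's.

```rust
// Apply `f` to the hex digit at `cursor_pos` (0 = least significant nibble).
fn modify_digit(value: u32, cursor_pos: usize, f: impl FnOnce(&mut u32)) -> u32 {
    let pos = cursor_pos * 4;
    // Extract the 4-bit digit.
    let mut digit = (value >> pos) & 0xF;
    f(&mut digit);
    digit &= 0xF; // keep the result a single nibble
    // Clear the old digit, then insert the new one.
    (value & !(0xF << pos)) | (digit << pos)
}

fn main() {
    // Increment with wrap-around, exactly as the editor's `increment` closure does:
    // digit 2 of 0xFFFFFF00 is 0xF, which wraps to 0x0.
    let v = modify_digit(0xFFFF_FF00, 2, |d| {
        *d += 1;
        *d %= 16;
    });
    assert_eq!(v, 0xFFFF_F000);
}
```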
main.rs
#![no_std] #![no_main] #![feature(global_asm, asm, naked_functions)] #![feature(panic_info_message)] #[macro_use] extern crate bitflags; use volatile::Volatile; use lcd::*; use gpu::FramebufferConfig; use core::ptr::{read_volatile, write_volatile}; use core::{str, fmt, cmp, mem}; use core::fmt::{Write, UpperHex, Binary}; use common::input::GamePad; use common::util::reg::*; use common::Console; use common::mem::arm11::*; use num_traits::PrimInt; mod lcd; mod gpu; mod panic; mod mpcore; mod boot11; mod exceptions; const SCREEN_TOP_WIDTH: usize = 400; const SCREEN_BOTTOM_WIDTH: usize = 320; const SCREEN_HEIGHT: usize = 240; const SCREEN_TOP_FBSIZE: usize = (3 * SCREEN_TOP_WIDTH * SCREEN_HEIGHT); const SCREEN_BOTTOM_FBSIZE: usize = (3 * SCREEN_BOTTOM_WIDTH * SCREEN_HEIGHT); global_asm!(r#" .section .text.start .global _start .align 4 .arm _start: cpsid aif, #0x13 ldr r0, =0x24000000 mov sp, r0 blx _rust_start .pool "#); const FERRIS: &[u8] = include_bytes!("../../ferris.data"); #[no_mangle] pub unsafe extern "C" fn _rust_start() -> ! { exceptions::install_handlers(); // mpcore::enable_scu(); // mpcore::enable_smp_mode(); // mpcore::disable_interrupts(); // mpcore::clean_and_invalidate_data_cache(); // if mpcore::cpu_id() == 0 { // boot11::start_cpu(1, _start); // loop {} // } common::start(); busy_sleep(1000); let fb_top = core::slice::from_raw_parts_mut::<[u8; 3]>(0x18000000 as *mut _, SCREEN_TOP_FBSIZE / 3); init_screens(fb_top); { for (pixel, ferris_pixel) in fb_top.iter_mut().zip(FERRIS.chunks(3)) { write_volatile(&mut pixel[0], ferris_pixel[2]); write_volatile(&mut pixel[1], ferris_pixel[1]); write_volatile(&mut pixel[2], ferris_pixel[0]); } } let ref mut console = Console::new(fb_top, 400, 240); let mut pad = GamePad::new(); let mut bg_color = U32HexEditor::new(0); let mut fg_color = U32HexEditor::new(0xFFFFFF00); let mut fg_selected = false; loop { console.go_to(0, 0); let base = AXI_WRAM.end - 0x60; print_addr::<u32>(console, base + 0x10, "svc vector instr"); print_addr::<u32>(console, base + 0x10, "svc vector addr"); print_addr_bin::<u16>(console, 0x10146000, "pad"); writeln!(console, "cpsr = 0b{:032b}", mpcore::cpu_status_reg()).ok(); static mut N: u32 = 0; writeln!(console, "frame {}", N).ok(); N = N.wrapping_add(1); if !pad.l() && pad.y_once() { fg_selected = !fg_selected; } console.set_bg(u32_to_rgb(bg_color.value())); console.set_fg(u32_to_rgb(fg_color.value())); { if !fg_selected { bg_color.manipulate(&pad); } write!(console, "bg_color = ").ok(); bg_color.render_with_cursor(console, !fg_selected); writeln!(console, "").ok(); } { if fg_selected { fg_color.manipulate(&pad); } write!(console, "fg_color = ").ok(); fg_color.render_with_cursor(console, fg_selected); writeln!(console, "").ok(); } // trigger svc if pad.l() && pad.a() { asm!("svc 42"); } // trigger data abort if pad.l() && pad.b() { RW::<usize>::new(0).write(42); } // trigger prefetch abort if pad.l() && pad.y() { asm!("bkpt"); } // trigger undefined instruction if pad.l() && pad.x() { asm!(".word 0xFFFFFFFF"); } pad.poll(); } } struct U32HexEditor { cursor_pos: usize, value: u32, } impl U32HexEditor { const fn new(value: u32) -> Self { Self { cursor_pos: 0, value, } } fn cursor_left(&mut self) { self.cursor_pos += 1; self.cursor_pos %= 8; } fn cursor_right(&mut self) { self.cursor_pos += 8 - 1; self.cursor_pos %= 8; } fn increment(&mut self) { self.modify(|digit| { *digit += 1; *digit %= 16; }) } fn decrement(&mut self) { self.modify(|digit| { *digit += 16 - 1; *digit %= 16; }) } fn modify(&mut self, f: impl FnOnce(&mut
u32)) { let pos = self.cursor_pos * 4; // Extract digit let mut digit = (self.value >> pos) & 0xF; f(&mut digit); digit &= 0xF; // Clear digit self.value &= !(0xF << pos); // Insert digit self.value |= digit << pos; } fn render(&self, console: &mut Console) { self.render_with_cursor(console, true); } fn render_with_cursor(&self, console: &mut Console, with_cursor: bool) { write!(console, "0x").ok(); for cursor_pos in (0..8).rev() { let pos = cursor_pos * 4; let digit = (self.value >> pos) & 0xF; if with_cursor && cursor_pos == self.cursor_pos { console.swap_colors(); } write!(console, "{:X}", digit).ok(); if with_cursor && cursor_pos == self.cursor_pos { console.swap_colors(); } } } fn manipulate(&mut self, pad: &GamePad) { if pad.left_once() { self.cursor_left(); } if pad.right_once() { self.cursor_right(); } if pad.up_once() { self.increment(); } if pad.down_once() { self.decrement(); } } fn set_value(&mut self, value: u32) { self.value = value; } fn value(&self) -> u32 { self.value } } pub unsafe fn init_screens(top_fb: &mut [[u8; 3]]) { let brightness_level = 0xFEFE; (*(0x10141200 as *mut Volatile<u32>)).write(0x1007F); (*(0x10202204 as *mut Volatile<u32>)).write(0x01000000); //set LCD fill black to hide potential garbage -- NFIRM does it before firmlaunching (*(0x10202A04 as *mut Volatile<u32>)).write(0x01000000); (*(0x10202014 as *mut Volatile<u32>)).write(0x00000001); (*(0x1020200C as *mut Volatile<u32>)).update(|v| *v &= 0xFFFEFFFE); (*(0x10202240 as *mut Volatile<u32>)).write(brightness_level); (*(0x10202A40 as *mut Volatile<u32>)).write(brightness_level); (*(0x10202244 as *mut Volatile<u32>)).write(0x1023E); (*(0x10202A44 as *mut Volatile<u32>)).write(0x1023E); //Top screen let mut top_fb_conf = gpu::FramebufferConfig::top(); top_fb_conf.set_pixel_clock(0x1c2); top_fb_conf.set_hblank_timer(0xd1); top_fb_conf.reg(0x08).write(0x1c1); top_fb_conf.reg(0x0c).write(0x1c1); top_fb_conf.set_window_x_start(0); top_fb_conf.set_window_x_end(0xcf); top_fb_conf.set_window_y_start(0xd1); top_fb_conf.reg(0x1c).write(0x01c501c1); top_fb_conf.set_window_y_end(0x10000); top_fb_conf.set_vblank_timer(0x19d); top_fb_conf.reg(0x28).write(0x2); top_fb_conf.reg(0x2c).write(0x192); top_fb_conf.set_vtotal(0x192); top_fb_conf.set_vdisp(0x192); top_fb_conf.set_vertical_data_offset(0x1); top_fb_conf.reg(0x3c).write(0x2); top_fb_conf.reg(0x40).write(0x01960192); top_fb_conf.reg(0x44).write(0); top_fb_conf.reg(0x48).write(0); top_fb_conf.reg(0x5C).write(0x00f00190); top_fb_conf.reg(0x60).write(0x01c100d1); top_fb_conf.reg(0x64).write(0x01920002); top_fb_conf.set_buffer0(top_fb.as_ptr() as _); top_fb_conf.set_buffer1(top_fb.as_ptr() as _); top_fb_conf.set_buffer_format(0x80341); top_fb_conf.reg(0x74).write(0x10501); top_fb_conf.set_shown_buffer(0); top_fb_conf.set_alt_buffer0(top_fb.as_ptr() as _); top_fb_conf.set_alt_buffer1(top_fb.as_ptr() as _);
// Set up color LUT top_fb_conf.set_color_lut_index(0); for i in 0..=255 { top_fb_conf.set_color_lut_color(0x10101 * i); } setup_framebuffers(top_fb.as_ptr() as _); } unsafe fn setup_framebuffers(addr: u32) { (*(0x10202204 as *mut Volatile<u32>)).write(0x01000000); //set LCD fill black to hide potential garbage -- NFIRM does it before firmlaunching (*(0x10202A04 as *mut Volatile<u32>)).write(0x01000000); let mut top_fb_conf = gpu::FramebufferConfig::top(); top_fb_conf.reg(0x68).write(addr); top_fb_conf.reg(0x6c).write(addr); top_fb_conf.reg(0x94).write(addr); top_fb_conf.reg(0x98).write(addr); // (*(0x10400568 as *mut Volatile<u32>)).write((u32)fbs[0].bottom); // (*(0x1040056c as *mut Volatile<u32>)).write((u32)fbs[1].bottom); //Set framebuffer format, framebuffer select and stride top_fb_conf.reg(0x70).write(0x80341); top_fb_conf.reg(0x78).write(0); top_fb_conf.reg(0x90).write(0x2D0); (*(0x10400570 as *mut Volatile<u32>)).write(0x80301); (*(0x10400578 as *mut Volatile<u32>)).write(0); (*(0x10400590 as *mut Volatile<u32>)).write(0x2D0); (*(0x10202204 as *mut Volatile<u32>)).write(0x00000000); //unset LCD fill (*(0x10202A04 as *mut Volatile<u32>)).write(0x00000000); } unsafe fn print_addr<T: PrimInt + UpperHex>(console: &mut Console, addr: usize, label: &'static str) { writeln!(console, "[0x{addr:08X}] = 0x{value:0width$X} {label}", addr = addr, value = RO::<T>::new(addr).read(), width = 2 * mem::size_of::<T>(), label = label, ).ok(); } unsafe fn print_addr_bin<T: PrimInt + Binary>(console: &mut Console, addr: usize, label: &'static str) { writeln!(console, "[0x{addr:08X}] = 0b{value:0width$b} {label}", addr = addr, value = RO::<T>::new(addr).read(), width = 8 * mem::size_of::<T>(), label = label, ).ok(); } fn busy_sleep(iterations: usize) { let n = 42; for _ in 0..15 * iterations { unsafe { read_volatile(&n); } } } fn u32_to_rgb(n: u32) -> [u8; 3] { let c = n.to_be_bytes(); [c[0], c[1], c[2]] }
top_fb_conf.set_buffer_stride(0x2D0); top_fb_conf.reg(0x9C).write(0);
random_line_split
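The `random_line_split` example above comes from bare-metal code that blits an RGB image into a hardware-scanned framebuffer with `write_volatile`, swapping the channel order on the way in. The sketch below reproduces that blit against an ordinary `Vec` so it runs on a host; the BGR destination order is inferred from the source's loop (`ferris_pixel[2]` goes into byte 0), and the function name `blit_rgb_to_bgr` is mine. On real hardware the volatile stores are what keep the compiler from eliding or merging writes to memory the display controller scans out.

```rust
use std::ptr::write_volatile;

// Fill a BGR888 framebuffer from packed RGB pixel data.
// `write_volatile` forces each store to actually happen, which matters when
// the destination is device-visible memory; here we demo on a plain Vec.
fn blit_rgb_to_bgr(fb: &mut [[u8; 3]], rgb: &[u8]) {
    for (pixel, src) in fb.iter_mut().zip(rgb.chunks_exact(3)) {
        unsafe {
            write_volatile(&mut pixel[0], src[2]); // B
            write_volatile(&mut pixel[1], src[1]); // G
            write_volatile(&mut pixel[2], src[0]); // R
        }
    }
}

fn main() {
    let rgb = [0xDE, 0xAD, 0x42, 0x00, 0x11, 0x22];
    let mut fb = vec![[0u8; 3]; 2];
    blit_rgb_to_bgr(&mut fb, &rgb);
    assert_eq!(fb[0], [0x42, 0xAD, 0xDE]);
    assert_eq!(fb[1], [0x22, 0x11, 0x00]);
}
```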
main.rs
#![no_std] #![no_main] #![feature(global_asm, asm, naked_functions)] #![feature(panic_info_message)] #[macro_use] extern crate bitflags; use volatile::Volatile; use lcd::*; use gpu::FramebufferConfig; use core::ptr::{read_volatile, write_volatile}; use core::{str, fmt, cmp, mem}; use core::fmt::{Write, UpperHex, Binary}; use common::input::GamePad; use common::util::reg::*; use common::Console; use common::mem::arm11::*; use num_traits::PrimInt; mod lcd; mod gpu; mod panic; mod mpcore; mod boot11; mod exceptions; const SCREEN_TOP_WIDTH: usize = 400; const SCREEN_BOTTOM_WIDTH: usize = 320; const SCREEN_HEIGHT: usize = 240; const SCREEN_TOP_FBSIZE: usize = (3 * SCREEN_TOP_WIDTH * SCREEN_HEIGHT); const SCREEN_BOTTOM_FBSIZE: usize = (3 * SCREEN_BOTTOM_WIDTH * SCREEN_HEIGHT); global_asm!(r#" .section .text.start .global _start .align 4 .arm _start: cpsid aif, #0x13 ldr r0, =0x24000000 mov sp, r0 blx _rust_start .pool "#); const FERRIS: &[u8] = include_bytes!("../../ferris.data"); #[no_mangle] pub unsafe extern "C" fn _rust_start() -> ! { exceptions::install_handlers(); // mpcore::enable_scu(); // mpcore::enable_smp_mode(); // mpcore::disable_interrupts(); // mpcore::clean_and_invalidate_data_cache(); // if mpcore::cpu_id() == 0 { // boot11::start_cpu(1, _start); // loop {} // } common::start(); busy_sleep(1000); let fb_top = core::slice::from_raw_parts_mut::<[u8; 3]>(0x18000000 as *mut _, SCREEN_TOP_FBSIZE / 3); init_screens(fb_top); { for (pixel, ferris_pixel) in fb_top.iter_mut().zip(FERRIS.chunks(3)) { write_volatile(&mut pixel[0], ferris_pixel[2]); write_volatile(&mut pixel[1], ferris_pixel[1]); write_volatile(&mut pixel[2], ferris_pixel[0]); } } let ref mut console = Console::new(fb_top, 400, 240); let mut pad = GamePad::new(); let mut bg_color = U32HexEditor::new(0); let mut fg_color = U32HexEditor::new(0xFFFFFF00); let mut fg_selected = false; loop { console.go_to(0, 0); let base = AXI_WRAM.end - 0x60; print_addr::<u32>(console, base + 0x10, "svc vector instr"); print_addr::<u32>(console, base + 0x10, "svc vector addr"); print_addr_bin::<u16>(console, 0x10146000, "pad"); writeln!(console, "cpsr = 0b{:032b}", mpcore::cpu_status_reg()).ok(); static mut N: u32 = 0; writeln!(console, "frame {}", N).ok(); N = N.wrapping_add(1); if !pad.l() && pad.y_once() { fg_selected = !fg_selected; } console.set_bg(u32_to_rgb(bg_color.value())); console.set_fg(u32_to_rgb(fg_color.value())); { if !fg_selected { bg_color.manipulate(&pad); } write!(console, "bg_color = ").ok(); bg_color.render_with_cursor(console, !fg_selected); writeln!(console, "").ok(); } { if fg_selected { fg_color.manipulate(&pad); } write!(console, "fg_color = ").ok(); fg_color.render_with_cursor(console, fg_selected); writeln!(console, "").ok(); } // trigger svc if pad.l() && pad.a() { asm!("svc 42"); } // trigger data abort if pad.l() && pad.b() { RW::<usize>::new(0).write(42); } // trigger prefetch abort if pad.l() && pad.y() { asm!("bkpt"); } // trigger undefined instruction if pad.l() && pad.x() { asm!(".word 0xFFFFFFFF"); } pad.poll(); } } struct U32HexEditor { cursor_pos: usize, value: u32, } impl U32HexEditor { const fn new(value: u32) -> Self { Self { cursor_pos: 0, value, } } fn cursor_left(&mut self) { self.cursor_pos += 1; self.cursor_pos %= 8; } fn cursor_right(&mut self) { self.cursor_pos += 8 - 1; self.cursor_pos %= 8; } fn increment(&mut self) { self.modify(|digit| { *digit += 1; *digit %= 16; }) } fn decrement(&mut self) { self.modify(|digit| { *digit += 16 - 1; *digit %= 16; }) } fn
(&mut self, f: impl FnOnce(&mut u32)) { let pos = self.cursor_pos * 4; // Extract digit let mut digit = (self.value >> pos) & 0xF; f(&mut digit); digit &= 0xF; // Clear digit self.value &= !(0xF << pos); // Insert digit self.value |= digit << pos; } fn render(&self, console: &mut Console) { self.render_with_cursor(console, true); } fn render_with_cursor(&self, console: &mut Console, with_cursor: bool) { write!(console, "0x").ok(); for cursor_pos in (0..8).rev() { let pos = cursor_pos * 4; let digit = (self.value >> pos) & 0xF; if with_cursor && cursor_pos == self.cursor_pos { console.swap_colors(); } write!(console, "{:X}", digit).ok(); if with_cursor && cursor_pos == self.cursor_pos { console.swap_colors(); } } } fn manipulate(&mut self, pad: &GamePad) { if pad.left_once() { self.cursor_left(); } if pad.right_once() { self.cursor_right(); } if pad.up_once() { self.increment(); } if pad.down_once() { self.decrement(); } } fn set_value(&mut self, value: u32) { self.value = value; } fn value(&self) -> u32 { self.value } } pub unsafe fn init_screens(top_fb: &mut [[u8; 3]]) { let brightness_level = 0xFEFE; (*(0x10141200 as *mut Volatile<u32>)).write(0x1007F); (*(0x10202204 as *mut Volatile<u32>)).write(0x01000000); //set LCD fill black to hide potential garbage -- NFIRM does it before firmlaunching (*(0x10202A04 as *mut Volatile<u32>)).write(0x01000000); (*(0x10202014 as *mut Volatile<u32>)).write(0x00000001); (*(0x1020200C as *mut Volatile<u32>)).update(|v| *v &= 0xFFFEFFFE); (*(0x10202240 as *mut Volatile<u32>)).write(brightness_level); (*(0x10202A40 as *mut Volatile<u32>)).write(brightness_level); (*(0x10202244 as *mut Volatile<u32>)).write(0x1023E); (*(0x10202A44 as *mut Volatile<u32>)).write(0x1023E); //Top screen let mut top_fb_conf = gpu::FramebufferConfig::top(); top_fb_conf.set_pixel_clock(0x1c2); top_fb_conf.set_hblank_timer(0xd1); top_fb_conf.reg(0x08).write(0x1c1); top_fb_conf.reg(0x0c).write(0x1c1); top_fb_conf.set_window_x_start(0); top_fb_conf.set_window_x_end(0xcf); top_fb_conf.set_window_y_start(0xd1); top_fb_conf.reg(0x1c).write(0x01c501c1); top_fb_conf.set_window_y_end(0x10000); top_fb_conf.set_vblank_timer(0x19d); top_fb_conf.reg(0x28).write(0x2); top_fb_conf.reg(0x2c).write(0x192); top_fb_conf.set_vtotal(0x192); top_fb_conf.set_vdisp(0x192); top_fb_conf.set_vertical_data_offset(0x1); top_fb_conf.reg(0x3c).write(0x2); top_fb_conf.reg(0x40).write(0x01960192); top_fb_conf.reg(0x44).write(0); top_fb_conf.reg(0x48).write(0); top_fb_conf.reg(0x5C).write(0x00f00190); top_fb_conf.reg(0x60).write(0x01c100d1); top_fb_conf.reg(0x64).write(0x01920002); top_fb_conf.set_buffer0(top_fb.as_ptr() as _); top_fb_conf.set_buffer1(top_fb.as_ptr() as _); top_fb_conf.set_buffer_format(0x80341); top_fb_conf.reg(0x74).write(0x10501); top_fb_conf.set_shown_buffer(0); top_fb_conf.set_alt_buffer0(top_fb.as_ptr() as _); top_fb_conf.set_alt_buffer1(top_fb.as_ptr() as _); top_fb_conf.set_buffer_stride(0x2D0); top_fb_conf.reg(0x9C).write(0); // Set up color LUT top_fb_conf.set_color_lut_index(0); for i in 0..=255 { top_fb_conf.set_color_lut_color(0x10101 * i); } setup_framebuffers(top_fb.as_ptr() as _); } unsafe fn setup_framebuffers(addr: u32) { (*(0x10202204 as *mut Volatile<u32>)).write(0x01000000); //set LCD fill black to hide potential garbage -- NFIRM does it before firmlaunching (*(0x10202A04 as *mut Volatile<u32>)).write(0x01000000); let mut top_fb_conf = gpu::FramebufferConfig::top(); top_fb_conf.reg(0x68).write(addr); top_fb_conf.reg(0x6c).write(addr); top_fb_conf.reg(0x94).write(addr);
top_fb_conf.reg(0x98).write(addr); // (*(0x10400568 as *mut Volatile<u32>)).write((u32)fbs[0].bottom); // (*(0x1040056c as *mut Volatile<u32>)).write((u32)fbs[1].bottom); //Set framebuffer format, framebuffer select and stride top_fb_conf.reg(0x70).write(0x80341); top_fb_conf.reg(0x78).write(0); top_fb_conf.reg(0x90).write(0x2D0); (*(0x10400570 as *mut Volatile<u32>)).write(0x80301); (*(0x10400578 as *mut Volatile<u32>)).write(0); (*(0x10400590 as *mut Volatile<u32>)).write(0x2D0); (*(0x10202204 as *mut Volatile<u32>)).write(0x00000000); //unset LCD fill (*(0x10202A04 as *mut Volatile<u32>)).write(0x00000000); } unsafe fn print_addr<T: PrimInt + UpperHex>(console: &mut Console, addr: usize, label: &'static str) { writeln!(console, "[0x{addr:08X}] = 0x{value:0width$X} {label}", addr = addr, value = RO::<T>::new(addr).read(), width = 2 * mem::size_of::<T>(), label = label, ).ok(); } unsafe fn print_addr_bin<T: PrimInt + Binary>(console: &mut Console, addr: usize, label: &'static str) { writeln!(console, "[0x{addr:08X}] = 0b{value:0width$b} {label}", addr = addr, value = RO::<T>::new(addr).read(), width = 8 * mem::size_of::<T>(), label = label, ).ok(); } fn busy_sleep(iterations: usize) { let n = 42; for _ in 0..15 * iterations { unsafe { read_volatile(&n); } } } fn u32_to_rgb(n: u32) -> [u8; 3] { let c = n.to_be_bytes(); [c[0], c[1], c[2]] }
modify
identifier_name
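The FIM middle above is the name of `U32HexEditor::modify`, whose body shifts one hex digit down, runs a closure over it, and splices it back. Here are the same extract/clear/insert steps as a free function, a sketch for clarity rather than part of the original crate:

```rust
/// Rewrite the hex digit at `pos` (0 = least significant nibble)
/// of `value`, leaving the other seven digits untouched.
fn edit_nibble(value: u32, pos: usize, f: impl FnOnce(u32) -> u32) -> u32 {
    assert!(pos < 8);
    let shift = pos * 4;
    let digit = (value >> shift) & 0xF;        // extract
    let new = f(digit) & 0xF;                  // transform, clamp to 4 bits
    (value & !(0xF << shift)) | (new << shift) // clear, then insert
}

fn main() {
    // Increment digit 4 with wraparound, as the editor's `increment` does.
    assert_eq!(edit_nibble(0xFFFF_FF00, 4, |d| (d + 1) % 16), 0xFFF0_FF00);
}
```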
main.rs
#![no_std] #![no_main] #![feature(global_asm, asm, naked_functions)] #![feature(panic_info_message)] #[macro_use] extern crate bitflags; use volatile::Volatile; use lcd::*; use gpu::FramebufferConfig; use core::ptr::{read_volatile, write_volatile}; use core::{str, fmt, cmp, mem}; use core::fmt::{Write, UpperHex, Binary}; use common::input::GamePad; use common::util::reg::*; use common::Console; use common::mem::arm11::*; use num_traits::PrimInt; mod lcd; mod gpu; mod panic; mod mpcore; mod boot11; mod exceptions; const SCREEN_TOP_WIDTH: usize = 400; const SCREEN_BOTTOM_WIDTH: usize = 320; const SCREEN_HEIGHT: usize = 240; const SCREEN_TOP_FBSIZE: usize = (3 * SCREEN_TOP_WIDTH * SCREEN_HEIGHT); const SCREEN_BOTTOM_FBSIZE: usize = (3 * SCREEN_BOTTOM_WIDTH * SCREEN_HEIGHT); global_asm!(r#" .section.text.start .global _start .align 4 .arm _start: cpsid aif, #0x13 ldr r0, =0x24000000 mov sp, r0 blx _rust_start .pool "#); const FERRIS: &[u8] = include_bytes!("../../ferris.data"); #[no_mangle] pub unsafe extern "C" fn _rust_start() ->! { exceptions::install_handlers(); // mpcore::enable_scu(); // mpcore::enable_smp_mode(); // mpcore::disable_interrupts(); // mpcore::clean_and_invalidate_data_cache(); // if mpcore::cpu_id() == 0 { // boot11::start_cpu(1, _start); // loop {} // } common::start(); busy_sleep(1000); let fb_top = core::slice::from_raw_parts_mut::<[u8; 3]>(0x18000000 as *mut _, SCREEN_TOP_FBSIZE / 3); init_screens(fb_top); { for (pixel, ferris_pixel) in fb_top.iter_mut().zip(FERRIS.chunks(3)) { write_volatile(&mut pixel[0], ferris_pixel[2]); write_volatile(&mut pixel[1], ferris_pixel[1]); write_volatile(&mut pixel[2], ferris_pixel[0]); } } let ref mut console = Console::new(fb_top, 400, 240); let mut pad = GamePad::new(); let mut bg_color = U32HexEditor::new(0); let mut fg_color = U32HexEditor::new(0xFFFFFF00); let mut fg_selected = false; loop { console.go_to(0, 0); let base = AXI_WRAM.end - 0x60; print_addr::<u32>(console, base + 0x10, "svc vector instr"); print_addr::<u32>(console, base + 0x10, "svc vector addr"); print_addr_bin::<u16>(console, 0x10146000, "pad"); writeln!(console, "cpsr = 0b{:032b}", mpcore::cpu_status_reg()).ok(); static mut N: u32 = 0; writeln!(console, "frame {}", N).ok(); N = N.wrapping_add(1); if!pad.l() && pad.y_once() { fg_selected =!fg_selected; } console.set_bg(u32_to_rgb(bg_color.value())); console.set_fg(u32_to_rgb(fg_color.value())); { if!fg_selected { bg_color.manipulate(&pad); } write!(console, "bg_color = ").ok(); bg_color.render_with_cursor(console,!fg_selected); writeln!(console, "").ok(); } { if fg_selected { fg_color.manipulate(&pad); } write!(console, "fg_color = ").ok(); fg_color.render_with_cursor(console, fg_selected); writeln!(console, "").ok(); } // trigger svc if pad.l() && pad.a() { asm!("svc 42"); } // trigger data abort if pad.l() && pad.b() { RW::<usize>::new(0).write(42); } // trigger prefetch abort if pad.l() && pad.y() { asm!("bkpt"); } // trigger undefined instruction if pad.l() && pad.x() { asm!(".word 0xFFFFFFFF"); } pad.poll(); } } struct U32HexEditor { cursor_pos: usize, value: u32, } impl U32HexEditor { const fn new(value: u32) -> Self { Self { cursor_pos: 0, value, } } fn cursor_left(&mut self) { self.cursor_pos += 1; self.cursor_pos %= 8; } fn cursor_right(&mut self) { self.cursor_pos += 8 - 1; self.cursor_pos %= 8; } fn increment(&mut self) { self.modify(|digit| { *digit += 1; *digit %= 16; }) } fn decrement(&mut self) { self.modify(|digit| { *digit += 16 - 1; *digit %= 16; }) } fn modify(&mut self, f: impl FnOnce(&mut 
u32)) { let pos = self.cursor_pos * 4; // Extract digit let mut digit = (self.value >> pos) & 0xF; f(&mut digit); digit &= 0xF; // Clear digit self.value &=!(0xF << pos); // Insert digit self.value |= digit << pos; } fn render(&self, console: &mut Console) { self.render_with_cursor(console, true); } fn render_with_cursor(&self, console: &mut Console, with_cursor: bool) { write!(console, "0x").ok(); for cursor_pos in (0..8).rev() { let pos = cursor_pos * 4; let digit = (self.value >> pos) & 0xF; if with_cursor && cursor_pos == self.cursor_pos { console.swap_colors(); } write!(console, "{:X}", digit).ok(); if with_cursor && cursor_pos == self.cursor_pos { console.swap_colors(); } } } fn manipulate(&mut self, pad: &GamePad) { if pad.left_once() { self.cursor_left(); } if pad.right_once() { self.cursor_right(); } if pad.up_once() { self.increment(); } if pad.down_once() { self.decrement(); } } fn set_value(&mut self, value: u32) { self.value = value; } fn value(&self) -> u32 { self.value } } pub unsafe fn init_screens(top_fb: &mut [[u8; 3]]) { let brightness_level = 0xFEFE; (*(0x10141200 as *mut Volatile<u32>)).write(0x1007F); (*(0x10202204 as *mut Volatile<u32>)).write(0x01000000); //set LCD fill black to hide potential garbage -- NFIRM does it before firmlaunching (*(0x10202A04 as *mut Volatile<u32>)).write(0x01000000); (*(0x10202014 as *mut Volatile<u32>)).write(0x00000001); (*(0x1020200C as *mut Volatile<u32>)).update(|v| *v &= 0xFFFEFFFE); (*(0x10202240 as *mut Volatile<u32>)).write(brightness_level); (*(0x10202A40 as *mut Volatile<u32>)).write(brightness_level); (*(0x10202244 as *mut Volatile<u32>)).write(0x1023E); (*(0x10202A44 as *mut Volatile<u32>)).write(0x1023E); //Top screen let mut top_fb_conf = gpu::FramebufferConfig::top(); top_fb_conf.set_pixel_clock(0x1c2); top_fb_conf.set_hblank_timer(0xd1); top_fb_conf.reg(0x08).write(0x1c1); top_fb_conf.reg(0x0c).write(0x1c1); top_fb_conf.set_window_x_start(0); top_fb_conf.set_window_x_end(0xcf); top_fb_conf.set_window_y_start(0xd1); top_fb_conf.reg(0x1c).write(0x01c501c1); top_fb_conf.set_window_y_end(0x10000); top_fb_conf.set_vblank_timer(0x19d); top_fb_conf.reg(0x28).write(0x2); top_fb_conf.reg(0x2c).write(0x192); top_fb_conf.set_vtotal(0x192); top_fb_conf.set_vdisp(0x192); top_fb_conf.set_vertical_data_offset(0x1); top_fb_conf.reg(0x3c).write(0x2); top_fb_conf.reg(0x40).write(0x01960192); top_fb_conf.reg(0x44).write(0); top_fb_conf.reg(0x48).write(0); top_fb_conf.reg(0x5C).write(0x00f00190); top_fb_conf.reg(0x60).write(0x01c100d1); top_fb_conf.reg(0x64).write(0x01920002); top_fb_conf.set_buffer0(top_fb.as_ptr() as _); top_fb_conf.set_buffer1(top_fb.as_ptr() as _); top_fb_conf.set_buffer_format(0x80341); top_fb_conf.reg(0x74).write(0x10501); top_fb_conf.set_shown_buffer(0); top_fb_conf.set_alt_buffer0(top_fb.as_ptr() as _); top_fb_conf.set_alt_buffer1(top_fb.as_ptr() as _); top_fb_conf.set_buffer_stride(0x2D0); top_fb_conf.reg(0x9C).write(0); // Set up color LUT top_fb_conf.set_color_lut_index(0); for i in 0..= 255 { top_fb_conf.set_color_lut_color(0x10101 * i); } setup_framebuffers(top_fb.as_ptr() as _); } unsafe fn setup_framebuffers(addr: u32) { (*(0x10202204 as *mut Volatile<u32>)).write(0x01000000); //set LCD fill black to hide potential garbage -- NFIRM does it before firmlaunching (*(0x10202A04 as *mut Volatile<u32>)).write(0x01000000); let mut top_fb_conf = gpu::FramebufferConfig::top(); top_fb_conf.reg(0x68).write(addr); top_fb_conf.reg(0x6c).write(addr); top_fb_conf.reg(0x94).write(addr); top_fb_conf.reg(0x98).write(addr); // 
(*(0x10400568 as *mut Volatile<u32>)).write((u32)fbs[0].bottom); // (*(0x1040056c as *mut Volatile<u32>)).write((u32)fbs[1].bottom); //Set framebuffer format, framebuffer select and stride top_fb_conf.reg(0x70).write(0x80341); top_fb_conf.reg(0x78).write(0); top_fb_conf.reg(0x90).write(0x2D0); (*(0x10400570 as *mut Volatile<u32>)).write(0x80301); (*(0x10400578 as *mut Volatile<u32>)).write(0); (*(0x10400590 as *mut Volatile<u32>)).write(0x2D0); (*(0x10202204 as *mut Volatile<u32>)).write(0x00000000); //unset LCD fill (*(0x10202A04 as *mut Volatile<u32>)).write(0x00000000); } unsafe fn print_addr<T: PrimInt + UpperHex>(console: &mut Console, addr: usize, label: &'static str) { writeln!(console, "[0x{addr:08X}] = 0x{value:0width$X} {label}", addr = addr, value = RO::<T>::new(addr).read(), width = 2 * mem::size_of::<T>(), label = label, ).ok(); } unsafe fn print_addr_bin<T: PrimInt + Binary>(console: &mut Console, addr: usize, label: &'static str) { writeln!(console, "[0x{addr:08X}] = 0b{value:0width$b} {label}", addr = addr, value = RO::<T>::new(addr).read(), width = 8 * mem::size_of::<T>(), label = label, ).ok(); } fn busy_sleep(iterations: usize) { let n = 42; for _ in 0.. 15 * iterations { unsafe { read_volatile(&n); } } } fn u32_to_rgb(n: u32) -> [u8; 3]
{ let c = n.to_be_bytes(); [c[0], c[1], c[2]] }
identifier_body
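This record's middle is the body of `u32_to_rgb`, which treats the color as `0xRRGGBBxx` and keeps the top three big-endian bytes. A standalone check of that byte layout:

```rust
fn u32_to_rgb(n: u32) -> [u8; 3] {
    let c = n.to_be_bytes(); // [0xRR, 0xGG, 0xBB, low byte unused]
    [c[0], c[1], c[2]]
}

fn main() {
    // 0xFFFFFF00, the default fg_color above, decodes to white.
    assert_eq!(u32_to_rgb(0xFFFF_FF00), [0xFF, 0xFF, 0xFF]);
    assert_eq!(u32_to_rgb(0x1020_30FF), [0x10, 0x20, 0x30]);
}
```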
call.rs
use super::arch::*; use super::data::{Map, SigAction, Stat, StatVfs, TimeSpec}; use super::error::Result; use super::flag::*; use super::number::*; use core::{mem, ptr}; // Signal restorer extern "C" fn restorer() ->! { sigreturn().unwrap(); unreachable!(); } /// Close a file pub fn
(fd: usize) -> Result<usize> { unsafe { syscall1(SYS_CLOSE, fd) } } /// Get the current system time pub fn clock_gettime(clock: usize, tp: &mut TimeSpec) -> Result<usize> { unsafe { syscall2(SYS_CLOCK_GETTIME, clock, tp as *mut TimeSpec as usize) } } /// Copy and transform a file descriptor pub fn dup(fd: usize, buf: &[u8]) -> Result<usize> { unsafe { syscall3(SYS_DUP, fd, buf.as_ptr() as usize, buf.len()) } } /// Copy and transform a file descriptor pub fn dup2(fd: usize, newfd: usize, buf: &[u8]) -> Result<usize> { unsafe { syscall4(SYS_DUP2, fd, newfd, buf.as_ptr() as usize, buf.len()) } } /// Exit the current process pub fn exit(status: usize) -> Result<usize> { unsafe { syscall1(SYS_EXIT, status) } } /// Change file permissions pub fn fchmod(fd: usize, mode: u16) -> Result<usize> { unsafe { syscall2(SYS_FCHMOD, fd, mode as usize) } } /// Change file ownership pub fn fchown(fd: usize, uid: u32, gid: u32) -> Result<usize> { unsafe { syscall3(SYS_FCHOWN, fd, uid as usize, gid as usize) } } /// Change file descriptor flags pub fn fcntl(fd: usize, cmd: usize, arg: usize) -> Result<usize> { unsafe { syscall3(SYS_FCNTL, fd, cmd, arg) } } /// Map a file into memory, but with the ability to set the address to map into, either as a hint /// or as a requirement of the map. /// /// # Errors /// `EACCES` - the file descriptor was not open for reading /// `EBADF` - if the file descriptor was invalid /// `ENODEV` - mmapping was not supported /// `EINVAL` - invalid combination of flags /// `EEXIST` - if [`MapFlags::MAP_FIXED`] was set, and the address specified was already in use. /// pub unsafe fn fmap(fd: usize, map: &Map) -> Result<usize> { syscall3(SYS_FMAP, fd, map as *const Map as usize, mem::size_of::<Map>()) } /// Unmap whole (or partial) continous memory-mapped files pub unsafe fn funmap(addr: usize, len: usize) -> Result<usize> { syscall2(SYS_FUNMAP, addr, len) } /// Retrieve the canonical path of a file pub fn fpath(fd: usize, buf: &mut [u8]) -> Result<usize> { unsafe { syscall3(SYS_FPATH, fd, buf.as_mut_ptr() as usize, buf.len()) } } /// Rename a file pub fn frename<T: AsRef<str>>(fd: usize, path: T) -> Result<usize> { unsafe { syscall3(SYS_FRENAME, fd, path.as_ref().as_ptr() as usize, path.as_ref().len()) } } /// Get metadata about a file pub fn fstat(fd: usize, stat: &mut Stat) -> Result<usize> { unsafe { syscall3(SYS_FSTAT, fd, stat as *mut Stat as usize, mem::size_of::<Stat>()) } } /// Get metadata about a filesystem pub fn fstatvfs(fd: usize, stat: &mut StatVfs) -> Result<usize> { unsafe { syscall3(SYS_FSTATVFS, fd, stat as *mut StatVfs as usize, mem::size_of::<StatVfs>()) } } /// Sync a file descriptor to its underlying medium pub fn fsync(fd: usize) -> Result<usize> { unsafe { syscall1(SYS_FSYNC, fd) } } /// Truncate or extend a file to a specified length pub fn ftruncate(fd: usize, len: usize) -> Result<usize> { unsafe { syscall2(SYS_FTRUNCATE, fd, len) } } // Change modify and/or access times pub fn futimens(fd: usize, times: &[TimeSpec]) -> Result<usize> { unsafe { syscall3(SYS_FUTIMENS, fd, times.as_ptr() as usize, times.len() * mem::size_of::<TimeSpec>()) } } /// Fast userspace mutex pub unsafe fn futex(addr: *mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32) -> Result<usize> { syscall5(SYS_FUTEX, addr as usize, op, (val as isize) as usize, val2, addr2 as usize) } /// Get the effective group ID pub fn getegid() -> Result<usize> { unsafe { syscall0(SYS_GETEGID) } } /// Get the effective namespace pub fn getens() -> Result<usize> { unsafe { syscall0(SYS_GETENS) } } /// 
Get the effective user ID pub fn geteuid() -> Result<usize> { unsafe { syscall0(SYS_GETEUID) } } /// Get the current group ID pub fn getgid() -> Result<usize> { unsafe { syscall0(SYS_GETGID) } } /// Get the current namespace pub fn getns() -> Result<usize> { unsafe { syscall0(SYS_GETNS) } } /// Get the current process ID pub fn getpid() -> Result<usize> { unsafe { syscall0(SYS_GETPID) } } /// Get the process group ID pub fn getpgid(pid: usize) -> Result<usize> { unsafe { syscall1(SYS_GETPGID, pid) } } /// Get the parent process ID pub fn getppid() -> Result<usize> { unsafe { syscall0(SYS_GETPPID) } } /// Get the current user ID pub fn getuid() -> Result<usize> { unsafe { syscall0(SYS_GETUID) } } /// Set the I/O privilege level /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `EINVAL` - `level > 3` pub unsafe fn iopl(level: usize) -> Result<usize> { syscall1(SYS_IOPL, level) } /// Send a signal `sig` to the process identified by `pid` pub fn kill(pid: usize, sig: usize) -> Result<usize> { unsafe { syscall2(SYS_KILL, pid, sig) } } /// Create a link to a file pub unsafe fn link(old: *const u8, new: *const u8) -> Result<usize> { syscall2(SYS_LINK, old as usize, new as usize) } /// Seek to `offset` bytes in a file descriptor pub fn lseek(fd: usize, offset: isize, whence: usize) -> Result<usize> { unsafe { syscall3(SYS_LSEEK, fd, offset as usize, whence) } } /// Make a new scheme namespace pub fn mkns(schemes: &[[usize; 2]]) -> Result<usize> { unsafe { syscall2(SYS_MKNS, schemes.as_ptr() as usize, schemes.len()) } } /// Change mapping flags pub unsafe fn mprotect(addr: usize, size: usize, flags: MapFlags) -> Result<usize> { syscall3(SYS_MPROTECT, addr, size, flags.bits()) } /// Sleep for the time specified in `req` pub fn nanosleep(req: &TimeSpec, rem: &mut TimeSpec) -> Result<usize> { unsafe { syscall2(SYS_NANOSLEEP, req as *const TimeSpec as usize, rem as *mut TimeSpec as usize) } } /// Open a file pub fn open<T: AsRef<str>>(path: T, flags: usize) -> Result<usize> { unsafe { syscall3(SYS_OPEN, path.as_ref().as_ptr() as usize, path.as_ref().len(), flags) } } /// Allocate frames, linearly in physical memory. /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `ENOMEM` - the system has run out of available memory pub unsafe fn physalloc(size: usize) -> Result<usize> { syscall1(SYS_PHYSALLOC, size) } /// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain /// [`PARTIAL_ALLOC`], this will result in `physalloc3` with `min = 1`. /// /// Refer to the simpler [`physalloc`] and the more complex [`physalloc3`], that this convenience /// function is based on. /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `ENOMEM` - the system has run out of available memory pub unsafe fn physalloc2(size: usize, flags: usize) -> Result<usize> { let mut ret = 1usize; physalloc3(size, flags, &mut ret) } /// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain /// [`PARTIAL_ALLOC`], the `min` parameter specifies the number of frames that have to be allocated /// for this operation to succeed. The return value is the offset of the first frame, and `min` is /// overwritten with the number of frames actually allocated. /// /// Refer to the simpler [`physalloc`] and the simpler library function [`physalloc2`]. 
/// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `ENOMEM` - the system has run out of available memory /// * `EINVAL` - `min = 0` pub unsafe fn physalloc3(size: usize, flags: usize, min: &mut usize) -> Result<usize> { syscall3(SYS_PHYSALLOC3, size, flags, min as *mut usize as usize) } /// Free physically allocated pages /// /// # Errors /// /// * `EPERM` - `uid!= 0` pub unsafe fn physfree(physical_address: usize, size: usize) -> Result<usize> { syscall2(SYS_PHYSFREE, physical_address, size) } /// Map physical memory to virtual memory /// /// # Errors /// /// * `EPERM` - `uid!= 0` pub unsafe fn physmap(physical_address: usize, size: usize, flags: PhysmapFlags) -> Result<usize> { syscall3(SYS_PHYSMAP, physical_address, size, flags.bits()) } /// Create a pair of file descriptors referencing the read and write ends of a pipe pub fn pipe2(fds: &mut [usize; 2], flags: usize) -> Result<usize> { unsafe { syscall2(SYS_PIPE2, fds.as_ptr() as usize, flags) } } /// Read from a file descriptor into a buffer pub fn read(fd: usize, buf: &mut [u8]) -> Result<usize> { unsafe { syscall3(SYS_READ, fd, buf.as_mut_ptr() as usize, buf.len()) } } /// Remove a directory pub fn rmdir<T: AsRef<str>>(path: T) -> Result<usize> { unsafe { syscall2(SYS_RMDIR, path.as_ref().as_ptr() as usize, path.as_ref().len()) } } /// Set the process group ID pub fn setpgid(pid: usize, pgid: usize) -> Result<usize> { unsafe { syscall2(SYS_SETPGID, pid, pgid) } } /// Set the current process group IDs pub fn setregid(rgid: usize, egid: usize) -> Result<usize> { unsafe { syscall2(SYS_SETREGID, rgid, egid) } } /// Make a new scheme namespace pub fn setrens(rns: usize, ens: usize) -> Result<usize> { unsafe { syscall2(SYS_SETRENS, rns, ens) } } /// Set the current process user IDs pub fn setreuid(ruid: usize, euid: usize) -> Result<usize> { unsafe { syscall2(SYS_SETREUID, ruid, euid) } } /// Set up a signal handler pub fn sigaction(sig: usize, act: Option<&SigAction>, oldact: Option<&mut SigAction>) -> Result<usize> { unsafe { syscall4(SYS_SIGACTION, sig, act.map(|x| x as *const _).unwrap_or_else(ptr::null) as usize, oldact.map(|x| x as *mut _).unwrap_or_else(ptr::null_mut) as usize, restorer as usize) } } /// Get and/or set signal masks pub fn sigprocmask(how: usize, set: Option<&[u64; 2]>, oldset: Option<&mut [u64; 2]>) -> Result<usize> { unsafe { syscall3(SYS_SIGPROCMASK, how, set.map(|x| x as *const _).unwrap_or_else(ptr::null) as usize, oldset.map(|x| x as *mut _).unwrap_or_else(ptr::null_mut) as usize) } } // Return from signal handler pub fn sigreturn() -> Result<usize> { unsafe { syscall0(SYS_SIGRETURN) } } /// Set the file mode creation mask pub fn umask(mask: usize) -> Result<usize> { unsafe { syscall1(SYS_UMASK, mask) } } /// Remove a file pub fn unlink<T: AsRef<str>>(path: T) -> Result<usize> { unsafe { syscall2(SYS_UNLINK, path.as_ref().as_ptr() as usize, path.as_ref().len()) } } /// Convert a virtual address to a physical one /// /// # Errors /// /// * `EPERM` - `uid!= 0` pub unsafe fn virttophys(virtual_address: usize) -> Result<usize> { syscall1(SYS_VIRTTOPHYS, virtual_address) } /// Check if a child process has exited or received a signal pub fn waitpid(pid: usize, status: &mut usize, options: WaitFlags) -> Result<usize> { unsafe { syscall3(SYS_WAITPID, pid, status as *mut usize as usize, options.bits()) } } /// Write a buffer to a file descriptor /// /// The kernel will attempt to write the bytes in `buf` to the file descriptor `fd`, returning /// either an `Err`, explained below, or `Ok(count)` where `count` is the 
number of bytes which /// were written. /// /// # Errors /// /// * `EAGAIN` - the file descriptor was opened with `O_NONBLOCK` and writing would block /// * `EBADF` - the file descriptor is not valid or is not open for writing /// * `EFAULT` - `buf` does not point to the process's addressable memory /// * `EIO` - an I/O error occurred /// * `ENOSPC` - the device containing the file descriptor has no room for data /// * `EPIPE` - the file descriptor refers to a pipe or socket whose reading end is closed pub fn write(fd: usize, buf: &[u8]) -> Result<usize> { unsafe { syscall3(SYS_WRITE, fd, buf.as_ptr() as usize, buf.len()) } } /// Yield the process's time slice to the kernel /// /// This function will return `Ok(0)` on success pub fn sched_yield() -> Result<usize> { unsafe { syscall0(SYS_YIELD) } }
close
identifier_name
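All the wrappers in this record reduce to `syscallN(SYS_*, ...)` calls returning the crate's `Result<usize>`, so they compose with `?`. A hedged usage sketch: it assumes an `O_RDONLY` flag constant from the crate's `flag` module, which is not shown in this record:

```rust
// Read the start of a file through the raw wrappers defined above.
fn read_prefix(path: &str, buf: &mut [u8]) -> Result<usize> {
    let fd = open(path, O_RDONLY)?; // Err propagates the kernel error code
    let n = read(fd, buf)?;
    let _ = close(fd);              // best-effort close in this sketch
    Ok(n)
}
```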
call.rs
use super::arch::*; use super::data::{Map, SigAction, Stat, StatVfs, TimeSpec}; use super::error::Result; use super::flag::*; use super::number::*; use core::{mem, ptr}; // Signal restorer extern "C" fn restorer() ->! { sigreturn().unwrap(); unreachable!(); } /// Close a file pub fn close(fd: usize) -> Result<usize> { unsafe { syscall1(SYS_CLOSE, fd) } } /// Get the current system time pub fn clock_gettime(clock: usize, tp: &mut TimeSpec) -> Result<usize> { unsafe { syscall2(SYS_CLOCK_GETTIME, clock, tp as *mut TimeSpec as usize) } } /// Copy and transform a file descriptor pub fn dup(fd: usize, buf: &[u8]) -> Result<usize> { unsafe { syscall3(SYS_DUP, fd, buf.as_ptr() as usize, buf.len()) } } /// Copy and transform a file descriptor pub fn dup2(fd: usize, newfd: usize, buf: &[u8]) -> Result<usize> { unsafe { syscall4(SYS_DUP2, fd, newfd, buf.as_ptr() as usize, buf.len()) } } /// Exit the current process pub fn exit(status: usize) -> Result<usize> { unsafe { syscall1(SYS_EXIT, status) } } /// Change file permissions pub fn fchmod(fd: usize, mode: u16) -> Result<usize> { unsafe { syscall2(SYS_FCHMOD, fd, mode as usize) } } /// Change file ownership pub fn fchown(fd: usize, uid: u32, gid: u32) -> Result<usize> { unsafe { syscall3(SYS_FCHOWN, fd, uid as usize, gid as usize) } } /// Change file descriptor flags pub fn fcntl(fd: usize, cmd: usize, arg: usize) -> Result<usize> { unsafe { syscall3(SYS_FCNTL, fd, cmd, arg) } } /// Map a file into memory, but with the ability to set the address to map into, either as a hint /// or as a requirement of the map. /// /// # Errors /// `EACCES` - the file descriptor was not open for reading /// `EBADF` - if the file descriptor was invalid /// `ENODEV` - mmapping was not supported /// `EINVAL` - invalid combination of flags /// `EEXIST` - if [`MapFlags::MAP_FIXED`] was set, and the address specified was already in use. 
/// pub unsafe fn fmap(fd: usize, map: &Map) -> Result<usize> { syscall3(SYS_FMAP, fd, map as *const Map as usize, mem::size_of::<Map>()) } /// Unmap whole (or partial) continous memory-mapped files pub unsafe fn funmap(addr: usize, len: usize) -> Result<usize> { syscall2(SYS_FUNMAP, addr, len) } /// Retrieve the canonical path of a file pub fn fpath(fd: usize, buf: &mut [u8]) -> Result<usize> { unsafe { syscall3(SYS_FPATH, fd, buf.as_mut_ptr() as usize, buf.len()) } } /// Rename a file pub fn frename<T: AsRef<str>>(fd: usize, path: T) -> Result<usize> { unsafe { syscall3(SYS_FRENAME, fd, path.as_ref().as_ptr() as usize, path.as_ref().len()) } } /// Get metadata about a file pub fn fstat(fd: usize, stat: &mut Stat) -> Result<usize> { unsafe { syscall3(SYS_FSTAT, fd, stat as *mut Stat as usize, mem::size_of::<Stat>()) } } /// Get metadata about a filesystem pub fn fstatvfs(fd: usize, stat: &mut StatVfs) -> Result<usize> { unsafe { syscall3(SYS_FSTATVFS, fd, stat as *mut StatVfs as usize, mem::size_of::<StatVfs>()) } } /// Sync a file descriptor to its underlying medium pub fn fsync(fd: usize) -> Result<usize> { unsafe { syscall1(SYS_FSYNC, fd) } } /// Truncate or extend a file to a specified length pub fn ftruncate(fd: usize, len: usize) -> Result<usize> { unsafe { syscall2(SYS_FTRUNCATE, fd, len) } } // Change modify and/or access times pub fn futimens(fd: usize, times: &[TimeSpec]) -> Result<usize> { unsafe { syscall3(SYS_FUTIMENS, fd, times.as_ptr() as usize, times.len() * mem::size_of::<TimeSpec>()) } } /// Fast userspace mutex pub unsafe fn futex(addr: *mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32) -> Result<usize> { syscall5(SYS_FUTEX, addr as usize, op, (val as isize) as usize, val2, addr2 as usize) } /// Get the effective group ID pub fn getegid() -> Result<usize> { unsafe { syscall0(SYS_GETEGID) } } /// Get the effective namespace pub fn getens() -> Result<usize> { unsafe { syscall0(SYS_GETENS) } } /// Get the effective user ID pub fn geteuid() -> Result<usize> { unsafe { syscall0(SYS_GETEUID) } } /// Get the current group ID pub fn getgid() -> Result<usize> { unsafe { syscall0(SYS_GETGID) } } /// Get the current namespace pub fn getns() -> Result<usize> { unsafe { syscall0(SYS_GETNS) } } /// Get the current process ID pub fn getpid() -> Result<usize> { unsafe { syscall0(SYS_GETPID) } } /// Get the process group ID pub fn getpgid(pid: usize) -> Result<usize> { unsafe { syscall1(SYS_GETPGID, pid) } } /// Get the parent process ID pub fn getppid() -> Result<usize> { unsafe { syscall0(SYS_GETPPID) } } /// Get the current user ID pub fn getuid() -> Result<usize> { unsafe { syscall0(SYS_GETUID) } } /// Set the I/O privilege level /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `EINVAL` - `level > 3` pub unsafe fn iopl(level: usize) -> Result<usize> { syscall1(SYS_IOPL, level) } /// Send a signal `sig` to the process identified by `pid` pub fn kill(pid: usize, sig: usize) -> Result<usize> { unsafe { syscall2(SYS_KILL, pid, sig) } } /// Create a link to a file pub unsafe fn link(old: *const u8, new: *const u8) -> Result<usize> { syscall2(SYS_LINK, old as usize, new as usize) } /// Seek to `offset` bytes in a file descriptor pub fn lseek(fd: usize, offset: isize, whence: usize) -> Result<usize> { unsafe { syscall3(SYS_LSEEK, fd, offset as usize, whence) } } /// Make a new scheme namespace pub fn mkns(schemes: &[[usize; 2]]) -> Result<usize> { unsafe { syscall2(SYS_MKNS, schemes.as_ptr() as usize, schemes.len()) } } /// Change mapping flags pub unsafe fn mprotect(addr: 
usize, size: usize, flags: MapFlags) -> Result<usize> { syscall3(SYS_MPROTECT, addr, size, flags.bits()) } /// Sleep for the time specified in `req` pub fn nanosleep(req: &TimeSpec, rem: &mut TimeSpec) -> Result<usize> { unsafe { syscall2(SYS_NANOSLEEP, req as *const TimeSpec as usize, rem as *mut TimeSpec as usize) } } /// Open a file pub fn open<T: AsRef<str>>(path: T, flags: usize) -> Result<usize> { unsafe { syscall3(SYS_OPEN, path.as_ref().as_ptr() as usize, path.as_ref().len(), flags) } } /// Allocate frames, linearly in physical memory. /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `ENOMEM` - the system has run out of available memory pub unsafe fn physalloc(size: usize) -> Result<usize> { syscall1(SYS_PHYSALLOC, size) } /// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain /// [`PARTIAL_ALLOC`], this will result in `physalloc3` with `min = 1`. /// /// Refer to the simpler [`physalloc`] and the more complex [`physalloc3`], that this convenience /// function is based on. /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `ENOMEM` - the system has run out of available memory pub unsafe fn physalloc2(size: usize, flags: usize) -> Result<usize> { let mut ret = 1usize; physalloc3(size, flags, &mut ret) } /// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain /// [`PARTIAL_ALLOC`], the `min` parameter specifies the number of frames that have to be allocated /// for this operation to succeed. The return value is the offset of the first frame, and `min` is /// overwritten with the number of frames actually allocated. /// /// Refer to the simpler [`physalloc`] and the simpler library function [`physalloc2`]. /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `ENOMEM` - the system has run out of available memory /// * `EINVAL` - `min = 0` pub unsafe fn physalloc3(size: usize, flags: usize, min: &mut usize) -> Result<usize> { syscall3(SYS_PHYSALLOC3, size, flags, min as *mut usize as usize) } /// Free physically allocated pages /// /// # Errors /// /// * `EPERM` - `uid!= 0` pub unsafe fn physfree(physical_address: usize, size: usize) -> Result<usize> { syscall2(SYS_PHYSFREE, physical_address, size) } /// Map physical memory to virtual memory /// /// # Errors /// /// * `EPERM` - `uid!= 0` pub unsafe fn physmap(physical_address: usize, size: usize, flags: PhysmapFlags) -> Result<usize> { syscall3(SYS_PHYSMAP, physical_address, size, flags.bits()) } /// Create a pair of file descriptors referencing the read and write ends of a pipe pub fn pipe2(fds: &mut [usize; 2], flags: usize) -> Result<usize> { unsafe { syscall2(SYS_PIPE2, fds.as_ptr() as usize, flags) } } /// Read from a file descriptor into a buffer pub fn read(fd: usize, buf: &mut [u8]) -> Result<usize> { unsafe { syscall3(SYS_READ, fd, buf.as_mut_ptr() as usize, buf.len()) } } /// Remove a directory pub fn rmdir<T: AsRef<str>>(path: T) -> Result<usize> { unsafe { syscall2(SYS_RMDIR, path.as_ref().as_ptr() as usize, path.as_ref().len()) } } /// Set the process group ID pub fn setpgid(pid: usize, pgid: usize) -> Result<usize> { unsafe { syscall2(SYS_SETPGID, pid, pgid) } } /// Set the current process group IDs pub fn setregid(rgid: usize, egid: usize) -> Result<usize> { unsafe { syscall2(SYS_SETREGID, rgid, egid) } } /// Make a new scheme namespace pub fn setrens(rns: usize, ens: usize) -> Result<usize>
/// Set the current process user IDs pub fn setreuid(ruid: usize, euid: usize) -> Result<usize> { unsafe { syscall2(SYS_SETREUID, ruid, euid) } } /// Set up a signal handler pub fn sigaction(sig: usize, act: Option<&SigAction>, oldact: Option<&mut SigAction>) -> Result<usize> { unsafe { syscall4(SYS_SIGACTION, sig, act.map(|x| x as *const _).unwrap_or_else(ptr::null) as usize, oldact.map(|x| x as *mut _).unwrap_or_else(ptr::null_mut) as usize, restorer as usize) } } /// Get and/or set signal masks pub fn sigprocmask(how: usize, set: Option<&[u64; 2]>, oldset: Option<&mut [u64; 2]>) -> Result<usize> { unsafe { syscall3(SYS_SIGPROCMASK, how, set.map(|x| x as *const _).unwrap_or_else(ptr::null) as usize, oldset.map(|x| x as *mut _).unwrap_or_else(ptr::null_mut) as usize) } } // Return from signal handler pub fn sigreturn() -> Result<usize> { unsafe { syscall0(SYS_SIGRETURN) } } /// Set the file mode creation mask pub fn umask(mask: usize) -> Result<usize> { unsafe { syscall1(SYS_UMASK, mask) } } /// Remove a file pub fn unlink<T: AsRef<str>>(path: T) -> Result<usize> { unsafe { syscall2(SYS_UNLINK, path.as_ref().as_ptr() as usize, path.as_ref().len()) } } /// Convert a virtual address to a physical one /// /// # Errors /// /// * `EPERM` - `uid!= 0` pub unsafe fn virttophys(virtual_address: usize) -> Result<usize> { syscall1(SYS_VIRTTOPHYS, virtual_address) } /// Check if a child process has exited or received a signal pub fn waitpid(pid: usize, status: &mut usize, options: WaitFlags) -> Result<usize> { unsafe { syscall3(SYS_WAITPID, pid, status as *mut usize as usize, options.bits()) } } /// Write a buffer to a file descriptor /// /// The kernel will attempt to write the bytes in `buf` to the file descriptor `fd`, returning /// either an `Err`, explained below, or `Ok(count)` where `count` is the number of bytes which /// were written. /// /// # Errors /// /// * `EAGAIN` - the file descriptor was opened with `O_NONBLOCK` and writing would block /// * `EBADF` - the file descriptor is not valid or is not open for writing /// * `EFAULT` - `buf` does not point to the process's addressible memory /// * `EIO` - an I/O error occurred /// * `ENOSPC` - the device containing the file descriptor has no room for data /// * `EPIPE` - the file descriptor refers to a pipe or socket whose reading end is closed pub fn write(fd: usize, buf: &[u8]) -> Result<usize> { unsafe { syscall3(SYS_WRITE, fd, buf.as_ptr() as usize, buf.len()) } } /// Yield the process's time slice to the kernel /// /// This function will return Ok(0) on success pub fn sched_yield() -> Result<usize> { unsafe { syscall0(SYS_YIELD) } }
{ unsafe { syscall2(SYS_SETRENS, rns, ens) } }
identifier_body
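`sigaction` in this record converts its `Option<&SigAction>` arguments into nullable raw pointers with `map(|x| x as *const _).unwrap_or_else(ptr::null)`. That conversion in isolation, with a stand-in type:

```rust
use core::ptr;

struct SigAction; // stand-in for the real struct in `data`

/// `None` becomes a null pointer; `Some(&x)` becomes the address of `x`.
fn opt_to_ptr(act: Option<&SigAction>) -> *const SigAction {
    act.map(|x| x as *const _).unwrap_or_else(ptr::null)
}

fn main() {
    assert!(opt_to_ptr(None).is_null());
    let a = SigAction;
    assert_eq!(opt_to_ptr(Some(&a)), &a as *const SigAction);
}
```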
call.rs
use super::arch::*; use super::data::{Map, SigAction, Stat, StatVfs, TimeSpec}; use super::error::Result; use super::flag::*; use super::number::*; use core::{mem, ptr}; // Signal restorer extern "C" fn restorer() -> ! { sigreturn().unwrap(); unreachable!(); } /// Close a file pub fn close(fd: usize) -> Result<usize> { unsafe { syscall1(SYS_CLOSE, fd) }
unsafe { syscall2(SYS_CLOCK_GETTIME, clock, tp as *mut TimeSpec as usize) } } /// Copy and transform a file descriptor pub fn dup(fd: usize, buf: &[u8]) -> Result<usize> { unsafe { syscall3(SYS_DUP, fd, buf.as_ptr() as usize, buf.len()) } } /// Copy and transform a file descriptor pub fn dup2(fd: usize, newfd: usize, buf: &[u8]) -> Result<usize> { unsafe { syscall4(SYS_DUP2, fd, newfd, buf.as_ptr() as usize, buf.len()) } } /// Exit the current process pub fn exit(status: usize) -> Result<usize> { unsafe { syscall1(SYS_EXIT, status) } } /// Change file permissions pub fn fchmod(fd: usize, mode: u16) -> Result<usize> { unsafe { syscall2(SYS_FCHMOD, fd, mode as usize) } } /// Change file ownership pub fn fchown(fd: usize, uid: u32, gid: u32) -> Result<usize> { unsafe { syscall3(SYS_FCHOWN, fd, uid as usize, gid as usize) } } /// Change file descriptor flags pub fn fcntl(fd: usize, cmd: usize, arg: usize) -> Result<usize> { unsafe { syscall3(SYS_FCNTL, fd, cmd, arg) } } /// Map a file into memory, but with the ability to set the address to map into, either as a hint /// or as a requirement of the map. /// /// # Errors /// `EACCES` - the file descriptor was not open for reading /// `EBADF` - if the file descriptor was invalid /// `ENODEV` - mmapping was not supported /// `EINVAL` - invalid combination of flags /// `EEXIST` - if [`MapFlags::MAP_FIXED`] was set, and the address specified was already in use. /// pub unsafe fn fmap(fd: usize, map: &Map) -> Result<usize> { syscall3(SYS_FMAP, fd, map as *const Map as usize, mem::size_of::<Map>()) } /// Unmap whole (or partial) continous memory-mapped files pub unsafe fn funmap(addr: usize, len: usize) -> Result<usize> { syscall2(SYS_FUNMAP, addr, len) } /// Retrieve the canonical path of a file pub fn fpath(fd: usize, buf: &mut [u8]) -> Result<usize> { unsafe { syscall3(SYS_FPATH, fd, buf.as_mut_ptr() as usize, buf.len()) } } /// Rename a file pub fn frename<T: AsRef<str>>(fd: usize, path: T) -> Result<usize> { unsafe { syscall3(SYS_FRENAME, fd, path.as_ref().as_ptr() as usize, path.as_ref().len()) } } /// Get metadata about a file pub fn fstat(fd: usize, stat: &mut Stat) -> Result<usize> { unsafe { syscall3(SYS_FSTAT, fd, stat as *mut Stat as usize, mem::size_of::<Stat>()) } } /// Get metadata about a filesystem pub fn fstatvfs(fd: usize, stat: &mut StatVfs) -> Result<usize> { unsafe { syscall3(SYS_FSTATVFS, fd, stat as *mut StatVfs as usize, mem::size_of::<StatVfs>()) } } /// Sync a file descriptor to its underlying medium pub fn fsync(fd: usize) -> Result<usize> { unsafe { syscall1(SYS_FSYNC, fd) } } /// Truncate or extend a file to a specified length pub fn ftruncate(fd: usize, len: usize) -> Result<usize> { unsafe { syscall2(SYS_FTRUNCATE, fd, len) } } // Change modify and/or access times pub fn futimens(fd: usize, times: &[TimeSpec]) -> Result<usize> { unsafe { syscall3(SYS_FUTIMENS, fd, times.as_ptr() as usize, times.len() * mem::size_of::<TimeSpec>()) } } /// Fast userspace mutex pub unsafe fn futex(addr: *mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32) -> Result<usize> { syscall5(SYS_FUTEX, addr as usize, op, (val as isize) as usize, val2, addr2 as usize) } /// Get the effective group ID pub fn getegid() -> Result<usize> { unsafe { syscall0(SYS_GETEGID) } } /// Get the effective namespace pub fn getens() -> Result<usize> { unsafe { syscall0(SYS_GETENS) } } /// Get the effective user ID pub fn geteuid() -> Result<usize> { unsafe { syscall0(SYS_GETEUID) } } /// Get the current group ID pub fn getgid() -> Result<usize> { unsafe { 
syscall0(SYS_GETGID) } } /// Get the current namespace pub fn getns() -> Result<usize> { unsafe { syscall0(SYS_GETNS) } } /// Get the current process ID pub fn getpid() -> Result<usize> { unsafe { syscall0(SYS_GETPID) } } /// Get the process group ID pub fn getpgid(pid: usize) -> Result<usize> { unsafe { syscall1(SYS_GETPGID, pid) } } /// Get the parent process ID pub fn getppid() -> Result<usize> { unsafe { syscall0(SYS_GETPPID) } } /// Get the current user ID pub fn getuid() -> Result<usize> { unsafe { syscall0(SYS_GETUID) } } /// Set the I/O privilege level /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `EINVAL` - `level > 3` pub unsafe fn iopl(level: usize) -> Result<usize> { syscall1(SYS_IOPL, level) } /// Send a signal `sig` to the process identified by `pid` pub fn kill(pid: usize, sig: usize) -> Result<usize> { unsafe { syscall2(SYS_KILL, pid, sig) } } /// Create a link to a file pub unsafe fn link(old: *const u8, new: *const u8) -> Result<usize> { syscall2(SYS_LINK, old as usize, new as usize) } /// Seek to `offset` bytes in a file descriptor pub fn lseek(fd: usize, offset: isize, whence: usize) -> Result<usize> { unsafe { syscall3(SYS_LSEEK, fd, offset as usize, whence) } } /// Make a new scheme namespace pub fn mkns(schemes: &[[usize; 2]]) -> Result<usize> { unsafe { syscall2(SYS_MKNS, schemes.as_ptr() as usize, schemes.len()) } } /// Change mapping flags pub unsafe fn mprotect(addr: usize, size: usize, flags: MapFlags) -> Result<usize> { syscall3(SYS_MPROTECT, addr, size, flags.bits()) } /// Sleep for the time specified in `req` pub fn nanosleep(req: &TimeSpec, rem: &mut TimeSpec) -> Result<usize> { unsafe { syscall2(SYS_NANOSLEEP, req as *const TimeSpec as usize, rem as *mut TimeSpec as usize) } } /// Open a file pub fn open<T: AsRef<str>>(path: T, flags: usize) -> Result<usize> { unsafe { syscall3(SYS_OPEN, path.as_ref().as_ptr() as usize, path.as_ref().len(), flags) } } /// Allocate frames, linearly in physical memory. /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `ENOMEM` - the system has run out of available memory pub unsafe fn physalloc(size: usize) -> Result<usize> { syscall1(SYS_PHYSALLOC, size) } /// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain /// [`PARTIAL_ALLOC`], this will result in `physalloc3` with `min = 1`. /// /// Refer to the simpler [`physalloc`] and the more complex [`physalloc3`], that this convenience /// function is based on. /// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `ENOMEM` - the system has run out of available memory pub unsafe fn physalloc2(size: usize, flags: usize) -> Result<usize> { let mut ret = 1usize; physalloc3(size, flags, &mut ret) } /// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain /// [`PARTIAL_ALLOC`], the `min` parameter specifies the number of frames that have to be allocated /// for this operation to succeed. The return value is the offset of the first frame, and `min` is /// overwritten with the number of frames actually allocated. /// /// Refer to the simpler [`physalloc`] and the simpler library function [`physalloc2`]. 
/// /// # Errors /// /// * `EPERM` - `uid!= 0` /// * `ENOMEM` - the system has run out of available memory /// * `EINVAL` - `min = 0` pub unsafe fn physalloc3(size: usize, flags: usize, min: &mut usize) -> Result<usize> { syscall3(SYS_PHYSALLOC3, size, flags, min as *mut usize as usize) } /// Free physically allocated pages /// /// # Errors /// /// * `EPERM` - `uid!= 0` pub unsafe fn physfree(physical_address: usize, size: usize) -> Result<usize> { syscall2(SYS_PHYSFREE, physical_address, size) } /// Map physical memory to virtual memory /// /// # Errors /// /// * `EPERM` - `uid!= 0` pub unsafe fn physmap(physical_address: usize, size: usize, flags: PhysmapFlags) -> Result<usize> { syscall3(SYS_PHYSMAP, physical_address, size, flags.bits()) } /// Create a pair of file descriptors referencing the read and write ends of a pipe pub fn pipe2(fds: &mut [usize; 2], flags: usize) -> Result<usize> { unsafe { syscall2(SYS_PIPE2, fds.as_ptr() as usize, flags) } } /// Read from a file descriptor into a buffer pub fn read(fd: usize, buf: &mut [u8]) -> Result<usize> { unsafe { syscall3(SYS_READ, fd, buf.as_mut_ptr() as usize, buf.len()) } } /// Remove a directory pub fn rmdir<T: AsRef<str>>(path: T) -> Result<usize> { unsafe { syscall2(SYS_RMDIR, path.as_ref().as_ptr() as usize, path.as_ref().len()) } } /// Set the process group ID pub fn setpgid(pid: usize, pgid: usize) -> Result<usize> { unsafe { syscall2(SYS_SETPGID, pid, pgid) } } /// Set the current process group IDs pub fn setregid(rgid: usize, egid: usize) -> Result<usize> { unsafe { syscall2(SYS_SETREGID, rgid, egid) } } /// Make a new scheme namespace pub fn setrens(rns: usize, ens: usize) -> Result<usize> { unsafe { syscall2(SYS_SETRENS, rns, ens) } } /// Set the current process user IDs pub fn setreuid(ruid: usize, euid: usize) -> Result<usize> { unsafe { syscall2(SYS_SETREUID, ruid, euid) } } /// Set up a signal handler pub fn sigaction(sig: usize, act: Option<&SigAction>, oldact: Option<&mut SigAction>) -> Result<usize> { unsafe { syscall4(SYS_SIGACTION, sig, act.map(|x| x as *const _).unwrap_or_else(ptr::null) as usize, oldact.map(|x| x as *mut _).unwrap_or_else(ptr::null_mut) as usize, restorer as usize) } } /// Get and/or set signal masks pub fn sigprocmask(how: usize, set: Option<&[u64; 2]>, oldset: Option<&mut [u64; 2]>) -> Result<usize> { unsafe { syscall3(SYS_SIGPROCMASK, how, set.map(|x| x as *const _).unwrap_or_else(ptr::null) as usize, oldset.map(|x| x as *mut _).unwrap_or_else(ptr::null_mut) as usize) } } // Return from signal handler pub fn sigreturn() -> Result<usize> { unsafe { syscall0(SYS_SIGRETURN) } } /// Set the file mode creation mask pub fn umask(mask: usize) -> Result<usize> { unsafe { syscall1(SYS_UMASK, mask) } } /// Remove a file pub fn unlink<T: AsRef<str>>(path: T) -> Result<usize> { unsafe { syscall2(SYS_UNLINK, path.as_ref().as_ptr() as usize, path.as_ref().len()) } } /// Convert a virtual address to a physical one /// /// # Errors /// /// * `EPERM` - `uid!= 0` pub unsafe fn virttophys(virtual_address: usize) -> Result<usize> { syscall1(SYS_VIRTTOPHYS, virtual_address) } /// Check if a child process has exited or received a signal pub fn waitpid(pid: usize, status: &mut usize, options: WaitFlags) -> Result<usize> { unsafe { syscall3(SYS_WAITPID, pid, status as *mut usize as usize, options.bits()) } } /// Write a buffer to a file descriptor /// /// The kernel will attempt to write the bytes in `buf` to the file descriptor `fd`, returning /// either an `Err`, explained below, or `Ok(count)` where `count` is the 
number of bytes which /// were written. /// /// # Errors /// /// * `EAGAIN` - the file descriptor was opened with `O_NONBLOCK` and writing would block /// * `EBADF` - the file descriptor is not valid or is not open for writing /// * `EFAULT` - `buf` does not point to the process's addressable memory /// * `EIO` - an I/O error occurred /// * `ENOSPC` - the device containing the file descriptor has no room for data /// * `EPIPE` - the file descriptor refers to a pipe or socket whose reading end is closed pub fn write(fd: usize, buf: &[u8]) -> Result<usize> { unsafe { syscall3(SYS_WRITE, fd, buf.as_ptr() as usize, buf.len()) } } /// Yield the process's time slice to the kernel /// /// This function will return `Ok(0)` on success pub fn sched_yield() -> Result<usize> { unsafe { syscall0(SYS_YIELD) } }
} /// Get the current system time pub fn clock_gettime(clock: usize, tp: &mut TimeSpec) -> Result<usize> {
random_line_split
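This record splits inside `clock_gettime`; both it and `nanosleep` pass `TimeSpec` values by pointer. A usage sketch, assuming `TimeSpec` exposes public `tv_sec`/`tv_nsec` fields and derives `Default` (plausible for this crate's `data` module, but an assumption here):

```rust
// Sleep for 1.5 s; on early wakeup `rem` holds the time left unslept.
fn sleep_1500ms() -> Result<usize> {
    let req = TimeSpec { tv_sec: 1, tv_nsec: 500_000_000 };
    let mut rem = TimeSpec::default();
    nanosleep(&req, &mut rem)
}
```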
frame_info.rs
//! This module is used for having backtraces in the Wasm runtime. //! Once the Compiler has compiled the ModuleInfo, and we have a set of //! compiled functions (addresses and function index) and a module, //! then we can use this to set a backtrace for that module. //! //! # Example //! ```ignore //! use wasmer_vm::{ModuleInfo, FRAME_INFO}; //! //! let module: ModuleInfo =...; //! FRAME_INFO.register(module, compiled_functions); //! ``` use crate::serialize::SerializableFunctionFrameInfo; use std::cmp; use std::collections::BTreeMap; use std::sync::{Arc, RwLock}; use wasmer_compiler::{CompiledFunctionFrameInfo, SourceLoc, TrapInformation}; use wasmer_types::entity::{BoxedSlice, EntityRef, PrimaryMap}; use wasmer_types::LocalFunctionIndex; use wasmer_vm::{FunctionBodyPtr, ModuleInfo}; lazy_static::lazy_static! { /// This is a global cache of backtrace frame information for all active /// /// This global cache is used during `Trap` creation to symbolicate frames. /// This is populated on module compilation, and it is cleared out whenever /// all references to a module are dropped. pub static ref FRAME_INFO: RwLock<GlobalFrameInfo> = Default::default(); } #[derive(Default)] pub struct GlobalFrameInfo { /// An internal map that keeps track of backtrace frame information for /// each module. /// /// This map is morally a map of ranges to a map of information for that /// module. Each module is expected to reside in a disjoint section of /// contiguous memory. No modules can overlap. /// /// The key of this map is the highest address in the module and the value /// is the module's information, which also contains the start address. ranges: BTreeMap<usize, ModuleInfoFrameInfo>, } /// An RAII structure used to unregister a module's frame information when the /// module is destroyed. pub struct GlobalFrameInfoRegistration { /// The key that will be removed from the global `ranges` map when this is /// dropped. key: usize, } struct ModuleInfoFrameInfo { start: usize, functions: BTreeMap<usize, FunctionInfo>, module: Arc<ModuleInfo>, frame_infos: PrimaryMap<LocalFunctionIndex, SerializableFunctionFrameInfo>, } impl ModuleInfoFrameInfo { fn function_debug_info( &self, local_index: LocalFunctionIndex, ) -> &SerializableFunctionFrameInfo { &self.frame_infos.get(local_index).unwrap() } fn process_function_debug_info(&mut self, local_index: LocalFunctionIndex) { let func = self.frame_infos.get_mut(local_index).unwrap(); let processed: CompiledFunctionFrameInfo = match func { SerializableFunctionFrameInfo::Processed(_) => { // This should be a no-op on processed info return; } SerializableFunctionFrameInfo::Unprocessed(unprocessed) => unprocessed.deserialize(), }; *func = SerializableFunctionFrameInfo::Processed(processed) } fn processed_function_frame_info( &self, local_index: LocalFunctionIndex, ) -> &CompiledFunctionFrameInfo { match self.function_debug_info(local_index) { SerializableFunctionFrameInfo::Processed(di) => &di, _ => unreachable!("frame info should already be processed"), } } /// Gets a function given a pc fn function_info(&self, pc: usize) -> Option<&FunctionInfo> { let (end, func) = self.functions.range(pc..).next()?; if pc < func.start || *end < pc { return None; } Some(func) } } struct FunctionInfo { start: usize, local_index: LocalFunctionIndex, } impl GlobalFrameInfo { /// Fetches frame information about a program counter in a backtrace. /// /// Returns an object if this `pc` is known to some previously registered /// module, or returns `None` if no information can be found. 
pub fn lookup_frame_info(&self, pc: usize) -> Option<FrameInfo> { let module = self.module_info(pc)?; let func = module.function_info(pc)?; // Use our relative position from the start of the function to find the // machine instruction that corresponds to `pc`, which then allows us to // map that to a wasm original source location. let rel_pos = pc - func.start; let instr_map = &module .processed_function_frame_info(func.local_index) .address_map; let pos = match instr_map .instructions .binary_search_by_key(&rel_pos, |map| map.code_offset) { // Exact hit! Ok(pos) => Some(pos), // This *would* be at the first slot in the array, so no // instructions cover `pc`. Err(0) => None, // This would be at the `nth` slot, so check `n-1` to see if we're // part of that instruction. This happens due to the minus one when // this function is called from trap symbolication, where we don't // always get called with a `pc` that's an exact instruction // boundary. Err(n) => { let instr = &instr_map.instructions[n - 1]; if instr.code_offset <= rel_pos && rel_pos < instr.code_offset + instr.code_len { Some(n - 1) } else
} }; // In debug mode for now assert that we found a mapping for `pc` within // the function, because otherwise something is buggy along the way and // not accounting for all the instructions. This isn't super critical // though so we can omit this check in release mode. debug_assert!(pos.is_some(), "failed to find instruction for {:x}", pc); let instr = match pos { Some(pos) => instr_map.instructions[pos].srcloc, None => instr_map.start_srcloc, }; let func_index = module.module.func_index(func.local_index); Some(FrameInfo { module_name: module.module.name(), func_index: func_index.index() as u32, function_name: module.module.function_names.get(&func_index).cloned(), instr, func_start: instr_map.start_srcloc, }) } /// Fetches trap information about a program counter in a backtrace. pub fn lookup_trap_info(&self, pc: usize) -> Option<&TrapInformation> { let module = self.module_info(pc)?; let func = module.function_info(pc)?; let traps = &module.processed_function_frame_info(func.local_index).traps; let idx = traps .binary_search_by_key(&((pc - func.start) as u32), |info| info.code_offset) .ok()?; Some(&traps[idx]) } /// Should process the frame before anything? pub fn should_process_frame(&self, pc: usize) -> Option<bool> { let module = self.module_info(pc)?; let func = module.function_info(pc)?; let extra_func_info = module.function_debug_info(func.local_index); Some(extra_func_info.is_unprocessed()) } /// Process the frame info in case is not yet processed pub fn maybe_process_frame(&mut self, pc: usize) -> Option<()> { let module = self.module_info_mut(pc)?; let func = module.function_info(pc)?; let func_local_index = func.local_index; module.process_function_debug_info(func_local_index); Some(()) } /// Gets a module given a pc fn module_info(&self, pc: usize) -> Option<&ModuleInfoFrameInfo> { let (end, module_info) = self.ranges.range(pc..).next()?; if pc < module_info.start || *end < pc { return None; } Some(module_info) } /// Gets a module given a pc fn module_info_mut(&mut self, pc: usize) -> Option<&mut ModuleInfoFrameInfo> { let (end, module_info) = self.ranges.range_mut(pc..).next()?; if pc < module_info.start || *end < pc { return None; } Some(module_info) } } impl Drop for GlobalFrameInfoRegistration { fn drop(&mut self) { if let Ok(mut info) = FRAME_INFO.write() { info.ranges.remove(&self.key); } } } /// Registers a new compiled module's frame information. /// /// This function will register the `names` information for all of the /// compiled functions within `module`. If the `module` has no functions /// then `None` will be returned. Otherwise the returned object, when /// dropped, will be used to unregister all name information from this map. 
pub fn register( module: Arc<ModuleInfo>, finished_functions: &BoxedSlice<LocalFunctionIndex, FunctionBodyPtr>, frame_infos: PrimaryMap<LocalFunctionIndex, SerializableFunctionFrameInfo>, ) -> Option<GlobalFrameInfoRegistration> { let mut min = usize::max_value(); let mut max = 0; let mut functions = BTreeMap::new(); for (i, allocated) in finished_functions.iter() { let (start, end) = unsafe { let ptr = (***allocated).as_ptr(); let len = (***allocated).len(); (ptr as usize, ptr as usize + len) }; min = cmp::min(min, start); max = cmp::max(max, end); let func = FunctionInfo { start, local_index: i, }; assert!(functions.insert(end, func).is_none()); } if functions.is_empty() { return None; } let mut info = FRAME_INFO.write().unwrap(); // First up assert that our chunk of jit functions doesn't collide with // any other known chunks of jit functions... if let Some((_, prev)) = info.ranges.range(max..).next() { assert!(prev.start > max); } if let Some((prev_end, _)) = info.ranges.range(..=min).next_back() { assert!(*prev_end < min); } //... then insert our range and assert nothing was there previously let prev = info.ranges.insert( max, ModuleInfoFrameInfo { start: min, functions, module, frame_infos, }, ); assert!(prev.is_none()); Some(GlobalFrameInfoRegistration { key: max }) } /// Description of a frame in a backtrace for a [`Trap`]. /// /// Whenever a WebAssembly trap occurs an instance of [`Trap`] is created. Each /// [`Trap`] has a backtrace of the WebAssembly frames that led to the trap, and /// each frame is described by this structure. /// /// [`Trap`]: crate::Trap #[derive(Debug, Clone)] pub struct FrameInfo { module_name: String, func_index: u32, function_name: Option<String>, func_start: SourceLoc, instr: SourceLoc, } impl FrameInfo { /// Returns the WebAssembly function index for this frame. /// /// This function index is the index in the function index space of the /// WebAssembly module that this frame comes from. pub fn func_index(&self) -> u32 { self.func_index } /// Returns the identifer of the module that this frame is for. /// /// ModuleInfo identifiers are present in the `name` section of a WebAssembly /// binary, but this may not return the exact item in the `name` section. /// ModuleInfo names can be overwritten at construction time or perhaps inferred /// from file names. The primary purpose of this function is to assist in /// debugging and therefore may be tweaked over time. /// /// This function returns `None` when no name can be found or inferred. pub fn module_name(&self) -> &str { &self.module_name } /// Returns a descriptive name of the function for this frame, if one is /// available. /// /// The name of this function may come from the `name` section of the /// WebAssembly binary, or wasmer may try to infer a better name for it if /// not available, for example the name of the export if it's exported. /// /// This return value is primarily used for debugging and human-readable /// purposes for things like traps. Note that the exact return value may be /// tweaked over time here and isn't guaranteed to be something in /// particular about a wasm module due to its primary purpose of assisting /// in debugging. /// /// This function returns `None` when no name could be inferred. pub fn function_name(&self) -> Option<&str> { self.function_name.as_deref() } /// Returns the offset within the original wasm module this frame's program /// counter was at. 
/// /// The offset here is the offset from the beginning of the original wasm /// module to the instruction that this frame points to. pub fn module_offset(&self) -> usize { self.instr.bits() as usize } /// Returns the offset from the original wasm module's function to this /// frame's program counter. /// /// The offset here is the offset from the beginning of the defining /// function of this frame (within the wasm module) to the instruction this /// frame points to. pub fn func_offset(&self) -> usize { (self.instr.bits() - self.func_start.bits()) as usize } }
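The `module_info`/`module_info_mut` lookups above work because `ranges` is keyed by each module's *highest* address: `ranges.range(pc..).next()` returns the first entry whose end is at or past `pc`, which is the only candidate that could contain it. A minimal, self-contained sketch of that end-keyed `BTreeMap` pattern (the `Range` type and addresses here are illustrative stand-ins, not from the crate):

```rust
use std::collections::BTreeMap;

/// Value stored per range: its start address (the map key is its end address).
struct Range {
    start: usize,
}

fn lookup(ranges: &BTreeMap<usize, Range>, pc: usize) -> Option<&Range> {
    // The first entry with end >= pc is the only possible match.
    let (end, range) = ranges.range(pc..).next()?;
    if pc < range.start || *end < pc {
        return None;
    }
    Some(range)
}

fn main() {
    let mut ranges = BTreeMap::new();
    ranges.insert(0x2000, Range { start: 0x1000 }); // module A: 0x1000..=0x2000
    ranges.insert(0x5000, Range { start: 0x4000 }); // module B: 0x4000..=0x5000
    assert!(lookup(&ranges, 0x1800).is_some()); // inside module A
    assert!(lookup(&ranges, 0x3000).is_none()); // in the gap between modules
}
```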
{ None }
conditional_block
frame_info.rs
//! This module provides backtraces in the Wasm runtime.
//! Once the Compiler has compiled the ModuleInfo, and we have a set of
//! compiled functions (addresses and function indices) and a module,
//! then we can use this to set a backtrace for that module.
//!
//! # Example
//! ```ignore
//! use wasmer_vm::{ModuleInfo, FRAME_INFO};
//!
//! let module: ModuleInfo = ...;
//! FRAME_INFO.register(module, compiled_functions);
//! ```
use crate::serialize::SerializableFunctionFrameInfo;
use std::cmp;
use std::collections::BTreeMap;
use std::sync::{Arc, RwLock};
use wasmer_compiler::{CompiledFunctionFrameInfo, SourceLoc, TrapInformation};
use wasmer_types::entity::{BoxedSlice, EntityRef, PrimaryMap};
use wasmer_types::LocalFunctionIndex;
use wasmer_vm::{FunctionBodyPtr, ModuleInfo};

lazy_static::lazy_static! {
    /// This is a global cache of backtrace frame information for all active
    /// modules.
    ///
    /// This global cache is used during `Trap` creation to symbolicate frames.
    /// This is populated on module compilation, and it is cleared out whenever
    /// all references to a module are dropped.
    pub static ref FRAME_INFO: RwLock<GlobalFrameInfo> = Default::default();
}

#[derive(Default)]
pub struct GlobalFrameInfo {
    /// An internal map that keeps track of backtrace frame information for
    /// each module.
    ///
    /// This map is morally a map of ranges to a map of information for that
    /// module. Each module is expected to reside in a disjoint section of
    /// contiguous memory. No modules can overlap.
    ///
    /// The key of this map is the highest address in the module and the value
    /// is the module's information, which also contains the start address.
    ranges: BTreeMap<usize, ModuleInfoFrameInfo>,
}

/// An RAII structure used to unregister a module's frame information when the
/// module is destroyed.
pub struct GlobalFrameInfoRegistration {
    /// The key that will be removed from the global `ranges` map when this is
    /// dropped.
    key: usize,
}

struct ModuleInfoFrameInfo {
    start: usize,
    functions: BTreeMap<usize, FunctionInfo>,
    module: Arc<ModuleInfo>,
    frame_infos: PrimaryMap<LocalFunctionIndex, SerializableFunctionFrameInfo>,
}

impl ModuleInfoFrameInfo {
    fn function_debug_info(
        &self,
        local_index: LocalFunctionIndex,
    ) -> &SerializableFunctionFrameInfo {
        &self.frame_infos.get(local_index).unwrap()
    }

    fn process_function_debug_info(&mut self, local_index: LocalFunctionIndex) {
        let func = self.frame_infos.get_mut(local_index).unwrap();
        let processed: CompiledFunctionFrameInfo = match func {
            SerializableFunctionFrameInfo::Processed(_) => {
                // This should be a no-op on processed info
                return;
            }
            SerializableFunctionFrameInfo::Unprocessed(unprocessed) => unprocessed.deserialize(),
        };
        *func = SerializableFunctionFrameInfo::Processed(processed)
    }

    fn processed_function_frame_info(
        &self,
        local_index: LocalFunctionIndex,
    ) -> &CompiledFunctionFrameInfo {
        match self.function_debug_info(local_index) {
            SerializableFunctionFrameInfo::Processed(di) => &di,
            _ => unreachable!("frame info should already be processed"),
        }
    }

    /// Gets a function given a pc
    fn function_info(&self, pc: usize) -> Option<&FunctionInfo> {
        let (end, func) = self.functions.range(pc..).next()?;
        if pc < func.start || *end < pc {
            return None;
        }
        Some(func)
    }
}

struct FunctionInfo {
    start: usize,
    local_index: LocalFunctionIndex,
}

impl GlobalFrameInfo {
    /// Fetches frame information about a program counter in a backtrace.
    ///
    /// Returns an object if this `pc` is known to some previously registered
    /// module, or returns `None` if no information can be found.
    pub fn lookup_frame_info(&self, pc: usize) -> Option<FrameInfo> {
        let module = self.module_info(pc)?;
        let func = module.function_info(pc)?;

        // Use our relative position from the start of the function to find the
        // machine instruction that corresponds to `pc`, which then allows us to
        // map that to a wasm original source location.
        let rel_pos = pc - func.start;
        let instr_map = &module
            .processed_function_frame_info(func.local_index)
            .address_map;
        let pos = match instr_map
            .instructions
            .binary_search_by_key(&rel_pos, |map| map.code_offset)
        {
            // Exact hit!
            Ok(pos) => Some(pos),

            // This *would* be at the first slot in the array, so no
            // instructions cover `pc`.
            Err(0) => None,

            // This would be at the `n`th slot, so check `n-1` to see if we're
            // part of that instruction. This happens due to the minus one when
            // this function is called from trap symbolication, where we don't
            // always get called with a `pc` that's an exact instruction
            // boundary.
            Err(n) => {
                let instr = &instr_map.instructions[n - 1];
                if instr.code_offset <= rel_pos && rel_pos < instr.code_offset + instr.code_len {
                    Some(n - 1)
                } else {
                    None
                }
            }
        };

        // In debug mode for now assert that we found a mapping for `pc` within
        // the function, because otherwise something is buggy along the way and
        // not accounting for all the instructions. This isn't super critical
        // though so we can omit this check in release mode.
        debug_assert!(pos.is_some(), "failed to find instruction for {:x}", pc);

        let instr = match pos {
            Some(pos) => instr_map.instructions[pos].srcloc,
            None => instr_map.start_srcloc,
        };
        let func_index = module.module.func_index(func.local_index);
        Some(FrameInfo {
            module_name: module.module.name(),
            func_index: func_index.index() as u32,
            function_name: module.module.function_names.get(&func_index).cloned(),
            instr,
            func_start: instr_map.start_srcloc,
        })
    }

    /// Fetches trap information about a program counter in a backtrace.
    pub fn lookup_trap_info(&self, pc: usize) -> Option<&TrapInformation> {
        let module = self.module_info(pc)?;
        let func = module.function_info(pc)?;
        let traps = &module.processed_function_frame_info(func.local_index).traps;
        let idx = traps
            .binary_search_by_key(&((pc - func.start) as u32), |info| info.code_offset)
            .ok()?;
        Some(&traps[idx])
    }

    /// Returns whether the frame for this `pc` still needs to be processed
    /// before it can be queried.
    pub fn should_process_frame(&self, pc: usize) -> Option<bool>
    /// Processes the frame info if it has not been processed yet.
    pub fn maybe_process_frame(&mut self, pc: usize) -> Option<()> {
        let module = self.module_info_mut(pc)?;
        let func = module.function_info(pc)?;
        let func_local_index = func.local_index;
        module.process_function_debug_info(func_local_index);
        Some(())
    }

    /// Gets a module given a pc
    fn module_info(&self, pc: usize) -> Option<&ModuleInfoFrameInfo> {
        let (end, module_info) = self.ranges.range(pc..).next()?;
        if pc < module_info.start || *end < pc {
            return None;
        }
        Some(module_info)
    }

    /// Gets a mutable module given a pc
    fn module_info_mut(&mut self, pc: usize) -> Option<&mut ModuleInfoFrameInfo> {
        let (end, module_info) = self.ranges.range_mut(pc..).next()?;
        if pc < module_info.start || *end < pc {
            return None;
        }
        Some(module_info)
    }
}

impl Drop for GlobalFrameInfoRegistration {
    fn drop(&mut self) {
        if let Ok(mut info) = FRAME_INFO.write() {
            info.ranges.remove(&self.key);
        }
    }
}

/// Registers a new compiled module's frame information.
///
/// This function will register the `names` information for all of the
/// compiled functions within `module`. If the `module` has no functions
/// then `None` will be returned. Otherwise the returned object, when
/// dropped, will be used to unregister all name information from this map.
pub fn register(
    module: Arc<ModuleInfo>,
    finished_functions: &BoxedSlice<LocalFunctionIndex, FunctionBodyPtr>,
    frame_infos: PrimaryMap<LocalFunctionIndex, SerializableFunctionFrameInfo>,
) -> Option<GlobalFrameInfoRegistration> {
    let mut min = usize::max_value();
    let mut max = 0;
    let mut functions = BTreeMap::new();
    for (i, allocated) in finished_functions.iter() {
        let (start, end) = unsafe {
            let ptr = (***allocated).as_ptr();
            let len = (***allocated).len();
            (ptr as usize, ptr as usize + len)
        };
        min = cmp::min(min, start);
        max = cmp::max(max, end);
        let func = FunctionInfo {
            start,
            local_index: i,
        };
        assert!(functions.insert(end, func).is_none());
    }
    if functions.is_empty() {
        return None;
    }

    let mut info = FRAME_INFO.write().unwrap();

    // First up assert that our chunk of jit functions doesn't collide with
    // any other known chunks of jit functions ...
    if let Some((_, prev)) = info.ranges.range(max..).next() {
        assert!(prev.start > max);
    }
    if let Some((prev_end, _)) = info.ranges.range(..=min).next_back() {
        assert!(*prev_end < min);
    }

    // ... then insert our range and assert nothing was there previously
    let prev = info.ranges.insert(
        max,
        ModuleInfoFrameInfo {
            start: min,
            functions,
            module,
            frame_infos,
        },
    );
    assert!(prev.is_none());
    Some(GlobalFrameInfoRegistration { key: max })
}

/// Description of a frame in a backtrace for a [`Trap`].
///
/// Whenever a WebAssembly trap occurs an instance of [`Trap`] is created. Each
/// [`Trap`] has a backtrace of the WebAssembly frames that led to the trap, and
/// each frame is described by this structure.
///
/// [`Trap`]: crate::Trap
#[derive(Debug, Clone)]
pub struct FrameInfo {
    module_name: String,
    func_index: u32,
    function_name: Option<String>,
    func_start: SourceLoc,
    instr: SourceLoc,
}

impl FrameInfo {
    /// Returns the WebAssembly function index for this frame.
    ///
    /// This function index is the index in the function index space of the
    /// WebAssembly module that this frame comes from.
    pub fn func_index(&self) -> u32 {
        self.func_index
    }

    /// Returns the identifier of the module that this frame is for.
    ///
    /// ModuleInfo identifiers are present in the `name` section of a WebAssembly
    /// binary, but this may not return the exact item in the `name` section.
/// ModuleInfo names can be overwritten at construction time or perhaps inferred /// from file names. The primary purpose of this function is to assist in /// debugging and therefore may be tweaked over time. /// /// This function returns `None` when no name can be found or inferred. pub fn module_name(&self) -> &str { &self.module_name } /// Returns a descriptive name of the function for this frame, if one is /// available. /// /// The name of this function may come from the `name` section of the /// WebAssembly binary, or wasmer may try to infer a better name for it if /// not available, for example the name of the export if it's exported. /// /// This return value is primarily used for debugging and human-readable /// purposes for things like traps. Note that the exact return value may be /// tweaked over time here and isn't guaranteed to be something in /// particular about a wasm module due to its primary purpose of assisting /// in debugging. /// /// This function returns `None` when no name could be inferred. pub fn function_name(&self) -> Option<&str> { self.function_name.as_deref() } /// Returns the offset within the original wasm module this frame's program /// counter was at. /// /// The offset here is the offset from the beginning of the original wasm /// module to the instruction that this frame points to. pub fn module_offset(&self) -> usize { self.instr.bits() as usize } /// Returns the offset from the original wasm module's function to this /// frame's program counter. /// /// The offset here is the offset from the beginning of the defining /// function of this frame (within the wasm module) to the instruction this /// frame points to. pub fn func_offset(&self) -> usize { (self.instr.bits() - self.func_start.bits()) as usize } }
{ let module = self.module_info(pc)?; let func = module.function_info(pc)?; let extra_func_info = module.function_debug_info(func.local_index); Some(extra_func_info.is_unprocessed()) }
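`should_process_frame` and `maybe_process_frame` are split so a caller can hold the cheap shared read lock for the check and only take the exclusive write lock when deserialization is actually required. A hedged sketch of that calling pattern, assuming it lives in the same crate so `FRAME_INFO` is in scope (the `ensure_frame_processed` helper is hypothetical, not part of this file):

```rust
// Hypothetical caller; `pc` would come from a captured backtrace frame.
fn ensure_frame_processed(pc: usize) {
    // Cheap path: a shared read lock is enough to ask the question.
    let needs_processing = FRAME_INFO
        .read()
        .unwrap()
        .should_process_frame(pc)
        .unwrap_or(false);
    if needs_processing {
        // Only now take the exclusive write lock to deserialize the
        // unprocessed frame info in place.
        FRAME_INFO.write().unwrap().maybe_process_frame(pc);
    }
}
```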
identifier_body
frame_info.rs
//! This module is used for having backtraces in the Wasm runtime. //! Once the Compiler has compiled the ModuleInfo, and we have a set of //! compiled functions (addresses and function index) and a module, //! then we can use this to set a backtrace for that module. //! //! # Example //! ```ignore //! use wasmer_vm::{ModuleInfo, FRAME_INFO}; //! //! let module: ModuleInfo =...; //! FRAME_INFO.register(module, compiled_functions); //! ``` use crate::serialize::SerializableFunctionFrameInfo; use std::cmp; use std::collections::BTreeMap; use std::sync::{Arc, RwLock}; use wasmer_compiler::{CompiledFunctionFrameInfo, SourceLoc, TrapInformation}; use wasmer_types::entity::{BoxedSlice, EntityRef, PrimaryMap}; use wasmer_types::LocalFunctionIndex; use wasmer_vm::{FunctionBodyPtr, ModuleInfo}; lazy_static::lazy_static! { /// This is a global cache of backtrace frame information for all active /// /// This global cache is used during `Trap` creation to symbolicate frames. /// This is populated on module compilation, and it is cleared out whenever /// all references to a module are dropped. pub static ref FRAME_INFO: RwLock<GlobalFrameInfo> = Default::default(); } #[derive(Default)] pub struct GlobalFrameInfo { /// An internal map that keeps track of backtrace frame information for /// each module. /// /// This map is morally a map of ranges to a map of information for that /// module. Each module is expected to reside in a disjoint section of /// contiguous memory. No modules can overlap. /// /// The key of this map is the highest address in the module and the value /// is the module's information, which also contains the start address. ranges: BTreeMap<usize, ModuleInfoFrameInfo>, } /// An RAII structure used to unregister a module's frame information when the /// module is destroyed. pub struct GlobalFrameInfoRegistration { /// The key that will be removed from the global `ranges` map when this is /// dropped. key: usize, } struct ModuleInfoFrameInfo { start: usize, functions: BTreeMap<usize, FunctionInfo>, module: Arc<ModuleInfo>, frame_infos: PrimaryMap<LocalFunctionIndex, SerializableFunctionFrameInfo>, } impl ModuleInfoFrameInfo { fn function_debug_info( &self, local_index: LocalFunctionIndex, ) -> &SerializableFunctionFrameInfo { &self.frame_infos.get(local_index).unwrap() } fn process_function_debug_info(&mut self, local_index: LocalFunctionIndex) { let func = self.frame_infos.get_mut(local_index).unwrap(); let processed: CompiledFunctionFrameInfo = match func { SerializableFunctionFrameInfo::Processed(_) => { // This should be a no-op on processed info return; } SerializableFunctionFrameInfo::Unprocessed(unprocessed) => unprocessed.deserialize(), }; *func = SerializableFunctionFrameInfo::Processed(processed) } fn processed_function_frame_info( &self, local_index: LocalFunctionIndex, ) -> &CompiledFunctionFrameInfo { match self.function_debug_info(local_index) { SerializableFunctionFrameInfo::Processed(di) => &di, _ => unreachable!("frame info should already be processed"), } } /// Gets a function given a pc fn function_info(&self, pc: usize) -> Option<&FunctionInfo> { let (end, func) = self.functions.range(pc..).next()?; if pc < func.start || *end < pc { return None; } Some(func) } } struct FunctionInfo { start: usize, local_index: LocalFunctionIndex, } impl GlobalFrameInfo { /// Fetches frame information about a program counter in a backtrace. /// /// Returns an object if this `pc` is known to some previously registered /// module, or returns `None` if no information can be found. 
pub fn lookup_frame_info(&self, pc: usize) -> Option<FrameInfo> { let module = self.module_info(pc)?; let func = module.function_info(pc)?; // Use our relative position from the start of the function to find the // machine instruction that corresponds to `pc`, which then allows us to // map that to a wasm original source location. let rel_pos = pc - func.start; let instr_map = &module .processed_function_frame_info(func.local_index) .address_map; let pos = match instr_map .instructions .binary_search_by_key(&rel_pos, |map| map.code_offset) { // Exact hit! Ok(pos) => Some(pos), // This *would* be at the first slot in the array, so no // instructions cover `pc`. Err(0) => None, // This would be at the `nth` slot, so check `n-1` to see if we're // part of that instruction. This happens due to the minus one when // this function is called form trap symbolication, where we don't // always get called with a `pc` that's an exact instruction // boundary. Err(n) => { let instr = &instr_map.instructions[n - 1]; if instr.code_offset <= rel_pos && rel_pos < instr.code_offset + instr.code_len { Some(n - 1) } else { None } } }; // In debug mode for now assert that we found a mapping for `pc` within // the function, because otherwise something is buggy along the way and // not accounting for all the instructions. This isn't super critical // though so we can omit this check in release mode. debug_assert!(pos.is_some(), "failed to find instruction for {:x}", pc); let instr = match pos { Some(pos) => instr_map.instructions[pos].srcloc, None => instr_map.start_srcloc, }; let func_index = module.module.func_index(func.local_index); Some(FrameInfo { module_name: module.module.name(), func_index: func_index.index() as u32, function_name: module.module.function_names.get(&func_index).cloned(), instr, func_start: instr_map.start_srcloc, }) } /// Fetches trap information about a program counter in a backtrace. pub fn lookup_trap_info(&self, pc: usize) -> Option<&TrapInformation> { let module = self.module_info(pc)?; let func = module.function_info(pc)?; let traps = &module.processed_function_frame_info(func.local_index).traps; let idx = traps .binary_search_by_key(&((pc - func.start) as u32), |info| info.code_offset) .ok()?; Some(&traps[idx]) } /// Should process the frame before anything? pub fn should_process_frame(&self, pc: usize) -> Option<bool> { let module = self.module_info(pc)?; let func = module.function_info(pc)?; let extra_func_info = module.function_debug_info(func.local_index); Some(extra_func_info.is_unprocessed()) } /// Process the frame info in case is not yet processed pub fn maybe_process_frame(&mut self, pc: usize) -> Option<()> { let module = self.module_info_mut(pc)?; let func = module.function_info(pc)?; let func_local_index = func.local_index; module.process_function_debug_info(func_local_index); Some(()) } /// Gets a module given a pc fn module_info(&self, pc: usize) -> Option<&ModuleInfoFrameInfo> { let (end, module_info) = self.ranges.range(pc..).next()?; if pc < module_info.start || *end < pc { return None; } Some(module_info) } /// Gets a module given a pc fn module_info_mut(&mut self, pc: usize) -> Option<&mut ModuleInfoFrameInfo> { let (end, module_info) = self.ranges.range_mut(pc..).next()?; if pc < module_info.start || *end < pc { return None; } Some(module_info) } } impl Drop for GlobalFrameInfoRegistration { fn drop(&mut self) { if let Ok(mut info) = FRAME_INFO.write() { info.ranges.remove(&self.key); } } } /// Registers a new compiled module's frame information. 
/// /// This function will register the `names` information for all of the /// compiled functions within `module`. If the `module` has no functions /// then `None` will be returned. Otherwise the returned object, when /// dropped, will be used to unregister all name information from this map. pub fn register( module: Arc<ModuleInfo>, finished_functions: &BoxedSlice<LocalFunctionIndex, FunctionBodyPtr>, frame_infos: PrimaryMap<LocalFunctionIndex, SerializableFunctionFrameInfo>, ) -> Option<GlobalFrameInfoRegistration> { let mut min = usize::max_value(); let mut max = 0; let mut functions = BTreeMap::new(); for (i, allocated) in finished_functions.iter() { let (start, end) = unsafe { let ptr = (***allocated).as_ptr(); let len = (***allocated).len(); (ptr as usize, ptr as usize + len) }; min = cmp::min(min, start); max = cmp::max(max, end); let func = FunctionInfo { start, local_index: i, }; assert!(functions.insert(end, func).is_none()); } if functions.is_empty() { return None; } let mut info = FRAME_INFO.write().unwrap(); // First up assert that our chunk of jit functions doesn't collide with // any other known chunks of jit functions... if let Some((_, prev)) = info.ranges.range(max..).next() { assert!(prev.start > max); } if let Some((prev_end, _)) = info.ranges.range(..=min).next_back() { assert!(*prev_end < min); } //... then insert our range and assert nothing was there previously let prev = info.ranges.insert( max, ModuleInfoFrameInfo { start: min, functions, module, frame_infos, }, ); assert!(prev.is_none()); Some(GlobalFrameInfoRegistration { key: max }) } /// Description of a frame in a backtrace for a [`Trap`]. /// /// Whenever a WebAssembly trap occurs an instance of [`Trap`] is created. Each /// [`Trap`] has a backtrace of the WebAssembly frames that led to the trap, and /// each frame is described by this structure. /// /// [`Trap`]: crate::Trap #[derive(Debug, Clone)] pub struct FrameInfo { module_name: String, func_index: u32, function_name: Option<String>, func_start: SourceLoc, instr: SourceLoc, } impl FrameInfo { /// Returns the WebAssembly function index for this frame. /// /// This function index is the index in the function index space of the /// WebAssembly module that this frame comes from. pub fn func_index(&self) -> u32 { self.func_index } /// Returns the identifer of the module that this frame is for. /// /// ModuleInfo identifiers are present in the `name` section of a WebAssembly /// binary, but this may not return the exact item in the `name` section. /// ModuleInfo names can be overwritten at construction time or perhaps inferred /// from file names. The primary purpose of this function is to assist in /// debugging and therefore may be tweaked over time. /// /// This function returns `None` when no name can be found or inferred. pub fn module_name(&self) -> &str { &self.module_name } /// Returns a descriptive name of the function for this frame, if one is /// available. /// /// The name of this function may come from the `name` section of the /// WebAssembly binary, or wasmer may try to infer a better name for it if /// not available, for example the name of the export if it's exported. /// /// This return value is primarily used for debugging and human-readable /// purposes for things like traps. Note that the exact return value may be /// tweaked over time here and isn't guaranteed to be something in /// particular about a wasm module due to its primary purpose of assisting /// in debugging. 
/// /// This function returns `None` when no name could be inferred. pub fn function_name(&self) -> Option<&str> { self.function_name.as_deref() } /// Returns the offset within the original wasm module this frame's program /// counter was at.
/// /// The offset here is the offset from the beginning of the original wasm /// module to the instruction that this frame points to. pub fn module_offset(&self) -> usize { self.instr.bits() as usize } /// Returns the offset from the original wasm module's function to this /// frame's program counter. /// /// The offset here is the offset from the beginning of the defining /// function of this frame (within the wasm module) to the instruction this /// frame points to. pub fn func_offset(&self) -> usize { (self.instr.bits() - self.func_start.bits()) as usize } }
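`module_offset` and `func_offset` are two views of the same `SourceLoc` bits: the function-relative offset is the instruction's module offset minus the function's starting offset. A tiny numeric sketch of that relationship, with plain integers standing in for `SourceLoc::bits()`:

```rust
fn main() {
    let func_start_bits: u32 = 0x120; // offset of the function within the wasm module
    let instr_bits: u32 = 0x158; // offset of the frame's instruction

    let module_offset = instr_bits as usize; // as in FrameInfo::module_offset
    let func_offset = (instr_bits - func_start_bits) as usize; // as in FrameInfo::func_offset

    // The two offsets differ by exactly the function's start offset.
    assert_eq!(module_offset - func_offset, func_start_bits as usize);
}
```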
random_line_split
frame_info.rs
//! This module is used for having backtraces in the Wasm runtime. //! Once the Compiler has compiled the ModuleInfo, and we have a set of //! compiled functions (addresses and function index) and a module, //! then we can use this to set a backtrace for that module. //! //! # Example //! ```ignore //! use wasmer_vm::{ModuleInfo, FRAME_INFO}; //! //! let module: ModuleInfo =...; //! FRAME_INFO.register(module, compiled_functions); //! ``` use crate::serialize::SerializableFunctionFrameInfo; use std::cmp; use std::collections::BTreeMap; use std::sync::{Arc, RwLock}; use wasmer_compiler::{CompiledFunctionFrameInfo, SourceLoc, TrapInformation}; use wasmer_types::entity::{BoxedSlice, EntityRef, PrimaryMap}; use wasmer_types::LocalFunctionIndex; use wasmer_vm::{FunctionBodyPtr, ModuleInfo}; lazy_static::lazy_static! { /// This is a global cache of backtrace frame information for all active /// /// This global cache is used during `Trap` creation to symbolicate frames. /// This is populated on module compilation, and it is cleared out whenever /// all references to a module are dropped. pub static ref FRAME_INFO: RwLock<GlobalFrameInfo> = Default::default(); } #[derive(Default)] pub struct
{ /// An internal map that keeps track of backtrace frame information for /// each module. /// /// This map is morally a map of ranges to a map of information for that /// module. Each module is expected to reside in a disjoint section of /// contiguous memory. No modules can overlap. /// /// The key of this map is the highest address in the module and the value /// is the module's information, which also contains the start address. ranges: BTreeMap<usize, ModuleInfoFrameInfo>, } /// An RAII structure used to unregister a module's frame information when the /// module is destroyed. pub struct GlobalFrameInfoRegistration { /// The key that will be removed from the global `ranges` map when this is /// dropped. key: usize, } struct ModuleInfoFrameInfo { start: usize, functions: BTreeMap<usize, FunctionInfo>, module: Arc<ModuleInfo>, frame_infos: PrimaryMap<LocalFunctionIndex, SerializableFunctionFrameInfo>, } impl ModuleInfoFrameInfo { fn function_debug_info( &self, local_index: LocalFunctionIndex, ) -> &SerializableFunctionFrameInfo { &self.frame_infos.get(local_index).unwrap() } fn process_function_debug_info(&mut self, local_index: LocalFunctionIndex) { let func = self.frame_infos.get_mut(local_index).unwrap(); let processed: CompiledFunctionFrameInfo = match func { SerializableFunctionFrameInfo::Processed(_) => { // This should be a no-op on processed info return; } SerializableFunctionFrameInfo::Unprocessed(unprocessed) => unprocessed.deserialize(), }; *func = SerializableFunctionFrameInfo::Processed(processed) } fn processed_function_frame_info( &self, local_index: LocalFunctionIndex, ) -> &CompiledFunctionFrameInfo { match self.function_debug_info(local_index) { SerializableFunctionFrameInfo::Processed(di) => &di, _ => unreachable!("frame info should already be processed"), } } /// Gets a function given a pc fn function_info(&self, pc: usize) -> Option<&FunctionInfo> { let (end, func) = self.functions.range(pc..).next()?; if pc < func.start || *end < pc { return None; } Some(func) } } struct FunctionInfo { start: usize, local_index: LocalFunctionIndex, } impl GlobalFrameInfo { /// Fetches frame information about a program counter in a backtrace. /// /// Returns an object if this `pc` is known to some previously registered /// module, or returns `None` if no information can be found. pub fn lookup_frame_info(&self, pc: usize) -> Option<FrameInfo> { let module = self.module_info(pc)?; let func = module.function_info(pc)?; // Use our relative position from the start of the function to find the // machine instruction that corresponds to `pc`, which then allows us to // map that to a wasm original source location. let rel_pos = pc - func.start; let instr_map = &module .processed_function_frame_info(func.local_index) .address_map; let pos = match instr_map .instructions .binary_search_by_key(&rel_pos, |map| map.code_offset) { // Exact hit! Ok(pos) => Some(pos), // This *would* be at the first slot in the array, so no // instructions cover `pc`. Err(0) => None, // This would be at the `nth` slot, so check `n-1` to see if we're // part of that instruction. This happens due to the minus one when // this function is called form trap symbolication, where we don't // always get called with a `pc` that's an exact instruction // boundary. 
Err(n) => { let instr = &instr_map.instructions[n - 1]; if instr.code_offset <= rel_pos && rel_pos < instr.code_offset + instr.code_len { Some(n - 1) } else { None } } }; // In debug mode for now assert that we found a mapping for `pc` within // the function, because otherwise something is buggy along the way and // not accounting for all the instructions. This isn't super critical // though so we can omit this check in release mode. debug_assert!(pos.is_some(), "failed to find instruction for {:x}", pc); let instr = match pos { Some(pos) => instr_map.instructions[pos].srcloc, None => instr_map.start_srcloc, }; let func_index = module.module.func_index(func.local_index); Some(FrameInfo { module_name: module.module.name(), func_index: func_index.index() as u32, function_name: module.module.function_names.get(&func_index).cloned(), instr, func_start: instr_map.start_srcloc, }) } /// Fetches trap information about a program counter in a backtrace. pub fn lookup_trap_info(&self, pc: usize) -> Option<&TrapInformation> { let module = self.module_info(pc)?; let func = module.function_info(pc)?; let traps = &module.processed_function_frame_info(func.local_index).traps; let idx = traps .binary_search_by_key(&((pc - func.start) as u32), |info| info.code_offset) .ok()?; Some(&traps[idx]) } /// Should process the frame before anything? pub fn should_process_frame(&self, pc: usize) -> Option<bool> { let module = self.module_info(pc)?; let func = module.function_info(pc)?; let extra_func_info = module.function_debug_info(func.local_index); Some(extra_func_info.is_unprocessed()) } /// Process the frame info in case is not yet processed pub fn maybe_process_frame(&mut self, pc: usize) -> Option<()> { let module = self.module_info_mut(pc)?; let func = module.function_info(pc)?; let func_local_index = func.local_index; module.process_function_debug_info(func_local_index); Some(()) } /// Gets a module given a pc fn module_info(&self, pc: usize) -> Option<&ModuleInfoFrameInfo> { let (end, module_info) = self.ranges.range(pc..).next()?; if pc < module_info.start || *end < pc { return None; } Some(module_info) } /// Gets a module given a pc fn module_info_mut(&mut self, pc: usize) -> Option<&mut ModuleInfoFrameInfo> { let (end, module_info) = self.ranges.range_mut(pc..).next()?; if pc < module_info.start || *end < pc { return None; } Some(module_info) } } impl Drop for GlobalFrameInfoRegistration { fn drop(&mut self) { if let Ok(mut info) = FRAME_INFO.write() { info.ranges.remove(&self.key); } } } /// Registers a new compiled module's frame information. /// /// This function will register the `names` information for all of the /// compiled functions within `module`. If the `module` has no functions /// then `None` will be returned. Otherwise the returned object, when /// dropped, will be used to unregister all name information from this map. 
pub fn register( module: Arc<ModuleInfo>, finished_functions: &BoxedSlice<LocalFunctionIndex, FunctionBodyPtr>, frame_infos: PrimaryMap<LocalFunctionIndex, SerializableFunctionFrameInfo>, ) -> Option<GlobalFrameInfoRegistration> { let mut min = usize::max_value(); let mut max = 0; let mut functions = BTreeMap::new(); for (i, allocated) in finished_functions.iter() { let (start, end) = unsafe { let ptr = (***allocated).as_ptr(); let len = (***allocated).len(); (ptr as usize, ptr as usize + len) }; min = cmp::min(min, start); max = cmp::max(max, end); let func = FunctionInfo { start, local_index: i, }; assert!(functions.insert(end, func).is_none()); } if functions.is_empty() { return None; } let mut info = FRAME_INFO.write().unwrap(); // First up assert that our chunk of jit functions doesn't collide with // any other known chunks of jit functions... if let Some((_, prev)) = info.ranges.range(max..).next() { assert!(prev.start > max); } if let Some((prev_end, _)) = info.ranges.range(..=min).next_back() { assert!(*prev_end < min); } //... then insert our range and assert nothing was there previously let prev = info.ranges.insert( max, ModuleInfoFrameInfo { start: min, functions, module, frame_infos, }, ); assert!(prev.is_none()); Some(GlobalFrameInfoRegistration { key: max }) } /// Description of a frame in a backtrace for a [`Trap`]. /// /// Whenever a WebAssembly trap occurs an instance of [`Trap`] is created. Each /// [`Trap`] has a backtrace of the WebAssembly frames that led to the trap, and /// each frame is described by this structure. /// /// [`Trap`]: crate::Trap #[derive(Debug, Clone)] pub struct FrameInfo { module_name: String, func_index: u32, function_name: Option<String>, func_start: SourceLoc, instr: SourceLoc, } impl FrameInfo { /// Returns the WebAssembly function index for this frame. /// /// This function index is the index in the function index space of the /// WebAssembly module that this frame comes from. pub fn func_index(&self) -> u32 { self.func_index } /// Returns the identifer of the module that this frame is for. /// /// ModuleInfo identifiers are present in the `name` section of a WebAssembly /// binary, but this may not return the exact item in the `name` section. /// ModuleInfo names can be overwritten at construction time or perhaps inferred /// from file names. The primary purpose of this function is to assist in /// debugging and therefore may be tweaked over time. /// /// This function returns `None` when no name can be found or inferred. pub fn module_name(&self) -> &str { &self.module_name } /// Returns a descriptive name of the function for this frame, if one is /// available. /// /// The name of this function may come from the `name` section of the /// WebAssembly binary, or wasmer may try to infer a better name for it if /// not available, for example the name of the export if it's exported. /// /// This return value is primarily used for debugging and human-readable /// purposes for things like traps. Note that the exact return value may be /// tweaked over time here and isn't guaranteed to be something in /// particular about a wasm module due to its primary purpose of assisting /// in debugging. /// /// This function returns `None` when no name could be inferred. pub fn function_name(&self) -> Option<&str> { self.function_name.as_deref() } /// Returns the offset within the original wasm module this frame's program /// counter was at. 
/// /// The offset here is the offset from the beginning of the original wasm /// module to the instruction that this frame points to. pub fn module_offset(&self) -> usize { self.instr.bits() as usize } /// Returns the offset from the original wasm module's function to this /// frame's program counter. /// /// The offset here is the offset from the beginning of the defining /// function of this frame (within the wasm module) to the instruction this /// frame points to. pub fn func_offset(&self) -> usize { (self.instr.bits() - self.func_start.bits()) as usize } }
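Both lookups above reduce to a binary search over records sorted by `code_offset`: `lookup_trap_info` accepts only exact hits, while `lookup_frame_info` must also probe the `Err(n)` neighbor because a trap `pc` can land inside an instruction rather than on its boundary. A self-contained sketch of the two probes (the local `Entry` type is a stand-in for `TrapInformation` and the address-map entries):

```rust
struct Entry {
    code_offset: u32,
    code_len: u32,
}

/// Exact-hit lookup, as in `lookup_trap_info`.
fn find_exact(entries: &[Entry], rel_pos: u32) -> Option<&Entry> {
    let idx = entries
        .binary_search_by_key(&rel_pos, |e| e.code_offset)
        .ok()?;
    Some(&entries[idx])
}

/// Covering-instruction lookup, as in `lookup_frame_info`.
fn find_covering(entries: &[Entry], rel_pos: u32) -> Option<usize> {
    match entries.binary_search_by_key(&rel_pos, |e| e.code_offset) {
        Ok(pos) => Some(pos),
        Err(0) => None, // before the first instruction
        Err(n) => {
            // Check whether `rel_pos` falls inside the previous instruction.
            let e = &entries[n - 1];
            if e.code_offset <= rel_pos && rel_pos < e.code_offset + e.code_len {
                Some(n - 1)
            } else {
                None
            }
        }
    }
}

fn main() {
    let map = [
        Entry { code_offset: 0, code_len: 4 },
        Entry { code_offset: 4, code_len: 8 },
    ];
    assert!(find_exact(&map, 4).is_some());
    assert!(find_exact(&map, 6).is_none()); // not an instruction boundary
    assert_eq!(find_covering(&map, 6), Some(1)); // inside the second instruction
}
```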
GlobalFrameInfo
identifier_name
lib.rs
# struct UserCreatedData { //! # user_id: String, //! # } //! # //! fn router(t: MessageType, v: MajorVersion) -> Option<&'static str> { //! match (t, v) { //! (MessageType::UserCreated, MajorVersion(1)) => Some("dev-user-created-v1"), //! _ => None, //! } //! } //! //! // create a publisher instance //! let publisher = MockPublisher::default(); //! let hedwig = Hedwig::new( //! schema, //! "myapp", //! publisher, //! router, //! )?; //! //! async { //! let published_ids = hedwig.publish(Message::new( //! MessageType::UserCreated, //! Version(MajorVersion(1), MinorVersion(0)), //! UserCreatedData { user_id: "U_123".into() } //! )).await; //! }; //! //! # Ok(()) //! # } //! ``` #![deny(missing_docs, unused_import_braces, unused_qualifications)] #![warn(trivial_casts, trivial_numeric_casts, unsafe_code, unstable_features)] use std::{ collections::HashMap, fmt, future::Future, mem, time::{SystemTime, UNIX_EPOCH}, }; use futures::stream::StreamExt; use uuid::Uuid; use valico::json_schema::{SchemaError, Scope, ValidationState}; #[cfg(feature = "google")] mod google_publisher; mod mock_publisher; mod null_publisher; /// Implementations of the Publisher trait pub mod publishers { #[cfg(feature = "google")] pub use super::google_publisher::GooglePubSubPublisher; pub use super::mock_publisher::MockPublisher; pub use super::null_publisher::NullPublisher; } const FORMAT_VERSION_V1: Version = Version(MajorVersion(1), MinorVersion(0)); /// All errors that may be returned when instantiating a new Hedwig instance. #[derive(Debug, thiserror::Error)] #[non_exhaustive] pub enum Error { /// Unable to deserialize schema #[error("Unable to deserialize schema")] SchemaDeserialize(#[source] serde_json::Error), /// Schema failed to compile #[error("Schema failed to compile")] SchemaCompile(#[from] SchemaError), /// Unable to serialize message #[error("Unable to serialize message")] MessageSerialize(#[source] serde_json::Error), /// Message is not routable #[error("Message {0} is not routable")] MessageRoute(Uuid), /// Could not parse a schema URL #[error("Could not parse `{1}` as a schema URL")] SchemaUrlParse(#[source] url::ParseError, String), /// Could not resolve the schema URL #[error("Could not resolve `{0}` to a schema")] SchemaUrlResolve(url::Url), /// Could not validate message data #[error("Message data does not validate per the schema: {0}")] DataValidation(String), /// Publisher failed to publish a message #[error("Publisher failed to publish a message batch")] Publisher(#[source] Box<dyn std::error::Error + Send + Sync>), /// Publisher failed to publish multiple batches of messages #[error("Publisher failed to publish multiple batches (total of {} errors)", _1.len() + 1)] PublisherMultiple( #[source] Box<dyn std::error::Error + Send + Sync>, Vec<Box<dyn std::error::Error + Send + Sync>>, ), } type AnyError = Box<dyn std::error::Error + Send + Sync>; /// The special result type for [`Publisher::publish`](trait.Publisher.html) #[derive(Debug)] pub enum PublisherResult<Id> { /// Publisher succeeded. /// /// Contains a vector of published message IDs. Success(Vec<Id>), /// Publisher failed to publish any of the messages. OneError(AnyError, Vec<ValidatedMessage>), /// Publisher failed to publish some of the messages. /// /// The error type has a per-message granularity. 
PerMessage(Vec<Result<Id, (AnyError, ValidatedMessage)>>), } /// Interface for message publishers pub trait Publisher { /// The list of identifiers for successfully published messages type MessageId:'static; /// The future that the `publish` method returns type PublishFuture: Future<Output = PublisherResult<Self::MessageId>> + Send; /// Publish a batch of messages /// /// # Return value /// /// Shall return [`PublisherResult::Success`](PublisherResult::Success) only if all of the /// messages are successfully published. Otherwise `PublisherResult::OneError` or /// `PublisherResult::PerMessage` shall be returned to indicate an error. fn publish(&self, topic: &'static str, messages: Vec<ValidatedMessage>) -> Self::PublishFuture; } /// Type alias for custom headers associated with a message type Headers = HashMap<String, String>; struct Validator { scope: Scope, schema_id: url::Url, } impl Validator { fn new(schema: &str) -> Result<Validator, Error> { let master_schema: serde_json::Value = serde_json::from_str(schema).map_err(Error::SchemaDeserialize)?; let mut scope = Scope::new(); let schema_id = scope.compile(master_schema, false)?; Ok(Validator { scope, schema_id }) } fn validate<D, T>( &self, message: &Message<D, T>, schema: &str, ) -> Result<ValidationState, Error> where D: serde::Serialize, { // convert user.created/1.0 -> user.created/1.* let msg_schema_ptr = schema.trim_end_matches(char::is_numeric).to_owned() + "*"; let msg_schema_url = url::Url::parse(&msg_schema_ptr) .map_err(|e| Error::SchemaUrlParse(e, msg_schema_ptr))?; let msg_schema = self .scope .resolve(&msg_schema_url) .ok_or_else(|| Error::SchemaUrlResolve(msg_schema_url))?; let msg_data = serde_json::to_value(&message.data).map_err(Error::MessageSerialize)?; let validation_state = msg_schema.validate(&msg_data); if!validation_state.is_strictly_valid() { return Err(Error::DataValidation(format!("{:?}", validation_state))); } Ok(validation_state) } } /// Major part component in semver #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, serde::Serialize)] pub struct MajorVersion(pub u8); impl fmt::Display for MajorVersion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } /// Minor part component in semver #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, serde::Serialize)] pub struct MinorVersion(pub u8); impl fmt::Display for MinorVersion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } /// A semver version without patch part #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub struct Version(pub MajorVersion, pub MinorVersion); impl serde::Serialize for Version { fn serialize<S>( &self, serializer: S, ) -> Result<<S as serde::Serializer>::Ok, <S as serde::Serializer>::Error> where S: serde::Serializer, { serializer.serialize_str(format!("{}.{}", self.0, self.1).as_ref()) } } /// Mapping of message types to Hedwig topics /// /// # Examples /// ``` /// # use serde::Serialize; /// # use strum_macros::IntoStaticStr; /// use hedwig::{MajorVersion, MessageRouter}; /// /// # #[derive(Clone, Copy, IntoStaticStr, Hash, PartialEq, Eq)] /// # enum MessageType { /// # #[strum(serialize = "user.created")] /// # UserCreated, /// # } /// # /// let r: MessageRouter<MessageType> = |t, v| match (t, v) { /// (MessageType::UserCreated, MajorVersion(1)) => Some("user-created-v1"), /// _ => None, /// }; /// ``` pub type MessageRouter<T> = fn(T, MajorVersion) -> Option<&'static str>; /// The core type in this library #[allow(missing_debug_implementations)] pub struct 
Hedwig<T, P> {
    validator: Validator,
    publisher_name: String,
    message_router: MessageRouter<T>,
    publisher: P,
}

impl<T, P> Hedwig<T, P>
where
    P: Publisher,
{
    /// Creates a new Hedwig instance
    ///
    /// # Arguments
    ///
    /// * `schema`: The JSON schema content. It's up to the caller to read the schema;
    /// * `publisher_name`: Name of the publisher service which will be included in the message
    ///   metadata;
    /// * `publisher`: An implementation of Publisher;
    /// * `message_router`: A mapping from message types and major versions to topics;
    pub fn new(
        schema: &str,
        publisher_name: &str,
        publisher: P,
        message_router: MessageRouter<T>,
    ) -> Result<Hedwig<T, P>, Error> {
        Ok(Hedwig {
            validator: Validator::new(schema)?,
            publisher_name: String::from(publisher_name),
            message_router,
            publisher,
        })
    }

    /// Create a batch of messages to publish
    ///
    /// This allows failed messages to be retried transparently and messages to be sent in
    /// batches larger than one, leading to potential throughput gains.
    pub fn build_batch(&self) -> PublishBatch<T, P> {
        PublishBatch {
            hedwig: self,
            messages: Vec::new(),
        }
    }

    /// Publish a single message
    ///
    /// Note that, unlike the batch builder, this does not allow recovering failed-to-publish
    /// messages.
    pub async fn publish<D>(&self, msg: Message<D, T>) -> Result<P::MessageId, Error>
    where
        D: serde::Serialize,
        T: Copy + Into<&'static str>,
    {
        let mut builder = self.build_batch();
        builder.message(msg)?;
        builder.publish_one().await
    }
}

/// A builder for publishing in batches
///
/// Among other things this structure also enables transparent retrying of failed-to-publish
/// messages.
#[allow(missing_debug_implementations)]
pub struct PublishBatch<'hedwig, T, P> {
    hedwig: &'hedwig Hedwig<T, P>,
    messages: Vec<(&'static str, ValidatedMessage)>,
}

impl<'hedwig, T, P> PublishBatch<'hedwig, T, P> {
    /// Add a message to be published in a batch
    pub fn message<D>(&mut self, msg: Message<D, T>) -> Result<&mut Self, Error>
    where
        D: serde::Serialize,
        T: Copy + Into<&'static str>,
    {
        let data_type = msg.data_type;
        let schema_version = msg.data_schema_version;
        let data_type_str = msg.data_type.into();
        let schema_url = format!(
            "{}#/schemas/{}/{}.{}",
            self.hedwig.validator.schema_id, data_type_str, schema_version.0, schema_version.1,
        );
        self.hedwig.validator.validate(&msg, &schema_url)?;
        let converted = msg
            .into_schema(
                self.hedwig.publisher_name.clone(),
                schema_url,
                FORMAT_VERSION_V1,
            )
            .map_err(Error::MessageSerialize)?;
        let route = (self.hedwig.message_router)(data_type, converted.format_version.0)
            .ok_or_else(|| Error::MessageRoute(converted.id))?;
        self.messages.push((route, converted));
        Ok(self)
    }

    /// Publish all the messages
    ///
    /// Does not consume the builder. Will return `Ok` only if and when all of the messages from
    /// the builder have been published successfully. In case of failure, unpublished messages will
    /// remain enqueued in this builder for a subsequent publish call.
pub async fn publish(&mut self) -> Result<Vec<P::MessageId>, Error> where P: Publisher, { let mut message_ids = Vec::with_capacity(self.messages.len()); // Sort the messages by the topic and group them into batches self.messages.sort_by_key(|&(k, _)| k); let mut current_topic = ""; let mut current_batch = Vec::new(); let mut futures_unordered = futures::stream::FuturesUnordered::new(); let publisher = &self.hedwig.publisher; let make_job = |topic: &'static str, batch: Vec<ValidatedMessage>| async move { (topic, publisher.publish(topic, batch).await) }; for (topic, message) in mem::replace(&mut self.messages, Vec::new()) { if current_topic!= topic &&!current_batch.is_empty() { let batch = mem::replace(&mut current_batch, Vec::new()); futures_unordered.push(make_job(current_topic, batch)); } current_topic = topic; current_batch.push(message) } if!current_batch.is_empty() { futures_unordered.push(make_job(current_topic, current_batch)); } let mut errors = Vec::new(); // Extract the results from all the futures while let (Some(result), stream) = futures_unordered.into_future().await { match result { (_, PublisherResult::Success(ids)) => message_ids.extend(ids), (topic, PublisherResult::OneError(err, failed_msgs)) => { self.messages .extend(failed_msgs.into_iter().map(|m| (topic, m))); errors.push(err); } (topic, PublisherResult::PerMessage(vec)) => { for message in vec { match message { Ok(id) => message_ids.push(id), Err((err, failed_msg)) => { self.messages.push((topic, failed_msg)); errors.push(err); } } } } } futures_unordered = stream; } if let Some(first_error) = errors.pop() { Err(if errors.is_empty() { Error::Publisher(first_error) } else { Error::PublisherMultiple(first_error, errors) }) } else { Ok(message_ids) } } /// Publishes just one message /// /// Panics if the builder contains anything but 1 message. async fn publish_one(mut self) -> Result<P::MessageId, Error> where P: Publisher, { let (topic, message) = if let Some(v) = self.messages.pop() { assert!( self.messages.is_empty(), "messages buffer must contain exactly 1 entry!" ); v } else { panic!("messages buffer must contain exactly 1 entry!") }; match self.hedwig.publisher.publish(topic, vec![message]).await { PublisherResult::Success(mut ids) if ids.len() == 1 => Ok(ids.pop().unwrap()), PublisherResult::OneError(err, _) => Err(Error::Publisher(err)), PublisherResult::PerMessage(mut results) if results.len() == 1 => { results.pop().unwrap().map_err(|(e, _)| Error::Publisher(e)) } _ => { panic!("Publisher should have returned 1 result only!"); } } } } /// A message builder #[derive(Clone, Debug, PartialEq)] pub struct Message<D, T> { /// Message identifier id: Option<Uuid>, /// Creation timestamp timestamp: std::time::Duration, /// Message headers headers: Option<Headers>, /// Message data data: D, /// Message type data_type: T, data_schema_version: Version, } impl<D, T> Message<D, T> { /// Construct a new message pub fn new(data_type: T, data_schema_version: Version, data: D) -> Self { Message { id: None, timestamp: SystemTime::now() .duration_since(UNIX_EPOCH) .expect("time is before the unix epoch"), headers: None, data, data_type, data_schema_version, } } /// Overwrite the header map associated with the message /// /// This may be used to track the `request_id`, for example. pub fn headers(mut self, headers: Headers) -> Self { self.headers = Some(headers); self } /// Add a custom header to the message /// /// This may be used to track the `request_id`, for example. 
pub fn header<H, V>(mut self, header: H, value: V) -> Self where H: Into<String>, V: Into<String>, { if let Some(ref mut hdrs) = self.headers { hdrs.insert(header.into(), value.into()); } else { let mut map = HashMap::new(); map.insert(header.into(), value.into()); self.headers = Some(map); } self } /// Add custom id to the message /// /// If not called, a random UUID is generated for this message. pub fn id(mut self, id: Uuid) -> Self { self.id = Some(id); self } fn into_schema( self, publisher_name: String, schema: String, format_version: Version, ) -> Result<ValidatedMessage, serde_json::Error> where D: serde::Serialize,
}

/// Additional metadata associated with a message
#[derive(Clone, Debug, PartialEq, serde::Serialize)]
struct Metadata {
    /// The timestamp when message was created in the publishing service
    timestamp: u128,
    /// Name of the publishing service
    publisher: String,
    /// Custom headers
    ///
    /// This may be used to track request_id, for example.
    headers: Headers,
}

/// A validated message
///
/// This data type is the schema of the JSON messages being sent over the wire.
#[derive(Debug, serde::Serialize)]
pub struct ValidatedMessage {
    /// Unique message identifier
    id: Uuid,
    /// The metadata associated with the message
    metadata: Metadata,
    /// URI of the schema validating this message
    ///
    /// E.g. `https://hedwig.domain.xyz/schemas#/schemas/user.created/1.0`
    schema: String,
    /// Format of the message schema used
    format_version: Version,
    /// The message data
    data: serde_json::Value,
}

#[cfg(test)]
mod tests {
    use super::*;
    use strum_macros::IntoStaticStr;

    #[derive(Clone, Copy, Debug, IntoStaticStr, Hash, PartialEq, Eq)]
    enum MessageType {
        #[strum(serialize = "user.created")]
        UserCreated,
        #[strum(serialize = "invalid.schema")]
        InvalidSchema,
        #[strum(serialize = "invalid.route")]
        InvalidRoute,
    }

    #[derive(Clone, Debug, serde::Serialize, PartialEq)]
    struct UserCreatedData {
        user_id: String,
    }

    const VERSION_1_0: Version = Version(MajorVersion(1), MinorVersion(0));

    const SCHEMA: &str = r#"
    {
      "$id": "https://hedwig.standard.ai/schema",
      "$schema": "https://json-schema.org/draft-04/schema#",
      "description": "Example Schema",
      "schemas": {
        "user.created": {
          "1.*": {
            "description": "A new user was created",
            "type": "object",
            "x-versions": ["1.0"],
            "required": ["user_id"],
            "properties": {
              "user_id": {
                "$ref": "https://hedwig.standard.ai/schema#/definitions/UserId/1.0"
              }
            }
          }
        },
        "invalid.route": {
          "1.*": {}
        }
      },
      "definitions": {
        "UserId": {
          "1.0": { "type": "string" }
        }
      }
    }"#;

    fn router(t: MessageType, v: MajorVersion) -> Option<&'static str> {
        match (t, v) {
            (MessageType::UserCreated, MajorVersion(1)) => Some("dev-user-created-v1"),
            (MessageType::InvalidSchema, MajorVersion(1)) => Some("invalid-schema"),
            _ => None,
        }
    }

    fn mock_hedwig() -> Hedwig<MessageType, publishers::MockPublisher> {
        Hedwig::new(
            SCHEMA,
            "myapp",
            publishers::MockPublisher::default(),
            router,
        )
        .unwrap()
    }
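`Validator::validate` rewrites a concrete schema URL such as `user.created/1.0` into the major-version wildcard `user.created/1.*` before resolving it, so any minor version under the same major validates against one schema definition. The string transform can be checked in isolation; a sketch where `to_major_wildcard` is a hypothetical name for the inline logic:

```rust
/// Mirror of the conversion in `Validator::validate`: strip the trailing
/// numeric minor version and append `*`.
fn to_major_wildcard(schema_url: &str) -> String {
    schema_url.trim_end_matches(char::is_numeric).to_owned() + "*"
}

fn main() {
    assert_eq!(
        to_major_wildcard("https://hedwig.standard.ai/schema#/schemas/user.created/1.0"),
        "https://hedwig.standard.ai/schema#/schemas/user.created/1.*",
    );
}
```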
{ Ok(ValidatedMessage { id: self.id.unwrap_or_else(Uuid::new_v4), metadata: Metadata { timestamp: self.timestamp.as_millis(), publisher: publisher_name, headers: self.headers.unwrap_or_else(HashMap::new), }, schema, format_version, data: serde_json::to_value(self.data)?, }) }
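`PublishBatch::publish` sorts its queued `(topic, message)` pairs by topic and then walks them, cutting a new batch at every topic change so each `Publisher::publish` call receives a single topic's messages. A minimal sketch of that grouping step, with `String`s standing in for `ValidatedMessage`s:

```rust
fn group_by_topic(mut messages: Vec<(&'static str, String)>) -> Vec<(&'static str, Vec<String>)> {
    // Sort by topic so equal topics become contiguous, as `publish` does.
    messages.sort_by_key(|&(topic, _)| topic);
    let mut batches: Vec<(&'static str, Vec<String>)> = Vec::new();
    for (topic, msg) in messages {
        match batches.last_mut() {
            // Same topic as the current batch: extend it.
            Some((t, batch)) if *t == topic => batch.push(msg),
            // Topic changed (or first message): start a new batch.
            _ => batches.push((topic, vec![msg])),
        }
    }
    batches
}

fn main() {
    let batches = group_by_topic(vec![
        ("user-created-v1", "a".into()),
        ("user-deleted-v1", "b".into()),
        ("user-created-v1", "c".into()),
    ]);
    assert_eq!(batches.len(), 2);
    assert_eq!(batches[0].1.len(), 2); // both user-created messages batched together
}
```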
identifier_body
lib.rs
# struct UserCreatedData { //! # user_id: String, //! # } //! # //! fn router(t: MessageType, v: MajorVersion) -> Option<&'static str> { //! match (t, v) { //! (MessageType::UserCreated, MajorVersion(1)) => Some("dev-user-created-v1"), //! _ => None, //! } //! } //! //! // create a publisher instance //! let publisher = MockPublisher::default(); //! let hedwig = Hedwig::new( //! schema, //! "myapp", //! publisher, //! router, //! )?; //! //! async { //! let published_ids = hedwig.publish(Message::new( //! MessageType::UserCreated, //! Version(MajorVersion(1), MinorVersion(0)), //! UserCreatedData { user_id: "U_123".into() } //! )).await; //! }; //! //! # Ok(()) //! # } //! ``` #![deny(missing_docs, unused_import_braces, unused_qualifications)] #![warn(trivial_casts, trivial_numeric_casts, unsafe_code, unstable_features)] use std::{ collections::HashMap, fmt, future::Future, mem, time::{SystemTime, UNIX_EPOCH}, }; use futures::stream::StreamExt; use uuid::Uuid; use valico::json_schema::{SchemaError, Scope, ValidationState}; #[cfg(feature = "google")] mod google_publisher; mod mock_publisher; mod null_publisher; /// Implementations of the Publisher trait pub mod publishers { #[cfg(feature = "google")] pub use super::google_publisher::GooglePubSubPublisher; pub use super::mock_publisher::MockPublisher; pub use super::null_publisher::NullPublisher; } const FORMAT_VERSION_V1: Version = Version(MajorVersion(1), MinorVersion(0)); /// All errors that may be returned when instantiating a new Hedwig instance. #[derive(Debug, thiserror::Error)] #[non_exhaustive] pub enum Error { /// Unable to deserialize schema #[error("Unable to deserialize schema")] SchemaDeserialize(#[source] serde_json::Error), /// Schema failed to compile #[error("Schema failed to compile")] SchemaCompile(#[from] SchemaError), /// Unable to serialize message #[error("Unable to serialize message")] MessageSerialize(#[source] serde_json::Error), /// Message is not routable #[error("Message {0} is not routable")] MessageRoute(Uuid), /// Could not parse a schema URL #[error("Could not parse `{1}` as a schema URL")] SchemaUrlParse(#[source] url::ParseError, String), /// Could not resolve the schema URL #[error("Could not resolve `{0}` to a schema")] SchemaUrlResolve(url::Url), /// Could not validate message data #[error("Message data does not validate per the schema: {0}")] DataValidation(String), /// Publisher failed to publish a message #[error("Publisher failed to publish a message batch")] Publisher(#[source] Box<dyn std::error::Error + Send + Sync>), /// Publisher failed to publish multiple batches of messages #[error("Publisher failed to publish multiple batches (total of {} errors)", _1.len() + 1)] PublisherMultiple( #[source] Box<dyn std::error::Error + Send + Sync>, Vec<Box<dyn std::error::Error + Send + Sync>>, ), } type AnyError = Box<dyn std::error::Error + Send + Sync>; /// The special result type for [`Publisher::publish`](trait.Publisher.html) #[derive(Debug)] pub enum PublisherResult<Id> { /// Publisher succeeded. /// /// Contains a vector of published message IDs. Success(Vec<Id>), /// Publisher failed to publish any of the messages. OneError(AnyError, Vec<ValidatedMessage>), /// Publisher failed to publish some of the messages. /// /// The error type has a per-message granularity. 
PerMessage(Vec<Result<Id, (AnyError, ValidatedMessage)>>), } /// Interface for message publishers pub trait Publisher { /// The list of identifiers for successfully published messages type MessageId:'static; /// The future that the `publish` method returns type PublishFuture: Future<Output = PublisherResult<Self::MessageId>> + Send; /// Publish a batch of messages /// /// # Return value /// /// Shall return [`PublisherResult::Success`](PublisherResult::Success) only if all of the /// messages are successfully published. Otherwise `PublisherResult::OneError` or /// `PublisherResult::PerMessage` shall be returned to indicate an error. fn publish(&self, topic: &'static str, messages: Vec<ValidatedMessage>) -> Self::PublishFuture; } /// Type alias for custom headers associated with a message type Headers = HashMap<String, String>; struct Validator { scope: Scope, schema_id: url::Url, } impl Validator { fn new(schema: &str) -> Result<Validator, Error> { let master_schema: serde_json::Value = serde_json::from_str(schema).map_err(Error::SchemaDeserialize)?; let mut scope = Scope::new(); let schema_id = scope.compile(master_schema, false)?; Ok(Validator { scope, schema_id }) } fn validate<D, T>( &self, message: &Message<D, T>, schema: &str, ) -> Result<ValidationState, Error> where D: serde::Serialize, { // convert user.created/1.0 -> user.created/1.* let msg_schema_ptr = schema.trim_end_matches(char::is_numeric).to_owned() + "*"; let msg_schema_url = url::Url::parse(&msg_schema_ptr) .map_err(|e| Error::SchemaUrlParse(e, msg_schema_ptr))?; let msg_schema = self .scope .resolve(&msg_schema_url) .ok_or_else(|| Error::SchemaUrlResolve(msg_schema_url))?; let msg_data = serde_json::to_value(&message.data).map_err(Error::MessageSerialize)?; let validation_state = msg_schema.validate(&msg_data); if!validation_state.is_strictly_valid() { return Err(Error::DataValidation(format!("{:?}", validation_state))); } Ok(validation_state) } } /// Major part component in semver #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, serde::Serialize)] pub struct MajorVersion(pub u8); impl fmt::Display for MajorVersion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } /// Minor part component in semver #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, serde::Serialize)] pub struct MinorVersion(pub u8); impl fmt::Display for MinorVersion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } /// A semver version without patch part #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub struct Version(pub MajorVersion, pub MinorVersion); impl serde::Serialize for Version { fn serialize<S>( &self, serializer: S, ) -> Result<<S as serde::Serializer>::Ok, <S as serde::Serializer>::Error> where S: serde::Serializer, { serializer.serialize_str(format!("{}.{}", self.0, self.1).as_ref()) } } /// Mapping of message types to Hedwig topics /// /// # Examples /// ``` /// # use serde::Serialize; /// # use strum_macros::IntoStaticStr; /// use hedwig::{MajorVersion, MessageRouter}; /// /// # #[derive(Clone, Copy, IntoStaticStr, Hash, PartialEq, Eq)] /// # enum MessageType { /// # #[strum(serialize = "user.created")] /// # UserCreated, /// # } /// # /// let r: MessageRouter<MessageType> = |t, v| match (t, v) { /// (MessageType::UserCreated, MajorVersion(1)) => Some("user-created-v1"), /// _ => None, /// }; /// ``` pub type MessageRouter<T> = fn(T, MajorVersion) -> Option<&'static str>; /// The core type in this library #[allow(missing_debug_implementations)] pub struct 
Hedwig<T, P> { validator: Validator, publisher_name: String, message_router: MessageRouter<T>, publisher: P, } impl<T, P> Hedwig<T, P> where P: Publisher, { /// Creates a new Hedwig instance /// /// # Arguments /// /// * `schema`: The JSON schema content. It's up to the caller to read the schema; /// * `publisher_name`: Name of the publisher service, which will be included in the message /// metadata; /// * `publisher`: An implementation of Publisher; /// * `message_router`: Mapping of message types and major versions to topics. pub fn new( schema: &str, publisher_name: &str, publisher: P, message_router: MessageRouter<T>, ) -> Result<Hedwig<T, P>, Error> { Ok(Hedwig { validator: Validator::new(schema)?, publisher_name: String::from(publisher_name), message_router, publisher, }) } /// Create a batch of messages to publish /// /// This allows failed messages to be retried transparently and messages to be sent in batches /// larger than one, leading to potential throughput gains. pub fn build_batch(&self) -> PublishBatch<T, P> { PublishBatch { hedwig: self, messages: Vec::new(), } } /// Publish a single message /// /// Note that, unlike the batch builder, this does not allow recovering failed-to-publish /// messages. pub async fn publish<D>(&self, msg: Message<D, T>) -> Result<P::MessageId, Error> where D: serde::Serialize, T: Copy + Into<&'static str>, { let mut builder = self.build_batch(); builder.message(msg)?; builder.publish_one().await } } /// A builder for publishing in batches /// /// Among other things, this structure also enables transparent retrying of failed-to-publish /// messages. #[allow(missing_debug_implementations)] pub struct PublishBatch<'hedwig, T, P> { hedwig: &'hedwig Hedwig<T, P>, messages: Vec<(&'static str, ValidatedMessage)>, } impl<'hedwig, T, P> PublishBatch<'hedwig, T, P> { /// Add a message to be published in a batch pub fn message<D>(&mut self, msg: Message<D, T>) -> Result<&mut Self, Error> where D: serde::Serialize, T: Copy + Into<&'static str>, { let data_type = msg.data_type; let schema_version = msg.data_schema_version; let data_type_str = msg.data_type.into(); let schema_url = format!( "{}#/schemas/{}/{}.{}", self.hedwig.validator.schema_id, data_type_str, schema_version.0, schema_version.1, ); self.hedwig.validator.validate(&msg, &schema_url)?; let converted = msg .into_schema( self.hedwig.publisher_name.clone(), schema_url, FORMAT_VERSION_V1, ) .map_err(Error::MessageSerialize)?; let route = (self.hedwig.message_router)(data_type, converted.format_version.0) .ok_or_else(|| Error::MessageRoute(converted.id))?; self.messages.push((route, converted)); Ok(self) } /// Publish all the messages /// /// Does not consume the builder. Will return `Ok` only if and when all of the messages from /// the builder have been published successfully. In case of failure, unpublished messages will /// remain enqueued in this builder for a subsequent publish call. 
pub async fn publish(&mut self) -> Result<Vec<P::MessageId>, Error> where P: Publisher, { let mut message_ids = Vec::with_capacity(self.messages.len()); // Sort the messages by the topic and group them into batches self.messages.sort_by_key(|&(k, _)| k); let mut current_topic = ""; let mut current_batch = Vec::new(); let mut futures_unordered = futures::stream::FuturesUnordered::new(); let publisher = &self.hedwig.publisher; let make_job = |topic: &'static str, batch: Vec<ValidatedMessage>| async move { (topic, publisher.publish(topic, batch).await) }; for (topic, message) in mem::replace(&mut self.messages, Vec::new()) { if current_topic != topic && !current_batch.is_empty() { let batch = mem::replace(&mut current_batch, Vec::new()); futures_unordered.push(make_job(current_topic, batch)); } current_topic = topic; current_batch.push(message) } if !current_batch.is_empty() { futures_unordered.push(make_job(current_topic, current_batch)); } let mut errors = Vec::new(); // Extract the results from all the futures while let (Some(result), stream) = futures_unordered.into_future().await { match result { (_, PublisherResult::Success(ids)) => message_ids.extend(ids), (topic, PublisherResult::OneError(err, failed_msgs)) => { self.messages .extend(failed_msgs.into_iter().map(|m| (topic, m))); errors.push(err); } (topic, PublisherResult::PerMessage(vec)) => { for message in vec { match message { Ok(id) => message_ids.push(id), Err((err, failed_msg)) => { self.messages.push((topic, failed_msg)); errors.push(err); } } } } } futures_unordered = stream; } if let Some(first_error) = errors.pop() { Err(if errors.is_empty() { Error::Publisher(first_error) } else { Error::PublisherMultiple(first_error, errors) }) } else { Ok(message_ids) } } /// Publishes just one message /// /// Panics if the builder contains anything but 1 message. async fn publish_one(mut self) -> Result<P::MessageId, Error> where P: Publisher, { let (topic, message) = if let Some(v) = self.messages.pop() { assert!( self.messages.is_empty(), "messages buffer must contain exactly 1 entry!" ); v } else { panic!("messages buffer must contain exactly 1 entry!") }; match self.hedwig.publisher.publish(topic, vec![message]).await { PublisherResult::Success(mut ids) if ids.len() == 1 => Ok(ids.pop().unwrap()), PublisherResult::OneError(err, _) => Err(Error::Publisher(err)), PublisherResult::PerMessage(mut results) if results.len() == 1 => { results.pop().unwrap().map_err(|(e, _)| Error::Publisher(e)) } _ => { panic!("Publisher should have returned 1 result only!"); } } } } /// A message builder #[derive(Clone, Debug, PartialEq)] pub struct Message<D, T> { /// Message identifier id: Option<Uuid>, /// Creation timestamp timestamp: std::time::Duration, /// Message headers headers: Option<Headers>, /// Message data data: D, /// Message type data_type: T, data_schema_version: Version, } impl<D, T> Message<D, T> { /// Construct a new message pub fn new(data_type: T, data_schema_version: Version, data: D) -> Self { Message { id: None, timestamp: SystemTime::now() .duration_since(UNIX_EPOCH) .expect("time is before the unix epoch"), headers: None, data, data_type, data_schema_version, } } /// Overwrite the header map associated with the message /// /// This may be used to track the `request_id`, for example. pub fn headers(mut self, headers: Headers) -> Self { self.headers = Some(headers); self } /// Add a custom header to the message /// /// This may be used to track the `request_id`, for example. 
pub fn header<H, V>(mut self, header: H, value: V) -> Self where H: Into<String>, V: Into<String>, { if let Some(ref mut hdrs) = self.headers
else { let mut map = HashMap::new(); map.insert(header.into(), value.into()); self.headers = Some(map); } self } /// Add a custom id to the message /// /// If not called, a random UUID is generated for this message. pub fn id(mut self, id: Uuid) -> Self { self.id = Some(id); self } fn into_schema( self, publisher_name: String, schema: String, format_version: Version, ) -> Result<ValidatedMessage, serde_json::Error> where D: serde::Serialize, { Ok(ValidatedMessage { id: self.id.unwrap_or_else(Uuid::new_v4), metadata: Metadata { timestamp: self.timestamp.as_millis(), publisher: publisher_name, headers: self.headers.unwrap_or_else(HashMap::new), }, schema, format_version, data: serde_json::to_value(self.data)?, }) } } /// Additional metadata associated with a message #[derive(Clone, Debug, PartialEq, serde::Serialize)] struct Metadata { /// The timestamp when the message was created in the publishing service timestamp: u128, /// Name of the publishing service publisher: String, /// Custom headers /// /// This may be used to track request_id, for example. headers: Headers, } /// A validated message /// /// This data type is the schema for the JSON messages being sent over the wire. #[derive(Debug, serde::Serialize)] pub struct ValidatedMessage { /// Unique message identifier id: Uuid, /// The metadata associated with the message metadata: Metadata, /// URI of the schema validating this message /// /// E.g. `https://hedwig.domain.xyz/schemas#/schemas/user.created/1.0` schema: String, /// Format of the message schema used format_version: Version, /// The message data data: serde_json::Value, } #[cfg(test)] mod tests { use super::*; use strum_macros::IntoStaticStr; #[derive(Clone, Copy, Debug, IntoStaticStr, Hash, PartialEq, Eq)] enum MessageType { #[strum(serialize = "user.created")] UserCreated, #[strum(serialize = "invalid.schema")] InvalidSchema, #[strum(serialize = "invalid.route")] InvalidRoute, } #[derive(Clone, Debug, serde::Serialize, PartialEq)] struct UserCreatedData { user_id: String, } const VERSION_1_0: Version = Version(MajorVersion(1), MinorVersion(0)); const SCHEMA: &str = r#" { "$id": "https://hedwig.standard.ai/schema", "$schema": "https://json-schema.org/draft-04/schema#", "description": "Example Schema", "schemas": { "user.created": { "1.*": { "description": "A new user was created", "type": "object", "x-versions": [ "1.0" ], "required": [ "user_id" ], "properties": { "user_id": { "$ref": "https://hedwig.standard.ai/schema#/definitions/UserId/1.0" } } } }, "invalid.route": { "1.*": {} } }, "definitions": { "UserId": { "1.0": { "type": "string" } } } }"#; fn router(t: MessageType, v: MajorVersion) -> Option<&'static str> { match (t, v) { (MessageType::UserCreated, MajorVersion(1)) => Some("dev-user-created-v1"), (MessageType::InvalidSchema, MajorVersion(1)) => Some("invalid-schema"), _ => None, } } fn mock_hedwig() -> Hedwig<MessageType, publishers::MockPublisher> { Hedwig::new( SCHEMA, "myapp", publishers::MockPublisher::default(), router, ) .unwrap() }
{ hdrs.insert(header.into(), value.into()); }
conditional_block
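The row above centers on the `Publisher` trait: Hedwig validates and routes messages, then hands per-topic batches to whatever publisher implementation it was constructed with. As a concrete illustration, here is a minimal sketch of a custom publisher. It assumes only the `Publisher`, `PublisherResult`, and `ValidatedMessage` items shown in the row above; the `CountingPublisher` name and its sequential-id scheme are hypothetical.

```rust
use futures::future::{ready, Ready};

/// A hypothetical publisher that reports success for every message and hands
/// back sequential `u64` identifiers. Useful only as a shape reference for the
/// `Publisher` trait; a real implementation would talk to a message broker.
struct CountingPublisher;

impl Publisher for CountingPublisher {
    type MessageId = u64;
    // `ready` produces an immediately-resolved future, which satisfies the
    // `Future<Output = PublisherResult<Self::MessageId>> + Send` bound.
    type PublishFuture = Ready<PublisherResult<u64>>;

    fn publish(
        &self,
        _topic: &'static str,
        messages: Vec<ValidatedMessage>,
    ) -> Self::PublishFuture {
        // Claim every message in the batch was published.
        ready(PublisherResult::Success(
            (0..messages.len() as u64).collect(),
        ))
    }
}
```

Because `PublishBatch::publish` groups messages by topic before each trait call, an implementation like this only ever sees a single topic per call, and returning `PublisherResult::OneError` with the original messages is what lets the batch builder re-enqueue them for a later retry.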
lib.rs
//! A Rust wrapper around [Zathura's] plugin API, allowing plugin development in //! Rust. //! //! This library wraps the plugin interface and exposes the [`ZathuraPlugin`] //! trait and the [`plugin_entry!`] macro as the primary way to implement a Rust //! plugin for Zathura. //! //! # Examples //! //! ``` //! # use zathura_plugin::*; //! struct PluginType {} //! //! impl ZathuraPlugin for PluginType { //! type DocumentData = (); //! type PageData = (); //! //! fn document_open(doc: DocumentRef<'_>) -> Result<DocumentInfo<Self>, PluginError> { //! unimplemented!() //! } //! //! fn page_init(page: PageRef<'_>, doc_data: &mut ()) -> Result<PageInfo<Self>, PluginError> { //! unimplemented!() //! } //! //! fn page_render( //! page: PageRef<'_>, //! doc_data: &mut Self::DocumentData, //! page_data: &mut Self::PageData, //! cairo: &mut cairo::Context, //! printing: bool, //! ) -> Result<(), PluginError> { //! unimplemented!() //! } //! } //! //! plugin_entry!("MyPlugin", PluginType, ["text/plain", "application/pdf"]); //! ``` //! //! [Zathura's]: https://pwmt.org/projects/zathura/ //! [`ZathuraPlugin`]: trait.ZathuraPlugin.html //! [`plugin_entry!`]: macro.plugin_entry.html #![doc(html_root_url = "https://docs.rs/zathura-plugin/0.4.0")] #![warn(missing_debug_implementations, rust_2018_idioms)] mod document; mod error; mod page; pub use { self::{document::*, error::*, page::*}, zathura_plugin_sys as sys, }; // Used by the macro #[doc(hidden)] pub use pkg_version::{pkg_version_major, pkg_version_minor, pkg_version_patch}; /// Information needed to configure a Zathura document. #[derive(Debug)] pub struct DocumentInfo<P: ZathuraPlugin + ?Sized> { /// Number of pages to create in the document. pub page_count: u32, /// Plugin-specific data to attach to the document. pub plugin_data: P::DocumentData, } /// Information needed to configure a document page. #[derive(Debug)] pub struct PageInfo<P: ZathuraPlugin + ?Sized> { /// Width of the page. pub width: f64, /// Height of the page. pub height: f64, /// Plugin-specific data to attach to the page. pub plugin_data: P::PageData, } /// Trait to be implemented by Zathura plugins. pub trait ZathuraPlugin { /// Plugin-specific data attached to Zathura documents. /// /// If the plugin doesn't need to associate custom data with the document, /// this can be set to `()`. type DocumentData; /// Plugin-specific data attached to every document page. /// /// If the plugin doesn't need to associate custom data with every page, /// this can be set to `()`. type PageData; /// Open a document and read its metadata. /// /// This function has to determine and return the number of pages in the /// document. Zathura will create that number of pages and call the plugin's /// page initialization and rendering methods. fn document_open(doc: DocumentRef<'_>) -> Result<DocumentInfo<Self>, PluginError>; /// Additional hook called before freeing the document resources. /// /// It is not necessary for plugins to implement this. The library will take /// care of freeing the `DocumentData` attached to the document, and Zathura /// itself will free the actual document. fn document_free( doc: DocumentRef<'_>, doc_data: &mut Self::DocumentData, ) -> Result<(), PluginError> { let _ = (doc, doc_data); Ok(()) } /// Initialize a document page and obtain its properties. /// /// This is called once per page when the document is loaded initially. /// /// The plugin has to return a `PageInfo` structure containing page /// properties that will be applied by the library. 
fn page_init( page: PageRef<'_>, doc_data: &mut Self::DocumentData, ) -> Result<PageInfo<Self>, PluginError>; /// Additional hook called before freeing page resources. /// /// This doesn't have to be implemented by a plugin. The library already /// takes care of freeing the `PageData` associated with the page, and /// Zathura will free the page itself. fn page_free( page: PageRef<'_>, doc_data: &mut Self::DocumentData, page_data: &mut Self::PageData, ) -> Result<(), PluginError> { let _ = (page, doc_data, page_data); Ok(()) } /// Render a document page to a Cairo context. /// /// # Parameters /// /// * **`page`**: Mutable reference to the page to render. /// * **`doc_data`**: Plugin-specific data attached to the document. /// * **`page_data`**: Plugin-specific data attached to the page. /// * **`cairo`**: The Cairo context to render to. /// * **`printing`**: Whether the page is being rendered for printing /// (`true`) or viewing (`false`). fn page_render( page: PageRef<'_>, doc_data: &mut Self::DocumentData, page_data: &mut Self::PageData, cairo: &mut cairo::Context, printing: bool, ) -> Result<(), PluginError>; } /// `extern "C"` functions wrapping the Rust `ZathuraPlugin` functions. /// /// This is not public API and is only intended to be used by the /// `plugin_entry!` macro. #[doc(hidden)] pub mod wrapper { use { crate::{sys::*, *}, cairo, std::{ ffi::c_void, panic::{catch_unwind, AssertUnwindSafe}, }, }; trait ResultExt { fn to_zathura(self) -> zathura_error_t; } impl ResultExt for Result<(), PluginError> { fn to_zathura(self) -> zathura_error_t { match self { Ok(()) => 0, Err(e) => e as zathura_error_t, } } } fn wrap(f: impl FnOnce() -> Result<(), PluginError>) -> Result<(), PluginError> { match catch_unwind(AssertUnwindSafe(f)) { Ok(r) => r, Err(_) => Err(PluginError::Unknown), } } /// Open a document and set the number of pages to create in `document`. pub unsafe extern "C" fn document_open<P: ZathuraPlugin>( document: *mut zathura_document_t, ) -> zathura_error_t { wrap(|| { let doc = DocumentRef::from_raw(document); let info = P::document_open(doc)?; let mut doc = DocumentRef::from_raw(document); doc.set_plugin_data(Box::into_raw(Box::new(info.plugin_data)) as *mut _); doc.set_page_count(info.page_count); Ok(()) }) .to_zathura() } /// Free plugin-specific data in `document`. /// /// This is called by `zathura_document_free` and thus must not attempt to /// free the document again. pub unsafe extern "C" fn
<P: ZathuraPlugin>( document: *mut zathura_document_t, _data: *mut c_void, ) -> zathura_error_t { wrap(|| { let doc = DocumentRef::from_raw(document); let doc_data = &mut *(doc.plugin_data() as *mut P::DocumentData); let result = P::document_free(doc, doc_data); let doc = DocumentRef::from_raw(document); let plugin_data = doc.plugin_data() as *mut P::DocumentData; drop(Box::from_raw(plugin_data)); result }) .to_zathura() } /// Initialize a page and set its dimensions. /// /// If the page size is not set, rendering on it has no effect and the page /// appears invisible. pub unsafe extern "C" fn page_init<P: ZathuraPlugin>( page: *mut zathura_page_t, ) -> zathura_error_t { wrap(|| { let mut p = PageRef::from_raw(page); // Obtaining the document data is safe, since there is no other way to get access to it // while this function executes. let doc_data = p.document().plugin_data() as *mut P::DocumentData; let info = P::page_init(p, &mut *doc_data)?; let mut p = PageRef::from_raw(page); p.set_width(info.width); p.set_height(info.height); p.set_plugin_data(Box::into_raw(Box::new(info.plugin_data)) as *mut _); Ok(()) }) .to_zathura() } /// Deallocate plugin-specific page data. /// /// If this function is missing, the *document* will not be freed. pub unsafe extern "C" fn page_clear<P: ZathuraPlugin>( page: *mut zathura_page_t, _data: *mut c_void, ) -> zathura_error_t { wrap(|| { let result = { let mut p = PageRef::from_raw(page); let doc_data = &mut *(p.document().plugin_data() as *mut P::DocumentData); let page_data = &mut *(p.plugin_data() as *mut P::PageData); P::page_free(p, doc_data, page_data) }; // Free the `PageData` let p = PageRef::from_raw(page); let plugin_data = p.plugin_data() as *mut P::PageData; drop(Box::from_raw(plugin_data)); result }) .to_zathura() } /// Render a page to a Cairo context. pub unsafe extern "C" fn page_render_cairo<P: ZathuraPlugin>( page: *mut zathura_page_t, _data: *mut c_void, cairo: *mut sys::cairo_t, printing: bool, ) -> zathura_error_t { wrap(|| { let mut p = PageRef::from_raw(page); let page_data = &mut *(p.plugin_data() as *mut P::PageData); let doc_data = &mut *(p.document().plugin_data() as *mut P::DocumentData); let mut cairo = cairo::Context::from_raw_borrow(cairo as *mut _); P::page_render(p, doc_data, page_data, &mut cairo, printing) }) .to_zathura() } } /// Declares this library as a Zathura plugin. /// /// A crate can only provide one Zathura plugin, so this macro may only be /// called once per crate. /// /// For this to work, this crate must be built as a `cdylib` and the result put /// somewhere Zathura can find it. An easy way to iterate on a plugin is running /// this in the workspace root after any changes: /// /// ```notrust /// cargo build && zathura -p target/debug/ <file> /// ``` /// /// # Examples /// /// For a usage example of this macro, refer to the crate-level docs. #[macro_export] macro_rules! plugin_entry { ( $name:literal, $plugin_ty:ty, [ $($mime:literal),+ $(,)? 
] ) => { #[doc(hidden)] #[repr(transparent)] #[allow(warnings)] pub struct __AssertSync<T>(T); unsafe impl<T> Sync for __AssertSync<T> {} #[doc(hidden)] #[no_mangle] pub static mut zathura_plugin_3_4: /* API=3, ABI=4 */ __AssertSync<$crate::sys::zathura_plugin_definition_t> = __AssertSync({ use $crate::sys::*; use $crate::wrapper::*; zathura_plugin_definition_t { name: concat!($name, "\0").as_ptr() as *const _, version: zathura_plugin_version_t { major: $crate::pkg_version_major!(), minor: $crate::pkg_version_minor!(), rev: $crate::pkg_version_patch!(), }, mime_types_size: { // Sum up as many 1s as there are entries in `$mime`. The // `$mime;` tells the compiler which syntax variable to // iterate over; it is disposed with no effect. 0 $(+ { $mime; 1 })+ }, mime_types: [ $( concat!($mime, "\0").as_ptr() as *const _, )+ ].as_ptr() as *mut _, // assuming Zathura never mutates this functions: zathura_plugin_functions_t { document_open: Some(document_open::<$plugin_ty>), document_free: Some(document_free::<$plugin_ty>), document_index_generate: None, document_save_as: None, document_attachments_get: None, document_attachment_save: None, document_get_information: None, page_init: Some(page_init::<$plugin_ty>), page_clear: Some(page_clear::<$plugin_ty>), page_search_text: None, page_links_get: None, page_form_fields_get: None, page_images_get: None, page_image_get_cairo: None, page_get_text: None, page_render: None, // no longer used? page_render_cairo: Some(page_render_cairo::<$plugin_ty>), page_get_label: None, }, } }); }; } #[cfg(feature = "testplugin")] struct TestPlugin; #[cfg(feature = "testplugin")] impl ZathuraPlugin for TestPlugin { type DocumentData = (); type PageData = (); fn document_open(doc: DocumentRef<'_>) -> Result<DocumentInfo<Self>, PluginError> { println!("open: {:?}", doc.basename_utf8()); println!("path: {:?}", doc.path_utf8()); println!("url: {:?}", doc.uri_utf8()); println!("{} pages", doc.page_count()); Ok(DocumentInfo { page_count: 5, plugin_data: (), }) } fn document_free(doc: DocumentRef<'_>, _doc_data: &mut ()) -> Result<(), PluginError> { println!("free! {:?}", doc); Ok(()) } fn page_init(page: PageRef<'_>, _doc_data: &mut ()) -> Result<PageInfo<Self>, PluginError> { println!("page init: {:?}", page); Ok(PageInfo { width: 75.0, height: 100.0, plugin_data: (), }) } fn page_free( page: PageRef<'_>, _doc_data: &mut (), _page_data: &mut (), ) -> Result<(), PluginError> { println!("page free: {:?}", page); Ok(()) } fn page_render( mut page: PageRef<'_>, _doc_data: &mut Self::DocumentData, _page_data: &mut Self::PageData, cairo: &mut cairo::Context, printing: bool, ) -> Result<(), PluginError> { println!( "render! {:?}, index={:?}, {}x{}, {:?}", page, page.index(), page.width(), page.height(), printing ); { let doc = page.document(); println!( "doc: zoom={}, scale={}, rotation={}°, ppi={}, scale={:?}, cell-size={:?}", doc.zoom(), doc.scale(), doc.rotation(), doc.viewport_ppi(), doc.scaling_factors(), doc.cell_size(), ); } println!( "cairo: scale={:?}, 50,50={:?}", cairo.get_target().get_device_scale(), cairo.user_to_device(50.0, 50.0), ); if page.index() == 0 { cairo.move_to(10.0, 10.0); cairo.show_text("Wello!"); cairo.set_source_rgb(0.0, 1.0, 1.0); cairo.set_line_width(1.0); cairo.move_to(0.0, 0.0); cairo.line_to(10.5, 50.5); cairo.stroke(); } Ok(()) } } #[cfg(feature = "testplugin")] plugin_entry!("TestPlugin", TestPlugin, ["text/plain"]);
document_free
identifier_name
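Both Zathura rows rely on the same FFI discipline in the `wrapper` module: every `extern "C"` shim runs the plugin callback under `catch_unwind` so a Rust panic becomes an error code instead of unwinding across the C boundary, and plugin data crosses that boundary via `Box::into_raw`/`Box::from_raw`. Below is a standalone sketch of the panic-to-status-code half of that pattern; the `ffi_status` helper and its `i32` codes are illustrative and not part of the Zathura ABI.

```rust
use std::panic::{catch_unwind, AssertUnwindSafe};

/// Flatten a fallible callback into a C-style status code: 0 on success,
/// 1 on a reported error, 2 on a panic. Hypothetical helper mirroring the
/// `wrap`/`to_zathura` pair in the rows above.
fn ffi_status<E>(f: impl FnOnce() -> Result<(), E>) -> i32 {
    match catch_unwind(AssertUnwindSafe(f)) {
        Ok(Ok(())) => 0,
        Ok(Err(_)) => 1,
        // Never let a panic unwind into C; report it as a status code instead.
        Err(_) => 2,
    }
}

fn main() {
    assert_eq!(ffi_status(|| Ok::<(), ()>(())), 0);
    assert_eq!(ffi_status(|| Err(())), 1);
    assert_eq!(ffi_status(|| -> Result<(), ()> { panic!("plugin bug") }), 2);
}
```

`AssertUnwindSafe` is needed because closures capturing mutable state are not automatically `UnwindSafe`; the wrappers above make the same trade-off, accepting that a panicking callback may leave plugin data in an inconsistent (but memory-safe) state.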
/// Render a page to a Cairo context. pub unsafe extern "C" fn page_render_cairo<P: ZathuraPlugin>( page: *mut zathura_page_t, _data: *mut c_void, cairo: *mut sys::cairo_t, printing: bool, ) -> zathura_error_t { wrap(|| { let mut p = PageRef::from_raw(page); let page_data = &mut *(p.plugin_data() as *mut P::PageData); let doc_data = &mut *(p.document().plugin_data() as *mut P::DocumentData); let mut cairo = cairo::Context::from_raw_borrow(cairo as *mut _); P::page_render(p, doc_data, page_data, &mut cairo, printing) }) .to_zathura() } } /// Declares this library as a Zathura plugin. /// /// A crate can only provide one Zathura plugin, so this macro may only be /// called once per crate. /// /// For this to work, this crate must be built as a `cdylib` and the result put /// somewhere Zathura can find it. An easy way to iterate on a plugin is running /// this in the workspace root after any changes: /// /// ```notrust /// cargo build && zathura -p target/debug/ <file> /// ``` /// /// # Examples /// /// For a usage example of this macro, refer to the crate-level docs. #[macro_export] macro_rules! plugin_entry { ( $name:literal, $plugin_ty:ty, [ $($mime:literal),+ $(,)? ] ) => { #[doc(hidden)] #[repr(transparent)] #[allow(warnings)] pub struct __AssertSync<T>(T); unsafe impl<T> Sync for __AssertSync<T> {} #[doc(hidden)] #[no_mangle] pub static mut zathura_plugin_3_4: /* API=3, ABI=4 */ __AssertSync<$crate::sys::zathura_plugin_definition_t> = __AssertSync({ use $crate::sys::*; use $crate::wrapper::*; zathura_plugin_definition_t { name: concat!($name, "\0").as_ptr() as *const _, version: zathura_plugin_version_t { major: $crate::pkg_version_major!(), minor: $crate::pkg_version_minor!(), rev: $crate::pkg_version_patch!(), }, mime_types_size: { // Sum up as many 1s as there are entries in `$mime`. The // `$mime;` tells the compiler which syntax variable to // iterate over; it is disposed with no effect. 0 $(+ { $mime; 1 })+ }, mime_types: [ $( concat!($mime, "\0").as_ptr() as *const _, )+ ].as_ptr() as *mut _, // assuming Zathura never mutates this functions: zathura_plugin_functions_t { document_open: Some(document_open::<$plugin_ty>), document_free: Some(document_free::<$plugin_ty>), document_index_generate: None, document_save_as: None, document_attachments_get: None, document_attachment_save: None, document_get_information: None, page_init: Some(page_init::<$plugin_ty>), page_clear: Some(page_clear::<$plugin_ty>), page_search_text: None, page_links_get: None, page_form_fields_get: None, page_images_get: None, page_image_get_cairo: None, page_get_text: None, page_render: None, // no longer used? page_render_cairo: Some(page_render_cairo::<$plugin_ty>), page_get_label: None, }, } }); }; } #[cfg(feature = "testplugin")] struct TestPlugin; #[cfg(feature = "testplugin")] impl ZathuraPlugin for TestPlugin { type DocumentData = (); type PageData = (); fn document_open(doc: DocumentRef<'_>) -> Result<DocumentInfo<Self>, PluginError> { println!("open: {:?}", doc.basename_utf8()); println!("path: {:?}", doc.path_utf8()); println!("url: {:?}", doc.uri_utf8()); println!("{} pages", doc.page_count()); Ok(DocumentInfo { page_count: 5, plugin_data: (), }) } fn document_free(doc: DocumentRef<'_>, _doc_data: &mut ()) -> Result<(), PluginError> { println!("free! 
{:?}", doc); Ok(()) } fn page_init(page: PageRef<'_>, _doc_data: &mut ()) -> Result<PageInfo<Self>, PluginError> { println!("page init: {:?}", page); Ok(PageInfo { width: 75.0, height: 100.0, plugin_data: (), }) } fn page_free( page: PageRef<'_>, _doc_data: &mut (), _page_data: &mut (), ) -> Result<(), PluginError> { println!("page free: {:?}", page); Ok(()) } fn page_render( mut page: PageRef<'_>, _doc_data: &mut Self::DocumentData, _page_data: &mut Self::PageData, cairo: &mut cairo::Context, printing: bool, ) -> Result<(), PluginError> { println!( "render! {:?}, index={:?}, {}x{}, {:?}", page, page.index(), page.width(), page.height(), printing ); { let doc = page.document(); println!( "doc: zoom={}, scale={}, rotation={}°, ppi={}, scale={:?}, cell-size={:?}", doc.zoom(), doc.scale(), doc.rotation(), doc.viewport_ppi(), doc.scaling_factors(), doc.cell_size(), ); } println!( "cairo: scale={:?}, 50,50={:?}", cairo.get_target().get_device_scale(), cairo.user_to_device(50.0, 50.0), ); if page.index() == 0 { cairo.move_to(10.0, 10.0); cairo.show_text("Wello!"); cairo.set_source_rgb(0.0, 1.0, 1.0); cairo.set_line_width(1.0); cairo.move_to(0.0, 0.0); cairo.line_to(10.5, 50.5); cairo.stroke(); } Ok(()) } } #[cfg(feature = "testplugin")] plugin_entry!("TestPlugin", TestPlugin, ["text/plain"]);
{ wrap(|| { let result = { let mut p = PageRef::from_raw(page); let doc_data = &mut *(p.document().plugin_data() as *mut P::DocumentData); let page_data = &mut *(p.plugin_data() as *mut P::PageData); P::page_free(p, doc_data, page_data) }; // Free the `PageData` let p = PageRef::from_raw(page); let plugin_data = p.plugin_data() as *mut P::PageData; drop(Box::from_raw(plugin_data)); result }) .to_zathura() }
identifier_body
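The `wrap` helper in the `wrapper` module above exists because a Rust panic must never unwind across an `extern "C"` boundary (doing so is undefined behavior). A minimal standalone sketch of the same panic-barrier pattern, with simplified stand-in types rather than the real `zathura_error_t`:

```rust
use std::panic::{catch_unwind, AssertUnwindSafe};

// Stand-in error codes; the real plugin maps `PluginError` to `zathura_error_t`.
type ErrorCode = i32;
const OK: ErrorCode = 0;
const UNKNOWN: ErrorCode = 1;

fn wrap(f: impl FnOnce() -> Result<(), ErrorCode>) -> ErrorCode {
    // Catch any panic so it cannot unwind into the C caller, and report it
    // as a generic error code instead.
    match catch_unwind(AssertUnwindSafe(f)) {
        Ok(Ok(())) => OK,
        Ok(Err(code)) => code,
        Err(_panic) => UNKNOWN,
    }
}

// The shape of every callback handed to Zathura: a C-ABI function whose body
// lives entirely inside the panic barrier.
unsafe extern "C" fn page_callback() -> ErrorCode {
    wrap(|| {
        // plugin logic that may fail or panic goes here
        Ok(())
    })
}

fn main() {
    assert_eq!(unsafe { page_callback() }, OK);
}
```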
lib.rs
//! A Rust wrapper around [Zathura's] plugin API, allowing plugin development in //! Rust. //! //! This library wraps the plugin interface and exposes the [`ZathuraPlugin`] //! trait and the [`plugin_entry!`] macro as the primary way to implement a Rust //! plugin for Zathura.
//! # use zathura_plugin::*; //! struct PluginType {} //! //! impl ZathuraPlugin for PluginType { //! type DocumentData = (); //! type PageData = (); //! //! fn document_open(doc: DocumentRef<'_>) -> Result<DocumentInfo<Self>, PluginError> { //! unimplemented!() //! } //! //! fn page_init(page: PageRef<'_>, doc_data: &mut ()) -> Result<PageInfo<Self>, PluginError> { //! unimplemented!() //! } //! //! fn page_render( //! page: PageRef<'_>, //! doc_data: &mut Self::DocumentData, //! page_data: &mut Self::PageData, //! cairo: &mut cairo::Context, //! printing: bool, //! ) -> Result<(), PluginError> { //! unimplemented!() //! } //! } //! //! plugin_entry!("MyPlugin", PluginType, ["text/plain", "application/pdf"]); //! ``` //! //! [Zathura's]: https://pwmt.org/projects/zathura/ //! [`ZathuraPlugin`]: trait.ZathuraPlugin.html //! [`plugin_entry!`]: macro.plugin_entry.html #![doc(html_root_url = "https://docs.rs/zathura-plugin/0.4.0")] #![warn(missing_debug_implementations, rust_2018_idioms)] mod document; mod error; mod page; pub use { self::{document::*, error::*, page::*}, zathura_plugin_sys as sys, }; // Used by the macro #[doc(hidden)] pub use pkg_version::{pkg_version_major, pkg_version_minor, pkg_version_patch}; /// Information needed to configure a Zathura document. #[derive(Debug)] pub struct DocumentInfo<P: ZathuraPlugin +?Sized> { /// Number of pages to create in the document. pub page_count: u32, /// Plugin-specific data to attach to the document. pub plugin_data: P::DocumentData, } /// Information needed to configure a document page. #[derive(Debug)] pub struct PageInfo<P: ZathuraPlugin +?Sized> { pub width: f64, pub height: f64, pub plugin_data: P::PageData, } /// Trait to be implemented by Zathura plugins. pub trait ZathuraPlugin { /// Plugin-specific data attached to Zathura documents. /// /// If the plugin doesn't need to associate custom data with the document, /// this can be set to `()`. type DocumentData; /// Plugin-specific data attached to every document page. /// /// If the plugin doesn't need to associate custom data with every page, /// this can be set to `()`. type PageData; /// Open a document and read its metadata. /// /// This function has to determine and return the number of pages in the /// document. Zathura will create that number of pages and call the plugin's /// page initialization and rendering methods. fn document_open(doc: DocumentRef<'_>) -> Result<DocumentInfo<Self>, PluginError>; /// Additional hook called before freeing the document resources. /// /// It is not necessary for plugins to implement this. The library will take /// care of freeing the `DocumentData` attached to the document, and Zathura /// itself will free the actual document. fn document_free( doc: DocumentRef<'_>, doc_data: &mut Self::DocumentData, ) -> Result<(), PluginError> { let _ = (doc, doc_data); Ok(()) } /// Initialize a document page and obtain its properties. /// /// This is called once per page when the document is loaded initially. /// /// The plugin has to return a `PageInfo` structure containing page /// properties that will be applied by the library. fn page_init( page: PageRef<'_>, doc_data: &mut Self::DocumentData, ) -> Result<PageInfo<Self>, PluginError>; /// Additional hook called before freeing page resources. /// /// This doesn't have to be implemented by a plugin. The library already /// takes care of freeing the `PageData` associated with the page, and /// Zathura will free the page itself. 
fn page_free( page: PageRef<'_>, doc_data: &mut Self::DocumentData, page_data: &mut Self::PageData, ) -> Result<(), PluginError> { let _ = (page, doc_data, page_data); Ok(()) } /// Render a document page to a Cairo context. /// /// # Parameters /// /// * **`page`**: Mutable reference to the page to render. /// * **`doc_data`**: Plugin-specific data attached to the document. /// * **`page_data`**: Plugin-specific data attached to the page. /// * **`cairo`**: The Cairo context to render to. /// * **`printing`**: Whether the page is being rendered for printing /// (`true`) or viewing (`false`). fn page_render( page: PageRef<'_>, doc_data: &mut Self::DocumentData, page_data: &mut Self::PageData, cairo: &mut cairo::Context, printing: bool, ) -> Result<(), PluginError>; } /// `extern "C"` functions wrapping the Rust `ZathuraPlugin` functions. /// /// This is not public API and is only intended to be used by the /// `plugin_entry!` macro. #[doc(hidden)] pub mod wrapper { use { crate::{sys::*, *}, cairo, std::{ ffi::c_void, panic::{catch_unwind, AssertUnwindSafe}, }, }; trait ResultExt { fn to_zathura(self) -> zathura_error_t; } impl ResultExt for Result<(), PluginError> { fn to_zathura(self) -> zathura_error_t { match self { Ok(()) => 0, Err(e) => e as zathura_error_t, } } } fn wrap(f: impl FnOnce() -> Result<(), PluginError>) -> Result<(), PluginError> { match catch_unwind(AssertUnwindSafe(f)) { Ok(r) => r, Err(_) => Err(PluginError::Unknown), } } /// Open a document and set the number of pages to create in `document`. pub unsafe extern "C" fn document_open<P: ZathuraPlugin>( document: *mut zathura_document_t, ) -> zathura_error_t { wrap(|| { let doc = DocumentRef::from_raw(document); let info = P::document_open(doc)?; let mut doc = DocumentRef::from_raw(document); doc.set_plugin_data(Box::into_raw(Box::new(info.plugin_data)) as *mut _); doc.set_page_count(info.page_count); Ok(()) }) .to_zathura() } /// Free plugin-specific data in `document`. /// /// This is called by `zathura_document_free` and thus must not attempt to /// free the document again. pub unsafe extern "C" fn document_free<P: ZathuraPlugin>( document: *mut zathura_document_t, _data: *mut c_void, ) -> zathura_error_t { wrap(|| { let doc = DocumentRef::from_raw(document); let doc_data = &mut *(doc.plugin_data() as *mut P::DocumentData); let result = P::document_free(doc, doc_data); let doc = DocumentRef::from_raw(document); let plugin_data = doc.plugin_data() as *mut P::DocumentData; drop(Box::from_raw(plugin_data)); result }) .to_zathura() } /// Initialize a page and set its dimensions. /// /// If the page size is not set, rendering on it has no effect and the page /// appears invisible. pub unsafe extern "C" fn page_init<P: ZathuraPlugin>( page: *mut zathura_page_t, ) -> zathura_error_t { wrap(|| { let mut p = PageRef::from_raw(page); // Obtaining the document data is safe, since there is no other way to get access to it // while this function executes. let doc_data = p.document().plugin_data() as *mut P::DocumentData; let info = P::page_init(p, &mut *doc_data)?; let mut p = PageRef::from_raw(page); p.set_width(info.width); p.set_height(info.height); p.set_plugin_data(Box::into_raw(Box::new(info.plugin_data)) as *mut _); Ok(()) }) .to_zathura() } /// Deallocate plugin-specific page data. /// /// If this function is missing, the *document* will not be freed. 
pub unsafe extern "C" fn page_clear<P: ZathuraPlugin>( page: *mut zathura_page_t, _data: *mut c_void, ) -> zathura_error_t { wrap(|| { let result = { let mut p = PageRef::from_raw(page); let doc_data = &mut *(p.document().plugin_data() as *mut P::DocumentData); let page_data = &mut *(p.plugin_data() as *mut P::PageData); P::page_free(p, doc_data, page_data) }; // Free the `PageData` let p = PageRef::from_raw(page); let plugin_data = p.plugin_data() as *mut P::PageData; drop(Box::from_raw(plugin_data)); result }) .to_zathura() } /// Render a page to a Cairo context. pub unsafe extern "C" fn page_render_cairo<P: ZathuraPlugin>( page: *mut zathura_page_t, _data: *mut c_void, cairo: *mut sys::cairo_t, printing: bool, ) -> zathura_error_t { wrap(|| { let mut p = PageRef::from_raw(page); let page_data = &mut *(p.plugin_data() as *mut P::PageData); let doc_data = &mut *(p.document().plugin_data() as *mut P::DocumentData); let mut cairo = cairo::Context::from_raw_borrow(cairo as *mut _); P::page_render(p, doc_data, page_data, &mut cairo, printing) }) .to_zathura() } } /// Declares this library as a Zathura plugin. /// /// A crate can only provide one Zathura plugin, so this macro may only be /// called once per crate. /// /// For this to work, this crate must be built as a `cdylib` and the result put /// somewhere Zathura can find it. An easy way to iterate on a plugin is running /// this in the workspace root after any changes: /// /// ```notrust /// cargo build && zathura -p target/debug/ <file> /// ``` /// /// # Examples /// /// For a usage example of this macro, refer to the crate-level docs. #[macro_export] macro_rules! plugin_entry { ( $name:literal, $plugin_ty:ty, [ $($mime:literal),+ $(,)? ] ) => { #[doc(hidden)] #[repr(transparent)] #[allow(warnings)] pub struct __AssertSync<T>(T); unsafe impl<T> Sync for __AssertSync<T> {} #[doc(hidden)] #[no_mangle] pub static mut zathura_plugin_3_4: /* API=3, ABI=4 */ __AssertSync<$crate::sys::zathura_plugin_definition_t> = __AssertSync({ use $crate::sys::*; use $crate::wrapper::*; zathura_plugin_definition_t { name: concat!($name, "\0").as_ptr() as *const _, version: zathura_plugin_version_t { major: $crate::pkg_version_major!(), minor: $crate::pkg_version_minor!(), rev: $crate::pkg_version_patch!(), }, mime_types_size: { // Sum up as many 1s as there are entries in `$mime`. The // `$mime;` tells the compiler which syntax variable to // iterate over; it is disposed with no effect. 0 $(+ { $mime; 1 })+ }, mime_types: [ $( concat!($mime, "\0").as_ptr() as *const _, )+ ].as_ptr() as *mut _, // assuming Zathura never mutates this functions: zathura_plugin_functions_t { document_open: Some(document_open::<$plugin_ty>), document_free: Some(document_free::<$plugin_ty>), document_index_generate: None, document_save_as: None, document_attachments_get: None, document_attachment_save: None, document_get_information: None, page_init: Some(page_init::<$plugin_ty>), page_clear: Some(page_clear::<$plugin_ty>), page_search_text: None, page_links_get: None, page_form_fields_get: None, page_images_get: None, page_image_get_cairo: None, page_get_text: None, page_render: None, // no longer used? 
page_render_cairo: Some(page_render_cairo::<$plugin_ty>), page_get_label: None, }, } }); }; } #[cfg(feature = "testplugin")] struct TestPlugin; #[cfg(feature = "testplugin")] impl ZathuraPlugin for TestPlugin { type DocumentData = (); type PageData = (); fn document_open(doc: DocumentRef<'_>) -> Result<DocumentInfo<Self>, PluginError> { println!("open: {:?}", doc.basename_utf8()); println!("path: {:?}", doc.path_utf8()); println!("url: {:?}", doc.uri_utf8()); println!("{} pages", doc.page_count()); Ok(DocumentInfo { page_count: 5, plugin_data: (), }) } fn document_free(doc: DocumentRef<'_>, _doc_data: &mut ()) -> Result<(), PluginError> { println!("free! {:?}", doc); Ok(()) } fn page_init(page: PageRef<'_>, _doc_data: &mut ()) -> Result<PageInfo<Self>, PluginError> { println!("page init: {:?}", page); Ok(PageInfo { width: 75.0, height: 100.0, plugin_data: (), }) } fn page_free( page: PageRef<'_>, _doc_data: &mut (), _page_data: &mut (), ) -> Result<(), PluginError> { println!("page free: {:?}", page); Ok(()) } fn page_render( mut page: PageRef<'_>, _doc_data: &mut Self::DocumentData, _page_data: &mut Self::PageData, cairo: &mut cairo::Context, printing: bool, ) -> Result<(), PluginError> { println!( "render! {:?}, index={:?}, {}x{}, {:?}", page, page.index(), page.width(), page.height(), printing ); { let doc = page.document(); println!( "doc: zoom={}, scale={}, rotation={}°, ppi={}, scale={:?}, cell-size={:?}", doc.zoom(), doc.scale(), doc.rotation(), doc.viewport_ppi(), doc.scaling_factors(), doc.cell_size(), ); } println!( "cairo: scale={:?}, 50,50={:?}", cairo.get_target().get_device_scale(), cairo.user_to_device(50.0, 50.0), ); if page.index() == 0 { cairo.move_to(10.0, 10.0); cairo.show_text("Wello!"); cairo.set_source_rgb(0.0, 1.0, 1.0); cairo.set_line_width(1.0); cairo.move_to(0.0, 0.0); cairo.line_to(10.5, 50.5); cairo.stroke(); } Ok(()) } } #[cfg(feature = "testplugin")] plugin_entry!("TestPlugin", TestPlugin, ["text/plain"]);
//! //! # Examples //! //! ```
random_line_split
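In the wrappers above, `document_open` and `page_init` attach plugin state to Zathura objects via `Box::into_raw`, and the free/clear hooks reclaim that state with `Box::from_raw`. A minimal sketch of this ownership round-trip, using a simplified stand-in struct instead of the real plugin data types:

```rust
// Stand-in for a plugin's `DocumentData`.
struct DocumentData {
    page_count: u32,
}

fn main() {
    // open: leak ownership to the C side as an opaque pointer
    let raw: *mut DocumentData = Box::into_raw(Box::new(DocumentData { page_count: 5 }));

    // render: borrow the data back through the raw pointer without taking ownership
    let pages = unsafe { (*raw).page_count };
    assert_eq!(pages, 5);

    // free: reclaim ownership so Rust runs the destructor exactly once
    unsafe { drop(Box::from_raw(raw)) };
}
```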
utxo_scanner_task.rs
static { async fn finalize( &self, total_scanned: u64, final_utxo_pos: u64, elapsed: Duration, ) -> Result<(), UtxoScannerError> { let metadata = self.get_metadata().await?.unwrap_or_default(); self.publish_event(UtxoScannerEvent::Progress { current_index: final_utxo_pos, total_index: final_utxo_pos, }); self.publish_event(UtxoScannerEvent::Completed { number_scanned: total_scanned, number_received: metadata.number_of_utxos, value_received: metadata.total_amount, time_taken: elapsed, }); // Presence of scanning keys are used to determine if a wallet is busy with recovery or not. if self.mode == UtxoScannerMode::Recovery { self.clear_db().await?; } Ok(()) } async fn connect_to_peer(&mut self, peer: NodeId) -> Result<PeerConnection, UtxoScannerError> { self.publish_event(UtxoScannerEvent::ConnectingToBaseNode(peer.clone())); debug!( target: LOG_TARGET, "Attempting UTXO sync with seed peer {} ({})", self.peer_index, peer, ); match self.resources.comms_connectivity.dial_peer(peer.clone()).await { Ok(conn) => Ok(conn), Err(e) => { self.publish_event(UtxoScannerEvent::ConnectionFailedToBaseNode { peer: peer.clone(), num_retries: self.num_retries, retry_limit: self.retry_limit, error: e.to_string(), }); // No use re-dialing a peer that is not responsive for recovery mode if self.mode == UtxoScannerMode::Recovery { if let Ok(Some(connection)) = self.resources.comms_connectivity.get_connection(peer.clone()).await { if connection.clone().disconnect().await.is_ok() { debug!(target: LOG_TARGET, "Disconnected base node peer {}", peer); } }; let _ = time::sleep(Duration::from_secs(30)); } Err(e.into()) }, } } async fn attempt_sync(&mut self, peer: NodeId) -> Result<(u64, u64, Duration), UtxoScannerError> { let mut connection = self.connect_to_peer(peer.clone()).await?; let mut client = connection .connect_rpc_using_builder(BaseNodeSyncRpcClient::builder().with_deadline(Duration::from_secs(60))) .await?; let latency = client.get_last_request_latency(); self.publish_event(UtxoScannerEvent::ConnectedToBaseNode( peer.clone(), latency.unwrap_or_default(), )); let timer = Instant::now(); let mut total_scanned = 0u64; loop { let start_index = self.get_start_utxo_mmr_pos(&mut client).await?; let tip_header = self.get_chain_tip_header(&mut client).await?; let output_mmr_size = tip_header.output_mmr_size; if self.shutdown_signal.is_triggered() { // if running is set to false, we know its been canceled upstream so lets exit the loop return Ok((total_scanned, start_index, timer.elapsed())); } debug!( target: LOG_TARGET, "Scanning UTXO's (start_index = {}, output_mmr_size = {}, height = {}, tip_hash = {})", start_index, output_mmr_size, tip_header.height, tip_header.hash().to_hex() ); // start_index could be greater than output_mmr_size if we switch to a new peer that is behind the original // peer. In the common case, we wait for start index. if start_index >= output_mmr_size - 1 { debug!( target: LOG_TARGET, "Scanning complete UTXO #{} in {:.2?}", start_index, timer.elapsed() ); return Ok((total_scanned, start_index, timer.elapsed())); } let num_scanned = self.scan_utxos(&mut client, start_index, tip_header).await?; if num_scanned == 0 { return Err(UtxoScannerError::UtxoScanningError( "Peer returned 0 UTXOs to scan".to_string(), )); } debug!( target: LOG_TARGET, "Scanning round completed UTXO #{} in {:.2?} ({} scanned)", output_mmr_size, timer.elapsed(), num_scanned ); // let num_scanned = 0; total_scanned += num_scanned; // return Ok((total_scanned, start_index, timer.elapsed())); } } async fn
(&self, client: &mut BaseNodeSyncRpcClient) -> Result<BlockHeader, UtxoScannerError> { let chain_metadata = client.get_chain_metadata().await?; let chain_height = chain_metadata.height_of_longest_chain(); let end_header = client.get_header_by_height(chain_height).await?; let end_header = BlockHeader::try_from(end_header).map_err(|_| UtxoScannerError::ConversionError)?; Ok(end_header) } async fn get_start_utxo_mmr_pos(&self, client: &mut BaseNodeSyncRpcClient) -> Result<u64, UtxoScannerError> { let metadata = match self.get_metadata().await? { None => { let birthday_metadata = self.get_birthday_metadata(client).await?; self.set_metadata(birthday_metadata.clone()).await?; return Ok(birthday_metadata.utxo_index); }, Some(m) => m, }; // if it's none, we return 0 above. let request = FindChainSplitRequest { block_hashes: vec![metadata.height_hash], header_count: 1, }; // this returns the index of the vec of hashes we sent it, that is the last hash it knows of. match client.find_chain_split(request).await { Ok(_) => Ok(metadata.utxo_index + 1), Err(RpcError::RequestFailed(err)) if err.as_status_code().is_not_found() => { warn!(target: LOG_TARGET, "Reorg detected: {}", err); // The node does not know of the last hash we scanned, thus we had a chain split. // We now start at the wallet birthday again let birthday_metadata = self.get_birthday_metadata(client).await?; Ok(birthday_metadata.utxo_index) }, Err(err) => Err(err.into()), } } async fn scan_utxos( &mut self, client: &mut BaseNodeSyncRpcClient, start_mmr_leaf_index: u64, end_header: BlockHeader, ) -> Result<u64, UtxoScannerError> { debug!( target: LOG_TARGET, "Scanning UTXO's from #{} to #{} (height {})", start_mmr_leaf_index, end_header.output_mmr_size, end_header.height ); let end_header_hash = end_header.hash(); let output_mmr_size = end_header.output_mmr_size; let mut num_recovered = 0u64; let mut total_amount = MicroTari::from(0); let mut total_scanned = 0; self.publish_event(UtxoScannerEvent::Progress { current_index: start_mmr_leaf_index, total_index: (output_mmr_size - 1), }); let request = SyncUtxosRequest { start: start_mmr_leaf_index, end_header_hash: end_header_hash.clone(), include_pruned_utxos: false, include_deleted_bitmaps: false, }; let start = Instant::now(); let utxo_stream = client.sync_utxos(request).await?; trace!( target: LOG_TARGET, "bulletproof rewind profile - UTXO stream request time {} ms", start.elapsed().as_millis(), ); // We download in chunks for improved streaming efficiency const CHUNK_SIZE: usize = 125; let mut utxo_stream = utxo_stream.chunks(CHUNK_SIZE); const COMMIT_EVERY_N: u64 = (1000_i64 / CHUNK_SIZE as i64) as u64; let mut last_utxo_index = 0u64; let mut iteration_count = 0u64; let mut utxo_next_await_profiling = Vec::new(); let mut scan_for_outputs_profiling = Vec::new(); while let Some(response) = { let start = Instant::now(); let utxo_stream_next = utxo_stream.next().await; utxo_next_await_profiling.push(start.elapsed()); utxo_stream_next } { if self.shutdown_signal.is_triggered() { // if running is set to false, we know its been canceled upstream so lets exit the loop return Ok(total_scanned as u64); } let (outputs, utxo_index) = convert_response_to_transaction_outputs(response, last_utxo_index)?; last_utxo_index = utxo_index; total_scanned += outputs.len(); iteration_count += 1; let start = Instant::now(); let found_outputs = self.scan_for_outputs(outputs).await?; scan_for_outputs_profiling.push(start.elapsed()); // Reduce the number of db hits by only persisting progress every N iterations if
iteration_count % COMMIT_EVERY_N == 0 || last_utxo_index >= output_mmr_size - 1 { self.publish_event(UtxoScannerEvent::Progress { current_index: last_utxo_index, total_index: (output_mmr_size - 1), }); self.update_scanning_progress_in_db( last_utxo_index, total_amount, num_recovered, end_header_hash.clone(), ) .await?; } let (count, amount) = self.import_utxos_to_transaction_service(found_outputs).await?; num_recovered = num_recovered.saturating_add(count); total_amount += amount; } trace!( target: LOG_TARGET, "bulletproof rewind profile - streamed {} outputs in {} ms", total_scanned, utxo_next_await_profiling.iter().fold(0, |acc, &x| acc + x.as_millis()), ); trace!( target: LOG_TARGET, "bulletproof rewind profile - scanned {} outputs in {} ms", total_scanned, scan_for_outputs_profiling.iter().fold(0, |acc, &x| acc + x.as_millis()), ); self.update_scanning_progress_in_db(last_utxo_index, total_amount, num_recovered, end_header_hash) .await?; self.publish_event(UtxoScannerEvent::Progress { current_index: (output_mmr_size - 1), total_index: (output_mmr_size - 1), }); Ok(total_scanned as u64) } async fn update_scanning_progress_in_db( &self, last_utxo_index: u64, total_amount: MicroTari, num_recovered: u64, end_header_hash: Vec<u8>, ) -> Result<(), UtxoScannerError> { let mut meta_data = self.get_metadata().await?.unwrap_or_default(); meta_data.height_hash = end_header_hash; meta_data.number_of_utxos += num_recovered; meta_data.utxo_index = last_utxo_index; meta_data.total_amount += total_amount; self.set_metadata(meta_data).await?; Ok(()) } async fn scan_for_outputs( &mut self, outputs: Vec<TransactionOutput>, ) -> Result<Vec<(UnblindedOutput, String)>, UtxoScannerError> { let mut found_outputs: Vec<(UnblindedOutput, String)> = Vec::new(); if self.mode == UtxoScannerMode::Recovery { found_outputs.append( &mut self .resources .output_manager_service .scan_for_recoverable_outputs(outputs.clone()) .await? .into_iter() .map(|v| (v, format!("Recovered on {}.", Utc::now().naive_utc()))) .collect(), ); }; found_outputs.append( &mut self .resources .output_manager_service .scan_outputs_for_one_sided_payments(outputs.clone()) .await? 
.into_iter() .map(|v| { ( v, format!("Detected one-sided transaction on {}.", Utc::now().naive_utc()), ) }) .collect(), ); Ok(found_outputs) } async fn import_utxos_to_transaction_service( &mut self, utxos: Vec<(UnblindedOutput, String)>, ) -> Result<(u64, MicroTari), UtxoScannerError> { let mut num_recovered = 0u64; let mut total_amount = MicroTari::from(0); let source_public_key = self.resources.node_identity.public_key().clone(); for uo in utxos { match self .import_unblinded_utxo_to_transaction_service(uo.0.clone(), &source_public_key, uo.1) .await { Ok(_) => { num_recovered = num_recovered.saturating_add(1); total_amount += uo.0.value; }, Err(e) => return Err(UtxoScannerError::UtxoImportError(e.to_string())), } } Ok((num_recovered, total_amount)) } fn get_db_mode_key(&self) -> String { match self.mode { UtxoScannerMode::Recovery => RECOVERY_KEY.to_owned(), UtxoScannerMode::Scanning => SCANNING_KEY.to_owned(), } } async fn set_metadata(&self, data: ScanningMetadata) -> Result<(), UtxoScannerError> { let total_key = self.get_db_mode_key(); let db_value = serde_json::to_string(&data)?; self.resources.db.set_client_key_value(total_key, db_value).await?; Ok(()) } async fn get_metadata(&self) -> Result<Option<ScanningMetadata>, UtxoScannerError> { let total_key = self.get_db_mode_key(); let value: Option<String> = self.resources.db.get_client_key_from_str(total_key).await?; match value { None => Ok(None), Some(v) => Ok(serde_json::from_str(&v)?), } } async fn clear_db(&self) -> Result<(), UtxoScannerError> { let total_key = self.get_db_mode_key(); let _ = self.resources.db.clear_client_value(total_key).await?; Ok(()) } fn publish_event(&self, event: UtxoScannerEvent) { let _ = self.event_sender.send(event); } /// A faux incoming transaction will be created to provide a record of the event of importing a UTXO. The TxId of /// the generated transaction is returned. pub async fn import_unblinded_utxo_to_transaction_service( &mut self, unblinded_output: UnblindedOutput, source_public_key: &CommsPublicKey, message: String, ) -> Result<TxId, WalletError> { let tx_id = self .resources .transaction_service .import_utxo( unblinded_output.value, source_public_key.clone(), message, Some(unblinded_output.features.maturity), ) .await?; info!( target: LOG_TARGET, "UTXO (Commitment: {}) imported into wallet", unblinded_output .as_transaction_input(&self.resources.factories.commitment)? .commitment .to_hex() ); Ok(tx_id) } pub async fn run(mut self) -> Result<(), UtxoScannerError> { loop { if self.shutdown_signal.is_triggered() { // if running is set to false, we know its been canceled upstream so lets exit the loop return Ok(()); } match self.get_next_peer() { Some(peer) => match self.attempt_sync(peer.clone()).await { Ok((total_scanned, final_utxo_pos, elapsed)) => { debug!(target: LOG_TARGET, "Scanned to UTXO #{}", final_utxo_pos); self.finalize(total_scanned, final_utxo_pos, elapsed).await?; return Ok(()); }, Err(e) => { warn!( target: LOG_TARGET, "Failed to scan UTXO's from
get_chain_tip_header
identifier_name
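The `scan_utxos` routine above batches the UTXO stream into chunks of 125 and persists progress only about once per 1000 outputs (`COMMIT_EVERY_N`). A self-contained sketch of that throttled-checkpoint pattern, with the RPC stream and database calls replaced by simple counters:

```rust
const CHUNK_SIZE: usize = 125;
// Roughly one database commit per 1000 scanned outputs.
const COMMIT_EVERY_N: u64 = (1000_i64 / CHUNK_SIZE as i64) as u64; // == 8

fn main() {
    let output_mmr_size = 5_000u64;
    let mut iteration_count = 0u64;
    let mut commits = 0u32;

    let mut last_utxo_index = 0u64;
    while last_utxo_index < output_mmr_size - 1 {
        // One "chunk" of streamed outputs arrives per iteration.
        last_utxo_index = (last_utxo_index + CHUNK_SIZE as u64).min(output_mmr_size - 1);
        iteration_count += 1;
        // Persist on every Nth chunk, and always once the final index is reached.
        if iteration_count % COMMIT_EVERY_N == 0 || last_utxo_index >= output_mmr_size - 1 {
            commits += 1; // stands in for `update_scanning_progress_in_db`
        }
    }
    println!("{} chunks scanned, {} checkpoints written", iteration_count, commits);
}
```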
utxo_scanner_task.rs
} }; let _ = time::sleep(Duration::from_secs(30)); } Err(e.into()) }, } } async fn attempt_sync(&mut self, peer: NodeId) -> Result<(u64, u64, Duration), UtxoScannerError> { let mut connection = self.connect_to_peer(peer.clone()).await?; let mut client = connection .connect_rpc_using_builder(BaseNodeSyncRpcClient::builder().with_deadline(Duration::from_secs(60))) .await?; let latency = client.get_last_request_latency(); self.publish_event(UtxoScannerEvent::ConnectedToBaseNode( peer.clone(), latency.unwrap_or_default(), )); let timer = Instant::now(); let mut total_scanned = 0u64; loop { let start_index = self.get_start_utxo_mmr_pos(&mut client).await?; let tip_header = self.get_chain_tip_header(&mut client).await?; let output_mmr_size = tip_header.output_mmr_size; if self.shutdown_signal.is_triggered() { // if running is set to false, we know its been canceled upstream so lets exit the loop return Ok((total_scanned, start_index, timer.elapsed())); } debug!( target: LOG_TARGET, "Scanning UTXO's (start_index = {}, output_mmr_size = {}, height = {}, tip_hash = {})", start_index, output_mmr_size, tip_header.height, tip_header.hash().to_hex() ); // start_index could be greater than output_mmr_size if we switch to a new peer that is behind the original // peer. In the common case, we wait for start index. if start_index >= output_mmr_size - 1 { debug!( target: LOG_TARGET, "Scanning complete UTXO #{} in {:.2?}", start_index, timer.elapsed() ); return Ok((total_scanned, start_index, timer.elapsed())); } let num_scanned = self.scan_utxos(&mut client, start_index, tip_header).await?; if num_scanned == 0 { return Err(UtxoScannerError::UtxoScanningError( "Peer returned 0 UTXOs to scan".to_string(), )); } debug!( target: LOG_TARGET, "Scanning round completed UTXO #{} in {:.2?} ({} scanned)", output_mmr_size, timer.elapsed(), num_scanned ); // let num_scanned = 0; total_scanned += num_scanned; // return Ok((total_scanned, start_index, timer.elapsed())); } } async fn get_chain_tip_header(&self, client: &mut BaseNodeSyncRpcClient) -> Result<BlockHeader, UtxoScannerError> { let chain_metadata = client.get_chain_metadata().await?; let chain_height = chain_metadata.height_of_longest_chain(); let end_header = client.get_header_by_height(chain_height).await?; let end_header = BlockHeader::try_from(end_header).map_err(|_| UtxoScannerError::ConversionError)?; Ok(end_header) } async fn get_start_utxo_mmr_pos(&self, client: &mut BaseNodeSyncRpcClient) -> Result<u64, UtxoScannerError> { let metadata = match self.get_metadata().await? { None => { let birthday_metadata = self.get_birthday_metadata(client).await?; self.set_metadata(birthday_metadata.clone()).await?; return Ok(birthday_metadata.utxo_index); }, Some(m) => m, }; // if it's none, we return 0 above. let request = FindChainSplitRequest { block_hashes: vec![metadata.height_hash], header_count: 1, }; // this returns the index of the vec of hashes we sent it, that is the last hash it knows of. match client.find_chain_split(request).await { Ok(_) => Ok(metadata.utxo_index + 1), Err(RpcError::RequestFailed(err)) if err.as_status_code().is_not_found() => { warn!(target: LOG_TARGET, "Reorg detected: {}", err); // The node does not know of the last hash we scanned, thus we had a chain split. 
// We now start at the wallet birthday again let birthday_metadata = self.get_birthday_metadata(client).await?; Ok(birthday_metadata.utxo_index) }, Err(err) => Err(err.into()), } } async fn scan_utxos( &mut self, client: &mut BaseNodeSyncRpcClient, start_mmr_leaf_index: u64, end_header: BlockHeader, ) -> Result<u64, UtxoScannerError> { debug!( target: LOG_TARGET, "Scanning UTXO's from #{} to #{} (height {})", start_mmr_leaf_index, end_header.output_mmr_size, end_header.height ); let end_header_hash = end_header.hash(); let output_mmr_size = end_header.output_mmr_size; let mut num_recovered = 0u64; let mut total_amount = MicroTari::from(0); let mut total_scanned = 0; self.publish_event(UtxoScannerEvent::Progress { current_index: start_mmr_leaf_index, total_index: (output_mmr_size - 1), }); let request = SyncUtxosRequest { start: start_mmr_leaf_index, end_header_hash: end_header_hash.clone(), include_pruned_utxos: false, include_deleted_bitmaps: false, }; let start = Instant::now(); let utxo_stream = client.sync_utxos(request).await?; trace!( target: LOG_TARGET, "bulletproof rewind profile - UTXO stream request time {} ms", start.elapsed().as_millis(), ); // We download in chunks for improved streaming efficiency const CHUNK_SIZE: usize = 125; let mut utxo_stream = utxo_stream.chunks(CHUNK_SIZE); const COMMIT_EVERY_N: u64 = (1000_i64 / CHUNK_SIZE as i64) as u64; let mut last_utxo_index = 0u64; let mut iteration_count = 0u64; let mut utxo_next_await_profiling = Vec::new(); let mut scan_for_outputs_profiling = Vec::new(); while let Some(response) = { let start = Instant::now(); let utxo_stream_next = utxo_stream.next().await; utxo_next_await_profiling.push(start.elapsed()); utxo_stream_next } { if self.shutdown_signal.is_triggered() { // if running is set to false, we know its been canceled upstream so lets exit the loop return Ok(total_scanned as u64); } let (outputs, utxo_index) = convert_response_to_transaction_outputs(response, last_utxo_index)?; last_utxo_index = utxo_index; total_scanned += outputs.len(); iteration_count += 1; let start = Instant::now(); let found_outputs = self.scan_for_outputs(outputs).await?; scan_for_outputs_profiling.push(start.elapsed()); // Reduce the number of db hits by only persisting progress every N iterations if iteration_count % COMMIT_EVERY_N == 0 || last_utxo_index >= output_mmr_size - 1 { self.publish_event(UtxoScannerEvent::Progress { current_index: last_utxo_index, total_index: (output_mmr_size - 1), }); self.update_scanning_progress_in_db( last_utxo_index, total_amount, num_recovered, end_header_hash.clone(), ) .await?; } let (count, amount) = self.import_utxos_to_transaction_service(found_outputs).await?; num_recovered = num_recovered.saturating_add(count); total_amount += amount; } trace!( target: LOG_TARGET, "bulletproof rewind profile - streamed {} outputs in {} ms", total_scanned, utxo_next_await_profiling.iter().fold(0, |acc, &x| acc + x.as_millis()), ); trace!( target: LOG_TARGET, "bulletproof rewind profile - scanned {} outputs in {} ms", total_scanned, scan_for_outputs_profiling.iter().fold(0, |acc, &x| acc + x.as_millis()), ); self.update_scanning_progress_in_db(last_utxo_index, total_amount, num_recovered, end_header_hash) .await?; self.publish_event(UtxoScannerEvent::Progress { current_index: (output_mmr_size - 1), total_index: (output_mmr_size - 1), }); Ok(total_scanned as u64) } async fn update_scanning_progress_in_db( &self, last_utxo_index: u64, total_amount: MicroTari, num_recovered: u64, end_header_hash: Vec<u8>, ) -> Result<(),
UtxoScannerError> { let mut meta_data = self.get_metadata().await?.unwrap_or_default(); meta_data.height_hash = end_header_hash; meta_data.number_of_utxos += num_recovered; meta_data.utxo_index = last_utxo_index; meta_data.total_amount += total_amount; self.set_metadata(meta_data).await?; Ok(()) } async fn scan_for_outputs( &mut self, outputs: Vec<TransactionOutput>, ) -> Result<Vec<(UnblindedOutput, String)>, UtxoScannerError> { let mut found_outputs: Vec<(UnblindedOutput, String)> = Vec::new(); if self.mode == UtxoScannerMode::Recovery { found_outputs.append( &mut self .resources .output_manager_service .scan_for_recoverable_outputs(outputs.clone()) .await? .into_iter() .map(|v| (v, format!("Recovered on {}.", Utc::now().naive_utc()))) .collect(), ); }; found_outputs.append( &mut self .resources .output_manager_service .scan_outputs_for_one_sided_payments(outputs.clone()) .await? .into_iter() .map(|v| { ( v, format!("Detected one-sided transaction on {}.", Utc::now().naive_utc()), ) }) .collect(), ); Ok(found_outputs) } async fn import_utxos_to_transaction_service( &mut self, utxos: Vec<(UnblindedOutput, String)>, ) -> Result<(u64, MicroTari), UtxoScannerError> { let mut num_recovered = 0u64; let mut total_amount = MicroTari::from(0); let source_public_key = self.resources.node_identity.public_key().clone(); for uo in utxos { match self .import_unblinded_utxo_to_transaction_service(uo.0.clone(), &source_public_key, uo.1) .await { Ok(_) => { num_recovered = num_recovered.saturating_add(1); total_amount += uo.0.value; }, Err(e) => return Err(UtxoScannerError::UtxoImportError(e.to_string())), } } Ok((num_recovered, total_amount)) } fn get_db_mode_key(&self) -> String { match self.mode { UtxoScannerMode::Recovery => RECOVERY_KEY.to_owned(), UtxoScannerMode::Scanning => SCANNING_KEY.to_owned(), } } async fn set_metadata(&self, data: ScanningMetadata) -> Result<(), UtxoScannerError> { let total_key = self.get_db_mode_key(); let db_value = serde_json::to_string(&data)?; self.resources.db.set_client_key_value(total_key, db_value).await?; Ok(()) } async fn get_metadata(&self) -> Result<Option<ScanningMetadata>, UtxoScannerError> { let total_key = self.get_db_mode_key(); let value: Option<String> = self.resources.db.get_client_key_from_str(total_key).await?; match value { None => Ok(None), Some(v) => Ok(serde_json::from_str(&v)?), } } async fn clear_db(&self) -> Result<(), UtxoScannerError> { let total_key = self.get_db_mode_key(); let _ = self.resources.db.clear_client_value(total_key).await?; Ok(()) } fn publish_event(&self, event: UtxoScannerEvent) { let _ = self.event_sender.send(event); } /// A faux incoming transaction will be created to provide a record of the event of importing a UTXO. The TxId of /// the generated transaction is returned. pub async fn import_unblinded_utxo_to_transaction_service( &mut self, unblinded_output: UnblindedOutput, source_public_key: &CommsPublicKey, message: String, ) -> Result<TxId, WalletError> { let tx_id = self .resources .transaction_service .import_utxo( unblinded_output.value, source_public_key.clone(), message, Some(unblinded_output.features.maturity), ) .await?; info!( target: LOG_TARGET, "UTXO (Commitment: {}) imported into wallet", unblinded_output .as_transaction_input(&self.resources.factories.commitment)? 
.commitment .to_hex() ); Ok(tx_id) } pub async fn run(mut self) -> Result<(), UtxoScannerError> { loop { if self.shutdown_signal.is_triggered() { // if running is set to false, we know its been canceled upstream so lets exit the loop return Ok(()); } match self.get_next_peer() { Some(peer) => match self.attempt_sync(peer.clone()).await { Ok((total_scanned, final_utxo_pos, elapsed)) => { debug!(target: LOG_TARGET, "Scanned to UTXO #{}", final_utxo_pos); self.finalize(total_scanned, final_utxo_pos, elapsed).await?; return Ok(()); }, Err(e) => { warn!( target: LOG_TARGET, "Failed to scan UTXO's from base node {}: {}", peer, e ); self.publish_event(UtxoScannerEvent::ScanningRoundFailed { num_retries: self.num_retries, retry_limit: self.retry_limit, error: e.to_string(), }); continue; }, }, None => { self.publish_event(UtxoScannerEvent::ScanningRoundFailed { num_retries: self.num_retries, retry_limit: self.retry_limit, error: "No new peers to try after this round".to_string(), }); if self.num_retries >= self.retry_limit { self.publish_event(UtxoScannerEvent::ScanningFailed); return Err(UtxoScannerError::UtxoScanningError(format!( "Failed to scan UTXO's after {} attempt(s) using all {} sync peer(s). Aborting...", self.num_retries, self.peer_seeds.len() ))); } self.num_retries += 1; // Reset peer index to try connect to the first peer again self.peer_index = 0; }, } } } fn get_next_peer(&mut self) -> Option<NodeId>
{ let peer = self.peer_seeds.get(self.peer_index).map(NodeId::from_public_key); self.peer_index += 1; peer }
identifier_body
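The `run` loop and `get_next_peer` body above implement a simple round-robin retry scheme: walk the seed-peer list once per round, and when the list is exhausted, start another round until `retry_limit` is reached. A simplified sketch with stand-in types (the real task dials each peer and attempts a sync where this sketch just succeeds):

```rust
// Simplified stand-ins for the scanner's peer bookkeeping.
struct Scanner {
    peer_seeds: Vec<String>,
    peer_index: usize,
    num_retries: usize,
    retry_limit: usize,
}

impl Scanner {
    // Same shape as the `get_next_peer` body above: hand out seeds in order,
    // returning `None` once this round's list is exhausted.
    fn get_next_peer(&mut self) -> Option<String> {
        let peer = self.peer_seeds.get(self.peer_index).cloned();
        self.peer_index += 1;
        peer
    }

    fn run(&mut self) -> Result<String, String> {
        loop {
            match self.get_next_peer() {
                Some(peer) => return Ok(peer), // a real sync attempt goes here
                None => {
                    if self.num_retries >= self.retry_limit {
                        return Err(format!(
                            "gave up after {} round(s) over {} peer(s)",
                            self.num_retries,
                            self.peer_seeds.len()
                        ));
                    }
                    self.num_retries += 1;
                    self.peer_index = 0; // start the next round at the first peer
                }
            }
        }
    }
}

fn main() {
    let mut s = Scanner {
        peer_seeds: vec!["seed-node-1".into()],
        peer_index: 0,
        num_retries: 0,
        retry_limit: 3,
    };
    println!("synced with {:?}", s.run());
}
```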
utxo_scanner_task.rs
// peer. In the common case, we wait for start index. if start_index >= output_mmr_size - 1 { debug!( target: LOG_TARGET, "Scanning complete UTXO #{} in {:.2?}", start_index, timer.elapsed() ); return Ok((total_scanned, start_index, timer.elapsed())); } let num_scanned = self.scan_utxos(&mut client, start_index, tip_header).await?; if num_scanned == 0 { return Err(UtxoScannerError::UtxoScanningError( "Peer returned 0 UTXOs to scan".to_string(), )); } debug!( target: LOG_TARGET, "Scanning round completed UTXO #{} in {:.2?} ({} scanned)", output_mmr_size, timer.elapsed(), num_scanned ); // let num_scanned = 0; total_scanned += num_scanned; // return Ok((total_scanned, start_index, timer.elapsed())); } } async fn get_chain_tip_header(&self, client: &mut BaseNodeSyncRpcClient) -> Result<BlockHeader, UtxoScannerError> { let chain_metadata = client.get_chain_metadata().await?; let chain_height = chain_metadata.height_of_longest_chain(); let end_header = client.get_header_by_height(chain_height).await?; let end_header = BlockHeader::try_from(end_header).map_err(|_| UtxoScannerError::ConversionError)?; Ok(end_header) } async fn get_start_utxo_mmr_pos(&self, client: &mut BaseNodeSyncRpcClient) -> Result<u64, UtxoScannerError> { let metadata = match self.get_metadata().await? { None => { let birthday_metadata = self.get_birthday_metadata(client).await?; self.set_metadata(birthday_metadata.clone()).await?; return Ok(birthday_metadata.utxo_index); }, Some(m) => m, }; // if it's none, we return 0 above. let request = FindChainSplitRequest { block_hashes: vec![metadata.height_hash], header_count: 1, }; // this returns the index of the vec of hashes we sent it, that is the last hash it knows of. match client.find_chain_split(request).await { Ok(_) => Ok(metadata.utxo_index + 1), Err(RpcError::RequestFailed(err)) if err.as_status_code().is_not_found() => { warn!(target: LOG_TARGET, "Reorg detected: {}", err); // The node does not know of the last hash we scanned, thus we had a chain split. 
// We now start at the wallet birthday again let birthday_metadata = self.get_birthday_metadata(client).await?; Ok(birthday_metadata.utxo_index) }, Err(err) => Err(err.into()), } } async fn scan_utxos( &mut self, client: &mut BaseNodeSyncRpcClient, start_mmr_leaf_index: u64, end_header: BlockHeader, ) -> Result<u64, UtxoScannerError> { debug!( target: LOG_TARGET, "Scanning UTXO's from #{} to #{} (height {})", start_mmr_leaf_index, end_header.output_mmr_size, end_header.height ); let end_header_hash = end_header.hash(); let output_mmr_size = end_header.output_mmr_size; let mut num_recovered = 0u64; let mut total_amount = MicroTari::from(0); let mut total_scanned = 0; self.publish_event(UtxoScannerEvent::Progress { current_index: start_mmr_leaf_index, total_index: (output_mmr_size - 1), }); let request = SyncUtxosRequest { start: start_mmr_leaf_index, end_header_hash: end_header_hash.clone(), include_pruned_utxos: false, include_deleted_bitmaps: false, }; let start = Instant::now(); let utxo_stream = client.sync_utxos(request).await?; trace!( target: LOG_TARGET, "bulletproof rewind profile - UTXO stream request time {} ms", start.elapsed().as_millis(), ); // We download in chunks for improved streaming efficiency const CHUNK_SIZE: usize = 125; let mut utxo_stream = utxo_stream.chunks(CHUNK_SIZE); const COMMIT_EVERY_N: u64 = (1000_i64 / CHUNK_SIZE as i64) as u64; let mut last_utxo_index = 0u64; let mut iteration_count = 0u64; let mut utxo_next_await_profiling = Vec::new(); let mut scan_for_outputs_profiling = Vec::new(); while let Some(response) = { let start = Instant::now(); let utxo_stream_next = utxo_stream.next().await; utxo_next_await_profiling.push(start.elapsed()); utxo_stream_next } { if self.shutdown_signal.is_triggered() { // if running is set to false, we know its been canceled upstream so lets exit the loop return Ok(total_scanned as u64); } let (outputs, utxo_index) = convert_response_to_transaction_outputs(response, last_utxo_index)?; last_utxo_index = utxo_index; total_scanned += outputs.len(); iteration_count += 1; let start = Instant::now(); let found_outputs = self.scan_for_outputs(outputs).await?; scan_for_outputs_profiling.push(start.elapsed()); // Reduce the number of db hits by only persisting progress every N iterations if iteration_count % COMMIT_EVERY_N == 0 || last_utxo_index >= output_mmr_size - 1 { self.publish_event(UtxoScannerEvent::Progress { current_index: last_utxo_index, total_index: (output_mmr_size - 1), }); self.update_scanning_progress_in_db( last_utxo_index, total_amount, num_recovered, end_header_hash.clone(), ) .await?; } let (count, amount) = self.import_utxos_to_transaction_service(found_outputs).await?; num_recovered = num_recovered.saturating_add(count); total_amount += amount; } trace!( target: LOG_TARGET, "bulletproof rewind profile - streamed {} outputs in {} ms", total_scanned, utxo_next_await_profiling.iter().fold(0, |acc, &x| acc + x.as_millis()), ); trace!( target: LOG_TARGET, "bulletproof rewind profile - scanned {} outputs in {} ms", total_scanned, scan_for_outputs_profiling.iter().fold(0, |acc, &x| acc + x.as_millis()), ); self.update_scanning_progress_in_db(last_utxo_index, total_amount, num_recovered, end_header_hash) .await?; self.publish_event(UtxoScannerEvent::Progress { current_index: (output_mmr_size - 1), total_index: (output_mmr_size - 1), }); Ok(total_scanned as u64) } async fn update_scanning_progress_in_db( &self, last_utxo_index: u64, total_amount: MicroTari, num_recovered: u64, end_header_hash: Vec<u8>, ) -> Result<(),
UtxoScannerError> { let mut meta_data = self.get_metadata().await?.unwrap_or_default(); meta_data.height_hash = end_header_hash; meta_data.number_of_utxos += num_recovered; meta_data.utxo_index = last_utxo_index; meta_data.total_amount += total_amount; self.set_metadata(meta_data).await?; Ok(()) } async fn scan_for_outputs( &mut self, outputs: Vec<TransactionOutput>, ) -> Result<Vec<(UnblindedOutput, String)>, UtxoScannerError> { let mut found_outputs: Vec<(UnblindedOutput, String)> = Vec::new(); if self.mode == UtxoScannerMode::Recovery { found_outputs.append( &mut self .resources .output_manager_service .scan_for_recoverable_outputs(outputs.clone()) .await? .into_iter() .map(|v| (v, format!("Recovered on {}.", Utc::now().naive_utc()))) .collect(), ); }; found_outputs.append( &mut self .resources .output_manager_service .scan_outputs_for_one_sided_payments(outputs.clone()) .await? .into_iter() .map(|v| { ( v, format!("Detected one-sided transaction on {}.", Utc::now().naive_utc()), ) }) .collect(), ); Ok(found_outputs) } async fn import_utxos_to_transaction_service( &mut self, utxos: Vec<(UnblindedOutput, String)>, ) -> Result<(u64, MicroTari), UtxoScannerError> { let mut num_recovered = 0u64; let mut total_amount = MicroTari::from(0); let source_public_key = self.resources.node_identity.public_key().clone(); for uo in utxos { match self .import_unblinded_utxo_to_transaction_service(uo.0.clone(), &source_public_key, uo.1) .await { Ok(_) => { num_recovered = num_recovered.saturating_add(1); total_amount += uo.0.value; }, Err(e) => return Err(UtxoScannerError::UtxoImportError(e.to_string())), } } Ok((num_recovered, total_amount)) } fn get_db_mode_key(&self) -> String { match self.mode { UtxoScannerMode::Recovery => RECOVERY_KEY.to_owned(), UtxoScannerMode::Scanning => SCANNING_KEY.to_owned(), } } async fn set_metadata(&self, data: ScanningMetadata) -> Result<(), UtxoScannerError> { let total_key = self.get_db_mode_key(); let db_value = serde_json::to_string(&data)?; self.resources.db.set_client_key_value(total_key, db_value).await?; Ok(()) } async fn get_metadata(&self) -> Result<Option<ScanningMetadata>, UtxoScannerError> { let total_key = self.get_db_mode_key(); let value: Option<String> = self.resources.db.get_client_key_from_str(total_key).await?; match value { None => Ok(None), Some(v) => Ok(serde_json::from_str(&v)?), } } async fn clear_db(&self) -> Result<(), UtxoScannerError> { let total_key = self.get_db_mode_key(); let _ = self.resources.db.clear_client_value(total_key).await?; Ok(()) } fn publish_event(&self, event: UtxoScannerEvent) { let _ = self.event_sender.send(event); } /// A faux incoming transaction will be created to provide a record of the event of importing a UTXO. The TxId of /// the generated transaction is returned. pub async fn import_unblinded_utxo_to_transaction_service( &mut self, unblinded_output: UnblindedOutput, source_public_key: &CommsPublicKey, message: String, ) -> Result<TxId, WalletError> { let tx_id = self .resources .transaction_service .import_utxo( unblinded_output.value, source_public_key.clone(), message, Some(unblinded_output.features.maturity), ) .await?; info!( target: LOG_TARGET, "UTXO (Commitment: {}) imported into wallet", unblinded_output .as_transaction_input(&self.resources.factories.commitment)? 
.commitment .to_hex() ); Ok(tx_id) } pub async fn run(mut self) -> Result<(), UtxoScannerError> { loop { if self.shutdown_signal.is_triggered() { // if running is set to false, we know its been canceled upstream so lets exit the loop return Ok(()); } match self.get_next_peer() { Some(peer) => match self.attempt_sync(peer.clone()).await { Ok((total_scanned, final_utxo_pos, elapsed)) => { debug!(target: LOG_TARGET, "Scanned to UTXO #{}", final_utxo_pos); self.finalize(total_scanned, final_utxo_pos, elapsed).await?; return Ok(()); }, Err(e) => { warn!( target: LOG_TARGET, "Failed to scan UTXO's from base node {}: {}", peer, e ); self.publish_event(UtxoScannerEvent::ScanningRoundFailed { num_retries: self.num_retries, retry_limit: self.retry_limit, error: e.to_string(), }); continue; }, }, None => { self.publish_event(UtxoScannerEvent::ScanningRoundFailed { num_retries: self.num_retries, retry_limit: self.retry_limit, error: "No new peers to try after this round".to_string(), }); if self.num_retries >= self.retry_limit { self.publish_event(UtxoScannerEvent::ScanningFailed); return Err(UtxoScannerError::UtxoScanningError(format!( "Failed to scan UTXO's after {} attempt(s) using all {} sync peer(s). Aborting...", self.num_retries, self.peer_seeds.len() ))); } self.num_retries += 1; // Reset peer index to try connect to the first peer again self.peer_index = 0; }, } } } fn get_next_peer(&mut self) -> Option<NodeId> { let peer = self.peer_seeds.get(self.peer_index).map(NodeId::from_public_key); self.peer_index += 1; peer } async fn get_birthday_metadata( &self, client: &mut BaseNodeSyncRpcClient, ) -> Result<ScanningMetadata, UtxoScannerError> { let birthday = self.resources.db.get_wallet_birthday().await?; // Calculate the unix epoch time of two days before the wallet birthday. This is to avoid any weird time zone // issues let epoch_time = (birthday.saturating_sub(2) as u64) * 60 * 60 * 24; let block_height = match client.get_height_at_time(epoch_time).await { Ok(b) => b, Err(e) => { warn!( target: LOG_TARGET, "Problem requesting `height_at_time` from Base Node: {}", e ); 0 }, }; let header = client.get_header_by_height(block_height).await?; let header = BlockHeader::try_from(header).map_err(|_| UtxoScannerError::ConversionError)?; info!( target: LOG_TARGET, "Fresh wallet recovery starting at Block {}", block_height ); Ok(ScanningMetadata { total_amount: Default::default(), number_of_utxos: 0, utxo_index: header.output_mmr_size, height_hash: header.hash(), }) } } fn convert_response_to_transaction_outputs( response: Vec<Result<proto::base_node::SyncUtxosResponse, RpcStatus>>, last_utxo_index: u64, ) -> Result<(Vec<TransactionOutput>, u64), UtxoScannerError> { let response: Vec<proto::base_node::SyncUtxosResponse> = response .into_iter() .map(|v| v.map_err(|e| UtxoScannerError::RpcStatus(e.to_string()))) .collect::<Result<Vec<_>, _>>()?;
let current_utxo_index = response
        // Assumes correct ordering which is otherwise not required for this protocol
        .last()
        .ok_or_else(|| {
random_line_split
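The `run` loop above rotates through `peer_seeds` via `get_next_peer`, and once every peer has been tried it bumps `num_retries`, resets `peer_index` to zero, and starts another round until the retry limit is hit. A minimal sketch of that rotation with the sync call stubbed out — `Peer`, `Scanner`, and `try_sync` are hypothetical stand-ins for `NodeId`, the scanner task, and `attempt_sync`:

```rust
type Peer = String;

struct Scanner {
    peer_seeds: Vec<Peer>,
    peer_index: usize,
    num_retries: usize,
    retry_limit: usize,
}

impl Scanner {
    // Mirrors `get_next_peer`: hand out the next seed and advance the cursor.
    fn next_peer(&mut self) -> Option<Peer> {
        let peer = self.peer_seeds.get(self.peer_index).cloned();
        self.peer_index += 1;
        peer
    }

    fn run(&mut self) -> Result<(), String> {
        loop {
            match self.next_peer() {
                // A failed attempt just falls through to the next peer.
                Some(peer) => {
                    if try_sync(&peer) {
                        return Ok(());
                    }
                }
                // All peers tried this round: give up at the retry limit,
                // otherwise start another round from the first peer.
                None => {
                    if self.num_retries >= self.retry_limit {
                        return Err(format!(
                            "failed after {} attempt(s) using all {} peer(s)",
                            self.num_retries,
                            self.peer_seeds.len()
                        ));
                    }
                    self.num_retries += 1;
                    self.peer_index = 0;
                }
            }
        }
    }
}

// Stub: a real implementation would run the RPC sync against the peer.
fn try_sync(_peer: &Peer) -> bool {
    false
}

fn main() {
    let mut scanner = Scanner {
        peer_seeds: vec!["seed-a".into(), "seed-b".into()],
        peer_index: 0,
        num_retries: 0,
        retry_limit: 1,
    };
    // With every sync failing, the loop exhausts the seeds, retries once,
    // and then aborts with an error.
    assert!(scanner.run().is_err());
}
```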
cli.rs
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ //! Command-line interface for the main entry point. use clap::Clap; use log::{debug, error, info, LevelFilter}; use std::fs::File; use std::io::{Read, Write}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; use tonic::transport::Server; use crate::commit::Commit; use crate::logdir::LogdirLoader; use crate::proto::tensorboard::data; use crate::server::DataProviderHandler; use crate::types::PluginSamplingHint; use data::tensor_board_data_provider_server::TensorBoardDataProviderServer; pub mod dynamic_logdir; use dynamic_logdir::DynLogdir; #[derive(Clap, Debug)] #[clap(name = "rustboard", version = crate::VERSION)] struct Opts { /// Log directory to load /// /// Directory to recursively scan for event files (files matching the `*tfevents*` glob). This /// directory, its descendants, and its event files will be periodically polled for new data. /// /// If this log directory is invalid or unsupported, exits with status 8. #[clap(long, setting(clap::ArgSettings::AllowEmptyValues))] logdir: PathBuf, /// Bind to this host name /// /// Host to bind this server to. May be an IPv4 address (e.g., 127.0.0.1 or 0.0.0.0), an IPv6 /// address (e.g., ::1 or ::0), or a string like `localhost` to pass to `getaddrinfo(3)`. #[clap(long, default_value = "localhost")] host: String, /// Bind to this port /// /// Port to bind this server to. Use `0` to request an arbitrary free port from the OS. #[clap(long, default_value = "6806")] port: u16, /// Seconds to sleep between reloads, or "once" /// /// Number of seconds to wait between finishing one load cycle and starting the next one. This /// does not include the time for the reload itself. If "once", data will be loaded only once. #[clap(long, default_value = "5", value_name = "secs")] reload: ReloadStrategy, /// Use verbose output (-vv for very verbose output) #[clap(long = "verbose", short, parse(from_occurrences))] verbosity: u32, /// Kill this server once stdin is closed /// /// While this server is running, read stdin to end of file and then kill the server. Used to /// portably ensure that the server exits when the parent process dies, even due to a crash. /// Don't set this if stdin is connected to a tty and the process will be backgrounded, since /// then the server will receive `SIGTTIN` and its process will be stopped (in the `SIGSTOP` /// sense) but not killed. #[clap(long)] die_after_stdin: bool, /// Write bound port to this file /// /// Once a server socket is opened, write the port on which it's listening to the file at this /// path. Useful with `--port 0`. Port will be written as ASCII decimal followed by a newline /// (e.g., "6806\n"). If the server fails to start, this file may not be written at all. 
If the /// port file is specified but cannot be written, the server will die. /// /// This also suppresses the "listening on HOST:PORT" line that is otherwise written to stderr /// when the server starts. #[clap(long)] port_file: Option<PathBuf>, /// Write startup errors to this file /// /// If the logdir is invalid or unsupported, write the error message to this file instead of to /// stderr. That way, you can capture this output while still keeping stderr open for normal /// logging. #[clap(long)] error_file: Option<PathBuf>, /// Checksum all records (negate with `--no-checksum`) /// /// With `--checksum`, every record will be checksummed before being parsed. With /// `--no-checksum` (the default), records are only checksummed if parsing fails. Skipping /// checksums for records that successfully parse can be significantly faster, but also means /// that some bit flips may not be detected. #[clap(long, multiple_occurrences = true, overrides_with = "no_checksum")] checksum: bool, /// Only checksum records that fail to parse /// /// Negates `--checksum`. This is the default. #[clap( long, multiple_occurrences = true, overrides_with = "checksum", hidden = true )] #[allow(unused)] no_checksum: bool, /// Set explicit series sampling /// /// A comma separated list of `plugin_name=num_samples` pairs to explicitly specify how many /// samples to keep per tag for the specified plugin. For unspecified plugins, series are /// randomly downsampled to reasonable values to prevent out-of-memory errors in long-running /// jobs. Each `num_samples` may be the special token `all` to retain all data without /// downsampling. For instance, `--samples_per_plugin=scalars=500,images=all,audio=0` keeps 500 /// events in each scalar series, all of the images, and none of the audio. #[clap(long, default_value = "", setting(clap::ArgSettings::AllowEmptyValues))] samples_per_plugin: PluginSamplingHint, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] enum ReloadStrategy { Loop { delay: Duration }, Once, } impl FromStr for ReloadStrategy { type Err = <u64 as FromStr>::Err; fn from_str(s: &str) -> Result<Self, Self::Err> { if s == "once" { Ok(ReloadStrategy::Once) } else { Ok(ReloadStrategy::Loop { delay: Duration::from_secs(s.parse()?), }) } } } /// Exit code for failure of [`DynLogdir::new`]. // Keep in sync with docs on `Opts::logdir`. const EXIT_BAD_LOGDIR: i32 = 8; const EXIT_FAILED_TO_BIND: i32 = 9; #[tokio::main] pub async fn main() -> Result<(), Box<dyn std::error::Error>> { let opts = Opts::parse(); init_logging(match opts.verbosity { 0 => LevelFilter::Warn, 1 => LevelFilter::Info, _ => LevelFilter::max(), }); debug!("Parsed options: {:?}", opts); let data_location = opts.logdir.display().to_string(); let error_file_path = opts.error_file.as_ref().map(PathBuf::as_ref); let reflection = tonic_reflection::server::Builder::configure() .register_encoded_file_descriptor_set(crate::proto::FILE_DESCRIPTOR_SET) .build() .expect("failed to create gRPC reflection servicer"); // Create the logdir outside an async runtime (see docs for `DynLogdir::new`). let raw_logdir = opts.logdir; let logdir = tokio::task::spawn_blocking(|| DynLogdir::new(raw_logdir)) .await? 
.unwrap_or_else(|e| { write_startup_error(error_file_path, &e.to_string()); std::process::exit(EXIT_BAD_LOGDIR); }); if opts.die_after_stdin { thread::Builder::new() .name("StdinWatcher".to_string()) .spawn(die_after_stdin) .expect("failed to spawn stdin watcher thread"); } let addr = (opts.host.as_str(), opts.port); let listener = TcpListener::bind(addr).await.unwrap_or_else(|e| { let msg = format!("failed to bind to {:?}: {}", addr, e); write_startup_error(error_file_path, &msg); std::process::exit(EXIT_FAILED_TO_BIND); }); let bound = listener.local_addr()?; if let Some(port_file) = opts.port_file { let port = bound.port(); if let Err(e) = write_port_file(&port_file, port) { error!( "Failed to write port \"{}\" to {}: {}", port, port_file.display(), e ); std::process::exit(1); } info!("Wrote port \"{}\" to {}", port, port_file.display()); } else { eprintln!("listening on {:?}", bound); } let commit = Arc::new(Commit::new()); let psh_ref = Arc::new(opts.samples_per_plugin); thread::Builder::new() .name("Reloader".to_string()) .spawn({ let reload_strategy = opts.reload; let checksum = opts.checksum; let commit = Arc::clone(&commit); move || { let mut loader = LogdirLoader::new(&commit, logdir, 0, psh_ref); // Checksum only if `--checksum` given (i.e., off by default). loader.checksum(checksum); loop { info!("Starting load cycle"); let start = Instant::now(); loader.reload(); let end = Instant::now(); info!("Finished load cycle ({:?})", end - start); match reload_strategy { ReloadStrategy::Loop { delay } => thread::sleep(delay), ReloadStrategy::Once => break, }; } } }) .expect("failed to spawn reloader thread"); let handler = DataProviderHandler { data_location, commit, }; Server::builder() .add_service(TensorBoardDataProviderServer::new(handler)) .add_service(reflection) .serve_with_incoming(TcpListenerStream::new(listener)) .await?; Ok(()) } /// Installs a logging handler whose behavior is determined by the `RUST_LOG` environment variable /// (per <https://docs.rs/env_logger> semantics), or by including all logs at `default_log_level` /// or above if `RUST_LOG_LEVEL` is not given. fn init_logging(default_log_level: LevelFilter) { use env_logger::{Builder, Env}; Builder::from_env(Env::default().default_filter_or(default_log_level.to_string())).init(); } /// Locks stdin and reads it to EOF, then exits the process. fn die_after_stdin() { let stdin = std::io::stdin(); let stdin_lock = stdin.lock(); for _ in stdin_lock.bytes() {} info!("Stdin closed; exiting"); std::process::exit(0); } /// Writes `port` to file `path` as an ASCII decimal followed by newline. fn write_port_file(path: &Path, port: u16) -> std::io::Result<()> { let mut f = File::create(path)?; writeln!(f, "{}", port)?; f.flush()?; Ok(()) } /// Writes error to the given file, or to stderr as a fallback. fn write_startup_error(path: Option<&Path>, error: &str) { let write_to_file = |path: &Path| -> std::io::Result<()> { let mut f = File::create(path)?; writeln!(f, "{}", error)?; f.flush()?; Ok(()) }; if let Some(p) = path { if let Err(e) = write_to_file(p)
else { return; } } // fall back to stderr if no path given or if write failed eprintln!("fatal: {}", error); } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_reload() { assert_eq!("once".parse::<ReloadStrategy>(), Ok(ReloadStrategy::Once)); assert_eq!( "5".parse::<ReloadStrategy>(), Ok(ReloadStrategy::Loop { delay: Duration::from_secs(5) }) ); "5s".parse::<ReloadStrategy>() .expect_err("explicit \"s\" trailer should be forbidden"); } }
{ info!("Failed to write error to {:?}: {}", p, e); }
conditional_block
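The `write_startup_error` helper in `cli.rs` tries the `--error_file` path first and falls back to stderr only when no path was given or the file write failed. A minimal standalone sketch of that fallback; `report_error` is a hypothetical name, and the failed-write notice goes to stderr here where the original uses `info!`:

```rust
use std::fs::File;
use std::io::Write;
use std::path::Path;

// Try the file first; fall through to stderr if no path was given or the
// write failed, so the error is never silently dropped.
fn report_error(path: Option<&Path>, error: &str) {
    if let Some(p) = path {
        let wrote = File::create(p).and_then(|mut f| {
            writeln!(f, "{}", error)?;
            f.flush()
        });
        match wrote {
            Ok(()) => return, // delivered to the file; nothing else to do
            Err(e) => eprintln!("failed to write error to {:?}: {}", p, e),
        }
    }
    eprintln!("fatal: {}", error);
}

fn main() {
    report_error(None, "logdir is invalid"); // prints "fatal: ..." to stderr
}
```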
cli.rs
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ //! Command-line interface for the main entry point. use clap::Clap; use log::{debug, error, info, LevelFilter}; use std::fs::File; use std::io::{Read, Write}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; use tonic::transport::Server; use crate::commit::Commit; use crate::logdir::LogdirLoader; use crate::proto::tensorboard::data; use crate::server::DataProviderHandler; use crate::types::PluginSamplingHint; use data::tensor_board_data_provider_server::TensorBoardDataProviderServer; pub mod dynamic_logdir; use dynamic_logdir::DynLogdir; #[derive(Clap, Debug)] #[clap(name = "rustboard", version = crate::VERSION)] struct Opts { /// Log directory to load /// /// Directory to recursively scan for event files (files matching the `*tfevents*` glob). This /// directory, its descendants, and its event files will be periodically polled for new data. /// /// If this log directory is invalid or unsupported, exits with status 8. #[clap(long, setting(clap::ArgSettings::AllowEmptyValues))] logdir: PathBuf, /// Bind to this host name /// /// Host to bind this server to. May be an IPv4 address (e.g., 127.0.0.1 or 0.0.0.0), an IPv6 /// address (e.g., ::1 or ::0), or a string like `localhost` to pass to `getaddrinfo(3)`. #[clap(long, default_value = "localhost")] host: String, /// Bind to this port /// /// Port to bind this server to. Use `0` to request an arbitrary free port from the OS. #[clap(long, default_value = "6806")] port: u16, /// Seconds to sleep between reloads, or "once" /// /// Number of seconds to wait between finishing one load cycle and starting the next one. This /// does not include the time for the reload itself. If "once", data will be loaded only once. #[clap(long, default_value = "5", value_name = "secs")] reload: ReloadStrategy, /// Use verbose output (-vv for very verbose output) #[clap(long = "verbose", short, parse(from_occurrences))] verbosity: u32, /// Kill this server once stdin is closed /// /// While this server is running, read stdin to end of file and then kill the server. Used to /// portably ensure that the server exits when the parent process dies, even due to a crash. /// Don't set this if stdin is connected to a tty and the process will be backgrounded, since /// then the server will receive `SIGTTIN` and its process will be stopped (in the `SIGSTOP` /// sense) but not killed. #[clap(long)] die_after_stdin: bool, /// Write bound port to this file /// /// Once a server socket is opened, write the port on which it's listening to the file at this /// path. Useful with `--port 0`. Port will be written as ASCII decimal followed by a newline /// (e.g., "6806\n"). If the server fails to start, this file may not be written at all. 
If the /// port file is specified but cannot be written, the server will die. /// /// This also suppresses the "listening on HOST:PORT" line that is otherwise written to stderr /// when the server starts. #[clap(long)] port_file: Option<PathBuf>, /// Write startup errors to this file /// /// If the logdir is invalid or unsupported, write the error message to this file instead of to /// stderr. That way, you can capture this output while still keeping stderr open for normal /// logging. #[clap(long)] error_file: Option<PathBuf>, /// Checksum all records (negate with `--no-checksum`) /// /// With `--checksum`, every record will be checksummed before being parsed. With /// `--no-checksum` (the default), records are only checksummed if parsing fails. Skipping /// checksums for records that successfully parse can be significantly faster, but also means /// that some bit flips may not be detected. #[clap(long, multiple_occurrences = true, overrides_with = "no_checksum")] checksum: bool, /// Only checksum records that fail to parse /// /// Negates `--checksum`. This is the default. #[clap( long, multiple_occurrences = true, overrides_with = "checksum", hidden = true )] #[allow(unused)] no_checksum: bool, /// Set explicit series sampling /// /// A comma separated list of `plugin_name=num_samples` pairs to explicitly specify how many /// samples to keep per tag for the specified plugin. For unspecified plugins, series are /// randomly downsampled to reasonable values to prevent out-of-memory errors in long-running /// jobs. Each `num_samples` may be the special token `all` to retain all data without /// downsampling. For instance, `--samples_per_plugin=scalars=500,images=all,audio=0` keeps 500 /// events in each scalar series, all of the images, and none of the audio. #[clap(long, default_value = "", setting(clap::ArgSettings::AllowEmptyValues))] samples_per_plugin: PluginSamplingHint, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] enum ReloadStrategy { Loop { delay: Duration }, Once, } impl FromStr for ReloadStrategy { type Err = <u64 as FromStr>::Err; fn from_str(s: &str) -> Result<Self, Self::Err>
} /// Exit code for failure of [`DynLogdir::new`]. // Keep in sync with docs on `Opts::logdir`. const EXIT_BAD_LOGDIR: i32 = 8; const EXIT_FAILED_TO_BIND: i32 = 9; #[tokio::main] pub async fn main() -> Result<(), Box<dyn std::error::Error>> { let opts = Opts::parse(); init_logging(match opts.verbosity { 0 => LevelFilter::Warn, 1 => LevelFilter::Info, _ => LevelFilter::max(), }); debug!("Parsed options: {:?}", opts); let data_location = opts.logdir.display().to_string(); let error_file_path = opts.error_file.as_ref().map(PathBuf::as_ref); let reflection = tonic_reflection::server::Builder::configure() .register_encoded_file_descriptor_set(crate::proto::FILE_DESCRIPTOR_SET) .build() .expect("failed to create gRPC reflection servicer"); // Create the logdir outside an async runtime (see docs for `DynLogdir::new`). let raw_logdir = opts.logdir; let logdir = tokio::task::spawn_blocking(|| DynLogdir::new(raw_logdir)) .await? .unwrap_or_else(|e| { write_startup_error(error_file_path, &e.to_string()); std::process::exit(EXIT_BAD_LOGDIR); }); if opts.die_after_stdin { thread::Builder::new() .name("StdinWatcher".to_string()) .spawn(die_after_stdin) .expect("failed to spawn stdin watcher thread"); } let addr = (opts.host.as_str(), opts.port); let listener = TcpListener::bind(addr).await.unwrap_or_else(|e| { let msg = format!("failed to bind to {:?}: {}", addr, e); write_startup_error(error_file_path, &msg); std::process::exit(EXIT_FAILED_TO_BIND); }); let bound = listener.local_addr()?; if let Some(port_file) = opts.port_file { let port = bound.port(); if let Err(e) = write_port_file(&port_file, port) { error!( "Failed to write port \"{}\" to {}: {}", port, port_file.display(), e ); std::process::exit(1); } info!("Wrote port \"{}\" to {}", port, port_file.display()); } else { eprintln!("listening on {:?}", bound); } let commit = Arc::new(Commit::new()); let psh_ref = Arc::new(opts.samples_per_plugin); thread::Builder::new() .name("Reloader".to_string()) .spawn({ let reload_strategy = opts.reload; let checksum = opts.checksum; let commit = Arc::clone(&commit); move || { let mut loader = LogdirLoader::new(&commit, logdir, 0, psh_ref); // Checksum only if `--checksum` given (i.e., off by default). loader.checksum(checksum); loop { info!("Starting load cycle"); let start = Instant::now(); loader.reload(); let end = Instant::now(); info!("Finished load cycle ({:?})", end - start); match reload_strategy { ReloadStrategy::Loop { delay } => thread::sleep(delay), ReloadStrategy::Once => break, }; } } }) .expect("failed to spawn reloader thread"); let handler = DataProviderHandler { data_location, commit, }; Server::builder() .add_service(TensorBoardDataProviderServer::new(handler)) .add_service(reflection) .serve_with_incoming(TcpListenerStream::new(listener)) .await?; Ok(()) } /// Installs a logging handler whose behavior is determined by the `RUST_LOG` environment variable /// (per <https://docs.rs/env_logger> semantics), or by including all logs at `default_log_level` /// or above if `RUST_LOG_LEVEL` is not given. fn init_logging(default_log_level: LevelFilter) { use env_logger::{Builder, Env}; Builder::from_env(Env::default().default_filter_or(default_log_level.to_string())).init(); } /// Locks stdin and reads it to EOF, then exits the process. fn die_after_stdin() { let stdin = std::io::stdin(); let stdin_lock = stdin.lock(); for _ in stdin_lock.bytes() {} info!("Stdin closed; exiting"); std::process::exit(0); } /// Writes `port` to file `path` as an ASCII decimal followed by newline. 
fn write_port_file(path: &Path, port: u16) -> std::io::Result<()> { let mut f = File::create(path)?; writeln!(f, "{}", port)?; f.flush()?; Ok(()) } /// Writes error to the given file, or to stderr as a fallback. fn write_startup_error(path: Option<&Path>, error: &str) { let write_to_file = |path: &Path| -> std::io::Result<()> { let mut f = File::create(path)?; writeln!(f, "{}", error)?; f.flush()?; Ok(()) }; if let Some(p) = path { if let Err(e) = write_to_file(p) { info!("Failed to write error to {:?}: {}", p, e); } else { return; } } // fall back to stderr if no path given or if write failed eprintln!("fatal: {}", error); } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_reload() { assert_eq!("once".parse::<ReloadStrategy>(), Ok(ReloadStrategy::Once)); assert_eq!( "5".parse::<ReloadStrategy>(), Ok(ReloadStrategy::Loop { delay: Duration::from_secs(5) }) ); "5s".parse::<ReloadStrategy>() .expect_err("explicit \"s\" trailer should be forbidden"); } }
{ if s == "once" { Ok(ReloadStrategy::Once) } else { Ok(ReloadStrategy::Loop { delay: Duration::from_secs(s.parse()?), }) } }
identifier_body
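The `ReloadStrategy` parser accepts either the literal `once` or a bare number of seconds; `u64`'s `FromStr` rejects trailers such as `"5s"`, which the test at the bottom of the file relies on. A self-contained sketch of that parse, with `Reload` as a local stand-in for `ReloadStrategy`:

```rust
use std::str::FromStr;
use std::time::Duration;

#[derive(Debug, PartialEq, Eq)]
enum Reload {
    Loop { delay: Duration },
    Once,
}

impl FromStr for Reload {
    type Err = std::num::ParseIntError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "once" {
            Ok(Reload::Once)
        } else {
            // `s.parse::<u64>()` fails on anything but a plain decimal,
            // so "5s" is rejected rather than silently truncated.
            Ok(Reload::Loop {
                delay: Duration::from_secs(s.parse()?),
            })
        }
    }
}

fn main() {
    assert_eq!("once".parse(), Ok(Reload::Once));
    assert_eq!(
        "5".parse(),
        Ok(Reload::Loop { delay: Duration::from_secs(5) })
    );
    assert!("5s".parse::<Reload>().is_err());
}
```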
cli.rs
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ //! Command-line interface for the main entry point. use clap::Clap; use log::{debug, error, info, LevelFilter}; use std::fs::File; use std::io::{Read, Write}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; use tonic::transport::Server; use crate::commit::Commit; use crate::logdir::LogdirLoader; use crate::proto::tensorboard::data; use crate::server::DataProviderHandler; use crate::types::PluginSamplingHint; use data::tensor_board_data_provider_server::TensorBoardDataProviderServer; pub mod dynamic_logdir; use dynamic_logdir::DynLogdir; #[derive(Clap, Debug)] #[clap(name = "rustboard", version = crate::VERSION)] struct Opts { /// Log directory to load /// /// Directory to recursively scan for event files (files matching the `*tfevents*` glob). This /// directory, its descendants, and its event files will be periodically polled for new data. /// /// If this log directory is invalid or unsupported, exits with status 8. #[clap(long, setting(clap::ArgSettings::AllowEmptyValues))] logdir: PathBuf, /// Bind to this host name /// /// Host to bind this server to. May be an IPv4 address (e.g., 127.0.0.1 or 0.0.0.0), an IPv6 /// address (e.g., ::1 or ::0), or a string like `localhost` to pass to `getaddrinfo(3)`. #[clap(long, default_value = "localhost")] host: String, /// Bind to this port /// /// Port to bind this server to. Use `0` to request an arbitrary free port from the OS. #[clap(long, default_value = "6806")] port: u16, /// Seconds to sleep between reloads, or "once" /// /// Number of seconds to wait between finishing one load cycle and starting the next one. This /// does not include the time for the reload itself. If "once", data will be loaded only once. #[clap(long, default_value = "5", value_name = "secs")] reload: ReloadStrategy, /// Use verbose output (-vv for very verbose output) #[clap(long = "verbose", short, parse(from_occurrences))] verbosity: u32, /// Kill this server once stdin is closed /// /// While this server is running, read stdin to end of file and then kill the server. Used to /// portably ensure that the server exits when the parent process dies, even due to a crash. /// Don't set this if stdin is connected to a tty and the process will be backgrounded, since /// then the server will receive `SIGTTIN` and its process will be stopped (in the `SIGSTOP` /// sense) but not killed. #[clap(long)] die_after_stdin: bool, /// Write bound port to this file /// /// Once a server socket is opened, write the port on which it's listening to the file at this /// path. Useful with `--port 0`. Port will be written as ASCII decimal followed by a newline /// (e.g., "6806\n"). If the server fails to start, this file may not be written at all. 
If the /// port file is specified but cannot be written, the server will die. /// /// This also suppresses the "listening on HOST:PORT" line that is otherwise written to stderr /// when the server starts. #[clap(long)] port_file: Option<PathBuf>, /// Write startup errors to this file /// /// If the logdir is invalid or unsupported, write the error message to this file instead of to /// stderr. That way, you can capture this output while still keeping stderr open for normal /// logging. #[clap(long)] error_file: Option<PathBuf>, /// Checksum all records (negate with `--no-checksum`) /// /// With `--checksum`, every record will be checksummed before being parsed. With /// `--no-checksum` (the default), records are only checksummed if parsing fails. Skipping /// checksums for records that successfully parse can be significantly faster, but also means /// that some bit flips may not be detected. #[clap(long, multiple_occurrences = true, overrides_with = "no_checksum")] checksum: bool, /// Only checksum records that fail to parse /// /// Negates `--checksum`. This is the default. #[clap( long, multiple_occurrences = true, overrides_with = "checksum", hidden = true )] #[allow(unused)] no_checksum: bool, /// Set explicit series sampling /// /// A comma separated list of `plugin_name=num_samples` pairs to explicitly specify how many /// samples to keep per tag for the specified plugin. For unspecified plugins, series are /// randomly downsampled to reasonable values to prevent out-of-memory errors in long-running /// jobs. Each `num_samples` may be the special token `all` to retain all data without /// downsampling. For instance, `--samples_per_plugin=scalars=500,images=all,audio=0` keeps 500 /// events in each scalar series, all of the images, and none of the audio. #[clap(long, default_value = "", setting(clap::ArgSettings::AllowEmptyValues))] samples_per_plugin: PluginSamplingHint, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] enum ReloadStrategy { Loop { delay: Duration }, Once, } impl FromStr for ReloadStrategy { type Err = <u64 as FromStr>::Err; fn from_str(s: &str) -> Result<Self, Self::Err> { if s == "once" { Ok(ReloadStrategy::Once) } else { Ok(ReloadStrategy::Loop {
delay: Duration::from_secs(s.parse()?), }) } } } /// Exit code for failure of [`DynLogdir::new`]. // Keep in sync with docs on `Opts::logdir`. const EXIT_BAD_LOGDIR: i32 = 8; const EXIT_FAILED_TO_BIND: i32 = 9; #[tokio::main] pub async fn main() -> Result<(), Box<dyn std::error::Error>> { let opts = Opts::parse(); init_logging(match opts.verbosity { 0 => LevelFilter::Warn, 1 => LevelFilter::Info, _ => LevelFilter::max(), }); debug!("Parsed options: {:?}", opts); let data_location = opts.logdir.display().to_string(); let error_file_path = opts.error_file.as_ref().map(PathBuf::as_ref); let reflection = tonic_reflection::server::Builder::configure() .register_encoded_file_descriptor_set(crate::proto::FILE_DESCRIPTOR_SET) .build() .expect("failed to create gRPC reflection servicer"); // Create the logdir outside an async runtime (see docs for `DynLogdir::new`). let raw_logdir = opts.logdir; let logdir = tokio::task::spawn_blocking(|| DynLogdir::new(raw_logdir)) .await? .unwrap_or_else(|e| { write_startup_error(error_file_path, &e.to_string()); std::process::exit(EXIT_BAD_LOGDIR); }); if opts.die_after_stdin { thread::Builder::new() .name("StdinWatcher".to_string()) .spawn(die_after_stdin) .expect("failed to spawn stdin watcher thread"); } let addr = (opts.host.as_str(), opts.port); let listener = TcpListener::bind(addr).await.unwrap_or_else(|e| { let msg = format!("failed to bind to {:?}: {}", addr, e); write_startup_error(error_file_path, &msg); std::process::exit(EXIT_FAILED_TO_BIND); }); let bound = listener.local_addr()?; if let Some(port_file) = opts.port_file { let port = bound.port(); if let Err(e) = write_port_file(&port_file, port) { error!( "Failed to write port \"{}\" to {}: {}", port, port_file.display(), e ); std::process::exit(1); } info!("Wrote port \"{}\" to {}", port, port_file.display()); } else { eprintln!("listening on {:?}", bound); } let commit = Arc::new(Commit::new()); let psh_ref = Arc::new(opts.samples_per_plugin); thread::Builder::new() .name("Reloader".to_string()) .spawn({ let reload_strategy = opts.reload; let checksum = opts.checksum; let commit = Arc::clone(&commit); move || { let mut loader = LogdirLoader::new(&commit, logdir, 0, psh_ref); // Checksum only if `--checksum` given (i.e., off by default). loader.checksum(checksum); loop { info!("Starting load cycle"); let start = Instant::now(); loader.reload(); let end = Instant::now(); info!("Finished load cycle ({:?})", end - start); match reload_strategy { ReloadStrategy::Loop { delay } => thread::sleep(delay), ReloadStrategy::Once => break, }; } } }) .expect("failed to spawn reloader thread"); let handler = DataProviderHandler { data_location, commit, }; Server::builder() .add_service(TensorBoardDataProviderServer::new(handler)) .add_service(reflection) .serve_with_incoming(TcpListenerStream::new(listener)) .await?; Ok(()) } /// Installs a logging handler whose behavior is determined by the `RUST_LOG` environment variable /// (per <https://docs.rs/env_logger> semantics), or by including all logs at `default_log_level` /// or above if `RUST_LOG_LEVEL` is not given. fn init_logging(default_log_level: LevelFilter) { use env_logger::{Builder, Env}; Builder::from_env(Env::default().default_filter_or(default_log_level.to_string())).init(); } /// Locks stdin and reads it to EOF, then exits the process. 
fn die_after_stdin() { let stdin = std::io::stdin(); let stdin_lock = stdin.lock(); for _ in stdin_lock.bytes() {} info!("Stdin closed; exiting"); std::process::exit(0); } /// Writes `port` to file `path` as an ASCII decimal followed by newline. fn write_port_file(path: &Path, port: u16) -> std::io::Result<()> { let mut f = File::create(path)?; writeln!(f, "{}", port)?; f.flush()?; Ok(()) } /// Writes error to the given file, or to stderr as a fallback. fn write_startup_error(path: Option<&Path>, error: &str) { let write_to_file = |path: &Path| -> std::io::Result<()> { let mut f = File::create(path)?; writeln!(f, "{}", error)?; f.flush()?; Ok(()) }; if let Some(p) = path { if let Err(e) = write_to_file(p) { info!("Failed to write error to {:?}: {}", p, e); } else { return; } } // fall back to stderr if no path given or if write failed eprintln!("fatal: {}", error); } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_reload() { assert_eq!("once".parse::<ReloadStrategy>(), Ok(ReloadStrategy::Once)); assert_eq!( "5".parse::<ReloadStrategy>(), Ok(ReloadStrategy::Loop { delay: Duration::from_secs(5) }) ); "5s".parse::<ReloadStrategy>() .expect_err("explicit \"s\" trailer should be forbidden"); } }
random_line_split
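With `--port 0` the OS picks an arbitrary free port, so the bound address must be read back from the listener before the port file can be written as ASCII decimal plus a newline. A minimal sketch of that round-trip using std's blocking `TcpListener` (the original binds with tokio's async listener, and `write_port_file` writes to a real file rather than a buffer):

```rust
use std::io::Write;
use std::net::TcpListener;

fn main() -> std::io::Result<()> {
    // Port 0 asks the OS for any free port; the real port only exists
    // after the bind succeeds.
    let listener = TcpListener::bind(("127.0.0.1", 0))?;
    let port = listener.local_addr()?.port();

    // Stand-in for `write_port_file`: decimal digits followed by "\n".
    // A real server would flush the file before reporting readiness.
    let mut out = Vec::new();
    writeln!(out, "{}", port)?;
    assert!(out.ends_with(b"\n"));
    println!("listening on port {}", port);
    Ok(())
}
```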
cli.rs
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ //! Command-line interface for the main entry point. use clap::Clap; use log::{debug, error, info, LevelFilter}; use std::fs::File; use std::io::{Read, Write}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; use tonic::transport::Server; use crate::commit::Commit; use crate::logdir::LogdirLoader; use crate::proto::tensorboard::data; use crate::server::DataProviderHandler; use crate::types::PluginSamplingHint; use data::tensor_board_data_provider_server::TensorBoardDataProviderServer; pub mod dynamic_logdir; use dynamic_logdir::DynLogdir; #[derive(Clap, Debug)] #[clap(name = "rustboard", version = crate::VERSION)] struct Opts { /// Log directory to load /// /// Directory to recursively scan for event files (files matching the `*tfevents*` glob). This /// directory, its descendants, and its event files will be periodically polled for new data. /// /// If this log directory is invalid or unsupported, exits with status 8. #[clap(long, setting(clap::ArgSettings::AllowEmptyValues))] logdir: PathBuf, /// Bind to this host name /// /// Host to bind this server to. May be an IPv4 address (e.g., 127.0.0.1 or 0.0.0.0), an IPv6 /// address (e.g., ::1 or ::0), or a string like `localhost` to pass to `getaddrinfo(3)`. #[clap(long, default_value = "localhost")] host: String, /// Bind to this port /// /// Port to bind this server to. Use `0` to request an arbitrary free port from the OS. #[clap(long, default_value = "6806")] port: u16, /// Seconds to sleep between reloads, or "once" /// /// Number of seconds to wait between finishing one load cycle and starting the next one. This /// does not include the time for the reload itself. If "once", data will be loaded only once. #[clap(long, default_value = "5", value_name = "secs")] reload: ReloadStrategy, /// Use verbose output (-vv for very verbose output) #[clap(long = "verbose", short, parse(from_occurrences))] verbosity: u32, /// Kill this server once stdin is closed /// /// While this server is running, read stdin to end of file and then kill the server. Used to /// portably ensure that the server exits when the parent process dies, even due to a crash. /// Don't set this if stdin is connected to a tty and the process will be backgrounded, since /// then the server will receive `SIGTTIN` and its process will be stopped (in the `SIGSTOP` /// sense) but not killed. #[clap(long)] die_after_stdin: bool, /// Write bound port to this file /// /// Once a server socket is opened, write the port on which it's listening to the file at this /// path. Useful with `--port 0`. Port will be written as ASCII decimal followed by a newline /// (e.g., "6806\n"). If the server fails to start, this file may not be written at all. 
If the /// port file is specified but cannot be written, the server will die. /// /// This also suppresses the "listening on HOST:PORT" line that is otherwise written to stderr /// when the server starts. #[clap(long)] port_file: Option<PathBuf>, /// Write startup errors to this file /// /// If the logdir is invalid or unsupported, write the error message to this file instead of to /// stderr. That way, you can capture this output while still keeping stderr open for normal /// logging. #[clap(long)] error_file: Option<PathBuf>, /// Checksum all records (negate with `--no-checksum`) /// /// With `--checksum`, every record will be checksummed before being parsed. With /// `--no-checksum` (the default), records are only checksummed if parsing fails. Skipping /// checksums for records that successfully parse can be significantly faster, but also means /// that some bit flips may not be detected. #[clap(long, multiple_occurrences = true, overrides_with = "no_checksum")] checksum: bool, /// Only checksum records that fail to parse /// /// Negates `--checksum`. This is the default. #[clap( long, multiple_occurrences = true, overrides_with = "checksum", hidden = true )] #[allow(unused)] no_checksum: bool, /// Set explicit series sampling /// /// A comma separated list of `plugin_name=num_samples` pairs to explicitly specify how many /// samples to keep per tag for the specified plugin. For unspecified plugins, series are /// randomly downsampled to reasonable values to prevent out-of-memory errors in long-running /// jobs. Each `num_samples` may be the special token `all` to retain all data without /// downsampling. For instance, `--samples_per_plugin=scalars=500,images=all,audio=0` keeps 500 /// events in each scalar series, all of the images, and none of the audio. #[clap(long, default_value = "", setting(clap::ArgSettings::AllowEmptyValues))] samples_per_plugin: PluginSamplingHint, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] enum ReloadStrategy { Loop { delay: Duration }, Once, } impl FromStr for ReloadStrategy { type Err = <u64 as FromStr>::Err; fn from_str(s: &str) -> Result<Self, Self::Err> { if s == "once" { Ok(ReloadStrategy::Once) } else { Ok(ReloadStrategy::Loop { delay: Duration::from_secs(s.parse()?), }) } } } /// Exit code for failure of [`DynLogdir::new`]. // Keep in sync with docs on `Opts::logdir`. const EXIT_BAD_LOGDIR: i32 = 8; const EXIT_FAILED_TO_BIND: i32 = 9; #[tokio::main] pub async fn main() -> Result<(), Box<dyn std::error::Error>> { let opts = Opts::parse(); init_logging(match opts.verbosity { 0 => LevelFilter::Warn, 1 => LevelFilter::Info, _ => LevelFilter::max(), }); debug!("Parsed options: {:?}", opts); let data_location = opts.logdir.display().to_string(); let error_file_path = opts.error_file.as_ref().map(PathBuf::as_ref); let reflection = tonic_reflection::server::Builder::configure() .register_encoded_file_descriptor_set(crate::proto::FILE_DESCRIPTOR_SET) .build() .expect("failed to create gRPC reflection servicer"); // Create the logdir outside an async runtime (see docs for `DynLogdir::new`). let raw_logdir = opts.logdir; let logdir = tokio::task::spawn_blocking(|| DynLogdir::new(raw_logdir)) .await? 
.unwrap_or_else(|e| { write_startup_error(error_file_path, &e.to_string()); std::process::exit(EXIT_BAD_LOGDIR); }); if opts.die_after_stdin { thread::Builder::new() .name("StdinWatcher".to_string()) .spawn(die_after_stdin) .expect("failed to spawn stdin watcher thread"); } let addr = (opts.host.as_str(), opts.port); let listener = TcpListener::bind(addr).await.unwrap_or_else(|e| { let msg = format!("failed to bind to {:?}: {}", addr, e); write_startup_error(error_file_path, &msg); std::process::exit(EXIT_FAILED_TO_BIND); }); let bound = listener.local_addr()?; if let Some(port_file) = opts.port_file { let port = bound.port(); if let Err(e) = write_port_file(&port_file, port) { error!( "Failed to write port \"{}\" to {}: {}", port, port_file.display(), e ); std::process::exit(1); } info!("Wrote port \"{}\" to {}", port, port_file.display()); } else { eprintln!("listening on {:?}", bound); } let commit = Arc::new(Commit::new()); let psh_ref = Arc::new(opts.samples_per_plugin); thread::Builder::new() .name("Reloader".to_string()) .spawn({ let reload_strategy = opts.reload; let checksum = opts.checksum; let commit = Arc::clone(&commit); move || { let mut loader = LogdirLoader::new(&commit, logdir, 0, psh_ref); // Checksum only if `--checksum` given (i.e., off by default). loader.checksum(checksum); loop { info!("Starting load cycle"); let start = Instant::now(); loader.reload(); let end = Instant::now(); info!("Finished load cycle ({:?})", end - start); match reload_strategy { ReloadStrategy::Loop { delay } => thread::sleep(delay), ReloadStrategy::Once => break, }; } } }) .expect("failed to spawn reloader thread"); let handler = DataProviderHandler { data_location, commit, }; Server::builder() .add_service(TensorBoardDataProviderServer::new(handler)) .add_service(reflection) .serve_with_incoming(TcpListenerStream::new(listener)) .await?; Ok(()) } /// Installs a logging handler whose behavior is determined by the `RUST_LOG` environment variable /// (per <https://docs.rs/env_logger> semantics), or by including all logs at `default_log_level` /// or above if `RUST_LOG_LEVEL` is not given. fn
(default_log_level: LevelFilter) { use env_logger::{Builder, Env}; Builder::from_env(Env::default().default_filter_or(default_log_level.to_string())).init(); } /// Locks stdin and reads it to EOF, then exits the process. fn die_after_stdin() { let stdin = std::io::stdin(); let stdin_lock = stdin.lock(); for _ in stdin_lock.bytes() {} info!("Stdin closed; exiting"); std::process::exit(0); } /// Writes `port` to file `path` as an ASCII decimal followed by newline. fn write_port_file(path: &Path, port: u16) -> std::io::Result<()> { let mut f = File::create(path)?; writeln!(f, "{}", port)?; f.flush()?; Ok(()) } /// Writes error to the given file, or to stderr as a fallback. fn write_startup_error(path: Option<&Path>, error: &str) { let write_to_file = |path: &Path| -> std::io::Result<()> { let mut f = File::create(path)?; writeln!(f, "{}", error)?; f.flush()?; Ok(()) }; if let Some(p) = path { if let Err(e) = write_to_file(p) { info!("Failed to write error to {:?}: {}", p, e); } else { return; } } // fall back to stderr if no path given or if write failed eprintln!("fatal: {}", error); } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_reload() { assert_eq!("once".parse::<ReloadStrategy>(), Ok(ReloadStrategy::Once)); assert_eq!( "5".parse::<ReloadStrategy>(), Ok(ReloadStrategy::Loop { delay: Duration::from_secs(5) }) ); "5s".parse::<ReloadStrategy>() .expect_err("explicit \"s\" trailer should be forbidden"); } }
init_logging
identifier_name
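`main` maps counted `-v` occurrences onto the `log` crate's `LevelFilter` before handing the result to `init_logging`: zero flags keep warnings only, one adds info, and two or more enable everything. A small sketch of that mapping, assuming the `log` crate as a dependency; `level_for` is a hypothetical helper name:

```rust
use log::LevelFilter;

fn level_for(verbosity: u32) -> LevelFilter {
    match verbosity {
        0 => LevelFilter::Warn,
        1 => LevelFilter::Info,
        // `LevelFilter::max()` is Trace, i.e. "very verbose" (-vv).
        _ => LevelFilter::max(),
    }
}

fn main() {
    assert_eq!(level_for(0), LevelFilter::Warn);
    assert_eq!(level_for(1), LevelFilter::Info);
    assert_eq!(level_for(2), LevelFilter::Trace);
}
```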
runner.rs
use diesel::prelude::*; #[cfg(feature = "r2d2")] use diesel::r2d2; use std::any::Any; use std::error::Error; use std::panic::{catch_unwind, AssertUnwindSafe, PanicInfo, RefUnwindSafe, UnwindSafe}; use std::sync::Arc; use std::time::Duration; use threadpool::ThreadPool; use crate::db::*; use crate::errors::*; use crate::{storage, Registry}; use event::*; mod channel; mod event; pub struct NoConnectionPoolGiven; #[allow(missing_debug_implementations)] pub struct Builder<Env, ConnectionPoolBuilder> { connection_pool_or_builder: ConnectionPoolBuilder, environment: Env, thread_count: Option<usize>, job_start_timeout: Option<Duration>, } impl<Env, ConnectionPoolBuilder> Builder<Env, ConnectionPoolBuilder> { /// Set the number of threads to be used to run jobs concurrently. /// /// Defaults to 5 pub fn thread_count(mut self, thread_count: usize) -> Self { self.thread_count = Some(thread_count); self } fn get_thread_count(&self) -> usize { self.thread_count.unwrap_or(5) } /// The amount of time to wait for a job to start before assuming an error /// has occurred. /// /// Defaults to 10 seconds. pub fn job_start_timeout(mut self, timeout: Duration) -> Self { self.job_start_timeout = Some(timeout); self } /// Provide a connection pool to be used by the runner pub fn connection_pool<NewPool>(self, pool: NewPool) -> Builder<Env, NewPool> { Builder { connection_pool_or_builder: pool, environment: self.environment, thread_count: self.thread_count, job_start_timeout: self.job_start_timeout, } } } #[cfg(feature = "r2d2")] impl<Env, ConnectionPoolBuilder> Builder<Env, ConnectionPoolBuilder> { /// Build the runner with an r2d2 connection pool /// /// This will override any connection pool previously provided pub fn database_url<S: Into<String>>(self, database_url: S) -> Builder<Env, R2d2Builder> { self.connection_pool_builder(database_url, r2d2::Builder::new()) } /// Provide a connection pool builder. /// /// This will override any connection pool previously provided. /// /// You should call this method if you want to provide additional /// configuration for the database connection pool. The builder will be /// configured to have its max size set to the value given to `2 * thread_count`. /// To override this behavior, call [`connection_count`](Self::connection_count) pub fn connection_pool_builder<S: Into<String>>( self, database_url: S, builder: r2d2::Builder<r2d2::ConnectionManager<PgConnection>>, ) -> Builder<Env, R2d2Builder> { self.connection_pool(R2d2Builder::new(database_url.into(), builder)) } } #[cfg(feature = "r2d2")] impl<Env> Builder<Env, R2d2Builder> { /// Set the max size of the database connection pool pub fn connection_count(mut self, connection_count: u32) -> Self { self.connection_pool_or_builder .connection_count(connection_count); self } /// Build the runner with an r2d2 connection pool. 
pub fn build(self) -> Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>> { let thread_count = self.get_thread_count(); let connection_pool_size = thread_count as u32 * 2; let connection_pool = self.connection_pool_or_builder.build(connection_pool_size); Runner { connection_pool, thread_pool: ThreadPool::new(thread_count), environment: Arc::new(self.environment), registry: Arc::new(Registry::load()), job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)), } } } impl<Env, ConnectionPool> Builder<Env, ConnectionPool> where ConnectionPool: DieselPool, { /// Build the runner pub fn build(self) -> Runner<Env, ConnectionPool> { Runner { thread_pool: ThreadPool::new(self.get_thread_count()), connection_pool: self.connection_pool_or_builder, environment: Arc::new(self.environment), registry: Arc::new(Registry::load()), job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)), } } } #[allow(missing_debug_implementations)] /// The core runner responsible for locking and running jobs pub struct
<Env:'static, ConnectionPool> { connection_pool: ConnectionPool, thread_pool: ThreadPool, environment: Arc<Env>, registry: Arc<Registry<Env>>, job_start_timeout: Duration, } impl<Env> Runner<Env, NoConnectionPoolGiven> { /// Create a builder for a job runner /// /// This method takes the two required configurations, the database /// connection pool, and the environment to pass to your jobs. If your /// environment contains a connection pool, it should be the same pool given /// here. pub fn builder(environment: Env) -> Builder<Env, NoConnectionPoolGiven> { Builder { connection_pool_or_builder: NoConnectionPoolGiven, environment, thread_count: None, job_start_timeout: None, } } } impl<Env, ConnectionPool> Runner<Env, ConnectionPool> { #[doc(hidden)] /// For use in integration tests pub fn connection_pool(&self) -> &ConnectionPool { &self.connection_pool } } impl<Env, ConnectionPool> Runner<Env, ConnectionPool> where Env: RefUnwindSafe + Send + Sync +'static, ConnectionPool: DieselPool +'static, { /// Runs all pending jobs in the queue /// /// This function will return once all jobs in the queue have begun running, /// but does not wait for them to complete. When this function returns, at /// least one thread will have tried to acquire a new job, and found there /// were none in the queue. pub fn run_all_pending_jobs(&self) -> Result<(), FetchError<ConnectionPool>> { use std::cmp::max; let max_threads = self.thread_pool.max_count(); let (sender, receiver) = channel::new(max_threads); let mut pending_messages = 0; loop { let available_threads = max_threads - self.thread_pool.active_count(); let jobs_to_queue = if pending_messages == 0 { // If we have no queued jobs talking to us, and there are no // available threads, we still need to queue at least one job // or we'll never receive a message max(available_threads, 1) } else { available_threads }; for _ in 0..jobs_to_queue { self.run_single_job(sender.clone()); } pending_messages += jobs_to_queue; match receiver.recv_timeout(self.job_start_timeout) { Ok(Event::Working) => pending_messages -= 1, Ok(Event::NoJobAvailable) => return Ok(()), Ok(Event::ErrorLoadingJob(e)) => return Err(FetchError::FailedLoadingJob(e)), Ok(Event::FailedToAcquireConnection(e)) => { return Err(FetchError::NoDatabaseConnection(e)); } Err(_) => return Err(FetchError::NoMessageReceived), } } } fn run_single_job(&self, sender: EventSender<ConnectionPool>) { let environment = Arc::clone(&self.environment); let registry = Arc::clone(&self.registry); // FIXME: https://github.com/sfackler/r2d2/pull/70 let connection_pool = AssertUnwindSafe(self.connection_pool().clone()); self.get_single_job(sender, move |job| { let perform_job = registry .get(&job.job_type) .ok_or_else(|| PerformError::from(format!("Unknown job type {}", job.job_type)))?; perform_job.perform(job.data, &environment, &connection_pool.0) }) } fn get_single_job<F>(&self, sender: EventSender<ConnectionPool>, f: F) where F: FnOnce(storage::BackgroundJob) -> Result<(), PerformError> + Send + UnwindSafe +'static, { use diesel::result::Error::RollbackTransaction; // The connection may not be `Send` so we need to clone the pool instead let pool = self.connection_pool.clone(); self.thread_pool.execute(move || { let conn = match pool.get() { Ok(conn) => conn, Err(e) => { sender.send(Event::FailedToAcquireConnection(e)); return; } }; let job_run_result = conn.transaction::<_, diesel::result::Error, _>(|| { let job = match storage::find_next_unlocked_job(&conn).optional() { Ok(Some(j)) => { sender.send(Event::Working); j 
} Ok(None) => { sender.send(Event::NoJobAvailable); return Ok(()); } Err(e) => { sender.send(Event::ErrorLoadingJob(e)); return Err(RollbackTransaction); } }; let job_id = job.id; let result = catch_unwind(|| f(job)) .map_err(|e| try_to_extract_panic_info(&e)) .and_then(|r| r); match result { Ok(_) => storage::delete_successful_job(&conn, job_id)?, Err(e) => { eprintln!("Job {} failed to run: {}", job_id, e); storage::update_failed_job(&conn, job_id); } } Ok(()) }); match job_run_result { Ok(_) | Err(RollbackTransaction) => {} Err(e) => { panic!("Failed to update job: {:?}", e); } } }) } fn connection(&self) -> Result<DieselPooledConn<ConnectionPool>, Box<dyn Error + Send + Sync>> { self.connection_pool.get().map_err(Into::into) } /// Waits for all running jobs to complete, and returns an error if any /// failed /// /// This function is intended for use in tests. If any jobs have failed, it /// will return `swirl::JobsFailed` with the number of jobs that failed. /// /// If any other unexpected errors occurred, such as panicked worker threads /// or an error loading the job count from the database, an opaque error /// will be returned. pub fn check_for_failed_jobs(&self) -> Result<(), FailedJobsError> { self.wait_for_jobs()?; let failed_jobs = storage::failed_job_count(&*self.connection()?)?; if failed_jobs == 0 { Ok(()) } else { Err(JobsFailed(failed_jobs)) } } fn wait_for_jobs(&self) -> Result<(), Box<dyn Error + Send + Sync>> { self.thread_pool.join(); let panic_count = self.thread_pool.panic_count(); if panic_count == 0 { Ok(()) } else { Err(format!("{} threads panicked", panic_count).into()) } } } /// Try to figure out what's in the box, and print it if we can. /// /// The actual error type we will get from `panic::catch_unwind` is really poorly documented. /// However, the `panic::set_hook` functions deal with a `PanicInfo` type, and its payload is /// documented as "commonly but not always `&'static str` or `String`". So we can try all of those, /// and give up if we didn't get one of those three types. 
fn try_to_extract_panic_info(info: &(dyn Any + Send +'static)) -> PerformError { if let Some(x) = info.downcast_ref::<PanicInfo>() { format!("job panicked: {}", x).into() } else if let Some(x) = info.downcast_ref::<&'static str>() { format!("job panicked: {}", x).into() } else if let Some(x) = info.downcast_ref::<String>() { format!("job panicked: {}", x).into() } else { "job panicked".into() } } #[cfg(test)] mod tests { use diesel::prelude::*; use diesel::r2d2; use super::*; use crate::schema::background_jobs::dsl::*; use std::panic::AssertUnwindSafe; use std::sync::{Arc, Barrier, Mutex, MutexGuard}; #[test] fn jobs_are_locked_when_fetched() { let _guard = TestGuard::lock(); let runner = runner(); let first_job_id = create_dummy_job(&runner).id; let second_job_id = create_dummy_job(&runner).id; let fetch_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let fetch_barrier2 = fetch_barrier.clone(); let return_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let return_barrier2 = return_barrier.clone(); runner.get_single_job(channel::dummy_sender(), move |job| { fetch_barrier.0.wait(); // Tell thread 2 it can lock its job assert_eq!(first_job_id, job.id); return_barrier.0.wait(); // Wait for thread 2 to lock its job Ok(()) }); fetch_barrier2.0.wait(); // Wait until thread 1 locks its job runner.get_single_job(channel::dummy_sender(), move |job| { assert_eq!(second_job_id, job.id); return_barrier2.0.wait(); // Tell thread 1 it can unlock its job Ok(()) }); runner.wait_for_jobs().unwrap(); } #[test] fn jobs_are_deleted_when_successfully_run() { let _guard = TestGuard::lock(); let runner = runner(); create_dummy_job(&runner); runner.get_single_job(channel::dummy_sender(), |_| Ok(())); runner.wait_for_jobs().unwrap(); let remaining_jobs = background_jobs .count() .get_result(&*runner.connection().unwrap()); assert_eq!(Ok(0), remaining_jobs); } #[test] fn failed_jobs_do_not_release_lock_before_updating_retry_time() { let _guard = TestGuard::lock(); let runner = runner(); create_dummy_job(&runner); let barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let barrier2 = barrier.clone(); runner.get_single_job(channel::dummy_sender(), move |_| { barrier.0.wait(); // error so the job goes back into the queue Err("nope".into()) }); let conn = runner.connection().unwrap(); // Wait for the first thread to acquire the lock barrier2.0.wait(); // We are intentionally not using `get_single_job` here. // `SKIP LOCKED` is intentionally omitted here, so we block until // the lock on the first job is released. // If there is any point where the row is unlocked, but the retry // count is not updated, we will get a row here. let available_jobs = background_jobs .select(id) .filter(retries.eq(0)) .for_update() .load::<i64>(&*conn) .unwrap(); assert_eq!(0, available_jobs.len()); // Sanity check to make sure the job actually is there let total_jobs_including_failed = background_jobs .select(id) .for_update() .load::<i64>(&*conn) .unwrap(); assert_eq!(1, total_jobs_including_failed.len()); runner.wait_for_jobs().unwrap(); } #[test] fn panicking_in_jobs_updates_retry_counter() { let _guard = TestGuard::lock(); let runner = runner(); let job_id = create_dummy_job(&runner).id; runner.get_single_job(channel::dummy_sender(), |_| panic!()); runner.wait_for_jobs().unwrap(); let tries = background_jobs .find(job_id) .select(retries) .for_update() .first::<i32>(&*runner.connection().unwrap()) .unwrap(); assert_eq!(1, tries); } lazy_static::lazy_static! 
{ // Since these tests deal with behavior concerning multiple connections // running concurrently, they have to run outside of a transaction. // Therefore we can't run more than one at a time. // // Rather than forcing the whole suite to be run with `--test-threads 1`, // we just lock these tests instead. static ref TEST_MUTEX: Mutex<()> = Mutex::new(()); } struct TestGuard<'a>(MutexGuard<'a, ()>); impl<'a> TestGuard<'a> { fn lock() -> Self { TestGuard(TEST_MUTEX.lock().unwrap()) } } impl<'a> Drop for TestGuard<'a> { fn drop(&mut self) { ::diesel::sql_query("TRUNCATE TABLE background_jobs") .execute(&*runner().connection().unwrap()) .unwrap(); } } type Runner<Env> = crate::Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>>; fn runner() -> Runner<()> { let database_url = dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests"); crate::Runner::builder(()) .database_url(database_url) .thread_count(2) .build() } fn create_dummy_job(runner: &Runner<()>) -> storage::BackgroundJob { ::diesel::insert_into(background_jobs) .values((job_type.eq("Foo"), data.eq(serde_json::json!(null)))) .returning((id, job_type, data)) .get_result(&*runner.connection().unwrap()) .unwrap() } }
Runner
identifier_name
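The `try_to_extract_panic_info` helper works because `catch_unwind` returns the panic payload as `Box<dyn Any + Send>`, whose concrete type is, as the comment in the file notes, usually `&'static str` or `String`. A standalone sketch of that downcast probing (it omits the original's extra `PanicInfo` case):

```rust
use std::any::Any;
use std::panic::catch_unwind;

// Probe the payload for the two common panic message types and fall back
// to a generic message when neither matches.
fn describe_panic(payload: &(dyn Any + Send)) -> String {
    if let Some(s) = payload.downcast_ref::<&'static str>() {
        format!("job panicked: {}", s)
    } else if let Some(s) = payload.downcast_ref::<String>() {
        format!("job panicked: {}", s)
    } else {
        "job panicked".to_string()
    }
}

fn main() {
    // `panic!` with a string literal stores a `&'static str` payload.
    let err = catch_unwind(|| panic!("boom")).unwrap_err();
    assert_eq!(describe_panic(&*err), "job panicked: boom");
}
```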
runner.rs
use diesel::prelude::*; #[cfg(feature = "r2d2")] use diesel::r2d2; use std::any::Any; use std::error::Error; use std::panic::{catch_unwind, AssertUnwindSafe, PanicInfo, RefUnwindSafe, UnwindSafe}; use std::sync::Arc; use std::time::Duration; use threadpool::ThreadPool; use crate::db::*; use crate::errors::*; use crate::{storage, Registry}; use event::*; mod channel; mod event; pub struct NoConnectionPoolGiven; #[allow(missing_debug_implementations)] pub struct Builder<Env, ConnectionPoolBuilder> { connection_pool_or_builder: ConnectionPoolBuilder, environment: Env, thread_count: Option<usize>, job_start_timeout: Option<Duration>, } impl<Env, ConnectionPoolBuilder> Builder<Env, ConnectionPoolBuilder> { /// Set the number of threads to be used to run jobs concurrently. /// /// Defaults to 5 pub fn thread_count(mut self, thread_count: usize) -> Self { self.thread_count = Some(thread_count); self } fn get_thread_count(&self) -> usize { self.thread_count.unwrap_or(5) } /// The amount of time to wait for a job to start before assuming an error /// has occurred. /// /// Defaults to 10 seconds. pub fn job_start_timeout(mut self, timeout: Duration) -> Self { self.job_start_timeout = Some(timeout); self } /// Provide a connection pool to be used by the runner pub fn connection_pool<NewPool>(self, pool: NewPool) -> Builder<Env, NewPool> { Builder { connection_pool_or_builder: pool, environment: self.environment, thread_count: self.thread_count, job_start_timeout: self.job_start_timeout, } } } #[cfg(feature = "r2d2")] impl<Env, ConnectionPoolBuilder> Builder<Env, ConnectionPoolBuilder> { /// Build the runner with an r2d2 connection pool /// /// This will override any connection pool previously provided pub fn database_url<S: Into<String>>(self, database_url: S) -> Builder<Env, R2d2Builder> { self.connection_pool_builder(database_url, r2d2::Builder::new()) } /// Provide a connection pool builder. /// /// This will override any connection pool previously provided. /// /// You should call this method if you want to provide additional /// configuration for the database connection pool. The builder will be /// configured to have its max size set to the value given to `2 * thread_count`. /// To override this behavior, call [`connection_count`](Self::connection_count) pub fn connection_pool_builder<S: Into<String>>( self, database_url: S, builder: r2d2::Builder<r2d2::ConnectionManager<PgConnection>>, ) -> Builder<Env, R2d2Builder> { self.connection_pool(R2d2Builder::new(database_url.into(), builder)) } } #[cfg(feature = "r2d2")] impl<Env> Builder<Env, R2d2Builder> { /// Set the max size of the database connection pool pub fn connection_count(mut self, connection_count: u32) -> Self { self.connection_pool_or_builder .connection_count(connection_count); self } /// Build the runner with an r2d2 connection pool. 
pub fn build(self) -> Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>> { let thread_count = self.get_thread_count(); let connection_pool_size = thread_count as u32 * 2; let connection_pool = self.connection_pool_or_builder.build(connection_pool_size); Runner { connection_pool, thread_pool: ThreadPool::new(thread_count), environment: Arc::new(self.environment), registry: Arc::new(Registry::load()), job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)), } } } impl<Env, ConnectionPool> Builder<Env, ConnectionPool> where ConnectionPool: DieselPool, { /// Build the runner pub fn build(self) -> Runner<Env, ConnectionPool> { Runner { thread_pool: ThreadPool::new(self.get_thread_count()), connection_pool: self.connection_pool_or_builder, environment: Arc::new(self.environment), registry: Arc::new(Registry::load()), job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)), } } } #[allow(missing_debug_implementations)] /// The core runner responsible for locking and running jobs pub struct Runner<Env: 'static, ConnectionPool> { connection_pool: ConnectionPool, thread_pool: ThreadPool, environment: Arc<Env>, registry: Arc<Registry<Env>>, job_start_timeout: Duration, } impl<Env> Runner<Env, NoConnectionPoolGiven> { /// Create a builder for a job runner /// /// This method takes the two required configurations, the database /// connection pool, and the environment to pass to your jobs. If your /// environment contains a connection pool, it should be the same pool given /// here. pub fn builder(environment: Env) -> Builder<Env, NoConnectionPoolGiven> { Builder { connection_pool_or_builder: NoConnectionPoolGiven, environment, thread_count: None, job_start_timeout: None, } } } impl<Env, ConnectionPool> Runner<Env, ConnectionPool> { #[doc(hidden)] /// For use in integration tests pub fn connection_pool(&self) -> &ConnectionPool { &self.connection_pool } } impl<Env, ConnectionPool> Runner<Env, ConnectionPool> where Env: RefUnwindSafe + Send + Sync + 'static, ConnectionPool: DieselPool + 'static, { /// Runs all pending jobs in the queue /// /// This function will return once all jobs in the queue have begun running, /// but does not wait for them to complete. When this function returns, at /// least one thread will have tried to acquire a new job, and found there /// were none in the queue. 
pub fn run_all_pending_jobs(&self) -> Result<(), FetchError<ConnectionPool>> { use std::cmp::max; let max_threads = self.thread_pool.max_count(); let (sender, receiver) = channel::new(max_threads); let mut pending_messages = 0; loop { let available_threads = max_threads - self.thread_pool.active_count(); let jobs_to_queue = if pending_messages == 0 { // If we have no queued jobs talking to us, and there are no // available threads, we still need to queue at least one job // or we'll never receive a message max(available_threads, 1) } else { available_threads }; for _ in 0..jobs_to_queue { self.run_single_job(sender.clone()); } pending_messages += jobs_to_queue; match receiver.recv_timeout(self.job_start_timeout) { Ok(Event::Working) => pending_messages -= 1, Ok(Event::NoJobAvailable) => return Ok(()), Ok(Event::ErrorLoadingJob(e)) => return Err(FetchError::FailedLoadingJob(e)), Ok(Event::FailedToAcquireConnection(e)) => { return Err(FetchError::NoDatabaseConnection(e)); } Err(_) => return Err(FetchError::NoMessageReceived), } } } fn run_single_job(&self, sender: EventSender<ConnectionPool>) { let environment = Arc::clone(&self.environment); let registry = Arc::clone(&self.registry); // FIXME: https://github.com/sfackler/r2d2/pull/70 let connection_pool = AssertUnwindSafe(self.connection_pool().clone()); self.get_single_job(sender, move |job| { let perform_job = registry .get(&job.job_type) .ok_or_else(|| PerformError::from(format!("Unknown job type {}", job.job_type)))?; perform_job.perform(job.data, &environment, &connection_pool.0) }) } fn get_single_job<F>(&self, sender: EventSender<ConnectionPool>, f: F) where F: FnOnce(storage::BackgroundJob) -> Result<(), PerformError> + Send + UnwindSafe +'static, { use diesel::result::Error::RollbackTransaction; // The connection may not be `Send` so we need to clone the pool instead let pool = self.connection_pool.clone(); self.thread_pool.execute(move || { let conn = match pool.get() { Ok(conn) => conn, Err(e) => { sender.send(Event::FailedToAcquireConnection(e)); return; } }; let job_run_result = conn.transaction::<_, diesel::result::Error, _>(|| { let job = match storage::find_next_unlocked_job(&conn).optional() { Ok(Some(j)) => { sender.send(Event::Working); j } Ok(None) => { sender.send(Event::NoJobAvailable); return Ok(()); } Err(e) => { sender.send(Event::ErrorLoadingJob(e)); return Err(RollbackTransaction); } }; let job_id = job.id; let result = catch_unwind(|| f(job)) .map_err(|e| try_to_extract_panic_info(&e)) .and_then(|r| r); match result { Ok(_) => storage::delete_successful_job(&conn, job_id)?, Err(e) =>
} Ok(()) }); match job_run_result { Ok(_) | Err(RollbackTransaction) => {} Err(e) => { panic!("Failed to update job: {:?}", e); } } }) } fn connection(&self) -> Result<DieselPooledConn<ConnectionPool>, Box<dyn Error + Send + Sync>> { self.connection_pool.get().map_err(Into::into) } /// Waits for all running jobs to complete, and returns an error if any /// failed /// /// This function is intended for use in tests. If any jobs have failed, it /// will return `swirl::JobsFailed` with the number of jobs that failed. /// /// If any other unexpected errors occurred, such as panicked worker threads /// or an error loading the job count from the database, an opaque error /// will be returned. pub fn check_for_failed_jobs(&self) -> Result<(), FailedJobsError> { self.wait_for_jobs()?; let failed_jobs = storage::failed_job_count(&*self.connection()?)?; if failed_jobs == 0 { Ok(()) } else { Err(JobsFailed(failed_jobs)) } } fn wait_for_jobs(&self) -> Result<(), Box<dyn Error + Send + Sync>> { self.thread_pool.join(); let panic_count = self.thread_pool.panic_count(); if panic_count == 0 { Ok(()) } else { Err(format!("{} threads panicked", panic_count).into()) } } } /// Try to figure out what's in the box, and print it if we can. /// /// The actual error type we will get from `panic::catch_unwind` is really poorly documented. /// However, the `panic::set_hook` functions deal with a `PanicInfo` type, and its payload is /// documented as "commonly but not always `&'static str` or `String`". So we can try all of those, /// and give up if we didn't get one of those three types. fn try_to_extract_panic_info(info: &(dyn Any + Send +'static)) -> PerformError { if let Some(x) = info.downcast_ref::<PanicInfo>() { format!("job panicked: {}", x).into() } else if let Some(x) = info.downcast_ref::<&'static str>() { format!("job panicked: {}", x).into() } else if let Some(x) = info.downcast_ref::<String>() { format!("job panicked: {}", x).into() } else { "job panicked".into() } } #[cfg(test)] mod tests { use diesel::prelude::*; use diesel::r2d2; use super::*; use crate::schema::background_jobs::dsl::*; use std::panic::AssertUnwindSafe; use std::sync::{Arc, Barrier, Mutex, MutexGuard}; #[test] fn jobs_are_locked_when_fetched() { let _guard = TestGuard::lock(); let runner = runner(); let first_job_id = create_dummy_job(&runner).id; let second_job_id = create_dummy_job(&runner).id; let fetch_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let fetch_barrier2 = fetch_barrier.clone(); let return_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let return_barrier2 = return_barrier.clone(); runner.get_single_job(channel::dummy_sender(), move |job| { fetch_barrier.0.wait(); // Tell thread 2 it can lock its job assert_eq!(first_job_id, job.id); return_barrier.0.wait(); // Wait for thread 2 to lock its job Ok(()) }); fetch_barrier2.0.wait(); // Wait until thread 1 locks its job runner.get_single_job(channel::dummy_sender(), move |job| { assert_eq!(second_job_id, job.id); return_barrier2.0.wait(); // Tell thread 1 it can unlock its job Ok(()) }); runner.wait_for_jobs().unwrap(); } #[test] fn jobs_are_deleted_when_successfully_run() { let _guard = TestGuard::lock(); let runner = runner(); create_dummy_job(&runner); runner.get_single_job(channel::dummy_sender(), |_| Ok(())); runner.wait_for_jobs().unwrap(); let remaining_jobs = background_jobs .count() .get_result(&*runner.connection().unwrap()); assert_eq!(Ok(0), remaining_jobs); } #[test] fn failed_jobs_do_not_release_lock_before_updating_retry_time() { let _guard = 
TestGuard::lock(); let runner = runner(); create_dummy_job(&runner); let barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let barrier2 = barrier.clone(); runner.get_single_job(channel::dummy_sender(), move |_| { barrier.0.wait(); // error so the job goes back into the queue Err("nope".into()) }); let conn = runner.connection().unwrap(); // Wait for the first thread to acquire the lock barrier2.0.wait(); // We are intentionally not using `get_single_job` here. // `SKIP LOCKED` is intentionally omitted here, so we block until // the lock on the first job is released. // If there is any point where the row is unlocked, but the retry // count is not updated, we will get a row here. let available_jobs = background_jobs .select(id) .filter(retries.eq(0)) .for_update() .load::<i64>(&*conn) .unwrap(); assert_eq!(0, available_jobs.len()); // Sanity check to make sure the job actually is there let total_jobs_including_failed = background_jobs .select(id) .for_update() .load::<i64>(&*conn) .unwrap(); assert_eq!(1, total_jobs_including_failed.len()); runner.wait_for_jobs().unwrap(); } #[test] fn panicking_in_jobs_updates_retry_counter() { let _guard = TestGuard::lock(); let runner = runner(); let job_id = create_dummy_job(&runner).id; runner.get_single_job(channel::dummy_sender(), |_| panic!()); runner.wait_for_jobs().unwrap(); let tries = background_jobs .find(job_id) .select(retries) .for_update() .first::<i32>(&*runner.connection().unwrap()) .unwrap(); assert_eq!(1, tries); } lazy_static::lazy_static! { // Since these tests deal with behavior concerning multiple connections // running concurrently, they have to run outside of a transaction. // Therefore we can't run more than one at a time. // // Rather than forcing the whole suite to be run with `--test-threads 1`, // we just lock these tests instead. static ref TEST_MUTEX: Mutex<()> = Mutex::new(()); } struct TestGuard<'a>(MutexGuard<'a, ()>); impl<'a> TestGuard<'a> { fn lock() -> Self { TestGuard(TEST_MUTEX.lock().unwrap()) } } impl<'a> Drop for TestGuard<'a> { fn drop(&mut self) { ::diesel::sql_query("TRUNCATE TABLE background_jobs") .execute(&*runner().connection().unwrap()) .unwrap(); } } type Runner<Env> = crate::Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>>; fn runner() -> Runner<()> { let database_url = dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests"); crate::Runner::builder(()) .database_url(database_url) .thread_count(2) .build() } fn create_dummy_job(runner: &Runner<()>) -> storage::BackgroundJob { ::diesel::insert_into(background_jobs) .values((job_type.eq("Foo"), data.eq(serde_json::json!(null)))) .returning((id, job_type, data)) .get_result(&*runner.connection().unwrap()) .unwrap() } }
{ eprintln!("Job {} failed to run: {}", job_id, e); storage::update_failed_job(&conn, job_id); }
conditional_block
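This `conditional_block` row masks the `Err(e) => { ... }` arm of the job-result `match` in `get_single_job`. Reassembling any row in this dump is plain concatenation of its three text fields; a small sketch (the function and parameter names follow the column layout used here and are assumptions about the loader, not part of the crate):

```rust
/// Rebuild the original source file from one record in this dump.
/// `fim_type` is only a label; reassembly is prefix ++ middle ++ suffix.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut src = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    src.push_str(prefix);
    src.push_str(middle);
    src.push_str(suffix);
    src
}
```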
runner.rs
use diesel::prelude::*; #[cfg(feature = "r2d2")] use diesel::r2d2; use std::any::Any; use std::error::Error; use std::panic::{catch_unwind, AssertUnwindSafe, PanicInfo, RefUnwindSafe, UnwindSafe}; use std::sync::Arc; use std::time::Duration; use threadpool::ThreadPool; use crate::db::*; use crate::errors::*; use crate::{storage, Registry}; use event::*; mod channel; mod event; pub struct NoConnectionPoolGiven; #[allow(missing_debug_implementations)] pub struct Builder<Env, ConnectionPoolBuilder> { connection_pool_or_builder: ConnectionPoolBuilder, environment: Env, thread_count: Option<usize>, job_start_timeout: Option<Duration>, } impl<Env, ConnectionPoolBuilder> Builder<Env, ConnectionPoolBuilder> { /// Set the number of threads to be used to run jobs concurrently. /// /// Defaults to 5 pub fn thread_count(mut self, thread_count: usize) -> Self { self.thread_count = Some(thread_count); self } fn get_thread_count(&self) -> usize
/// The amount of time to wait for a job to start before assuming an error /// has occurred. /// /// Defaults to 10 seconds. pub fn job_start_timeout(mut self, timeout: Duration) -> Self { self.job_start_timeout = Some(timeout); self } /// Provide a connection pool to be used by the runner pub fn connection_pool<NewPool>(self, pool: NewPool) -> Builder<Env, NewPool> { Builder { connection_pool_or_builder: pool, environment: self.environment, thread_count: self.thread_count, job_start_timeout: self.job_start_timeout, } } } #[cfg(feature = "r2d2")] impl<Env, ConnectionPoolBuilder> Builder<Env, ConnectionPoolBuilder> { /// Build the runner with an r2d2 connection pool /// /// This will override any connection pool previously provided pub fn database_url<S: Into<String>>(self, database_url: S) -> Builder<Env, R2d2Builder> { self.connection_pool_builder(database_url, r2d2::Builder::new()) } /// Provide a connection pool builder. /// /// This will override any connection pool previously provided. /// /// You should call this method if you want to provide additional /// configuration for the database connection pool. The builder will be /// configured to have its max size set to the value given to `2 * thread_count`. /// To override this behavior, call [`connection_count`](Self::connection_count) pub fn connection_pool_builder<S: Into<String>>( self, database_url: S, builder: r2d2::Builder<r2d2::ConnectionManager<PgConnection>>, ) -> Builder<Env, R2d2Builder> { self.connection_pool(R2d2Builder::new(database_url.into(), builder)) } } #[cfg(feature = "r2d2")] impl<Env> Builder<Env, R2d2Builder> { /// Set the max size of the database connection pool pub fn connection_count(mut self, connection_count: u32) -> Self { self.connection_pool_or_builder .connection_count(connection_count); self } /// Build the runner with an r2d2 connection pool. pub fn build(self) -> Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>> { let thread_count = self.get_thread_count(); let connection_pool_size = thread_count as u32 * 2; let connection_pool = self.connection_pool_or_builder.build(connection_pool_size); Runner { connection_pool, thread_pool: ThreadPool::new(thread_count), environment: Arc::new(self.environment), registry: Arc::new(Registry::load()), job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)), } } } impl<Env, ConnectionPool> Builder<Env, ConnectionPool> where ConnectionPool: DieselPool, { /// Build the runner pub fn build(self) -> Runner<Env, ConnectionPool> { Runner { thread_pool: ThreadPool::new(self.get_thread_count()), connection_pool: self.connection_pool_or_builder, environment: Arc::new(self.environment), registry: Arc::new(Registry::load()), job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)), } } } #[allow(missing_debug_implementations)] /// The core runner responsible for locking and running jobs pub struct Runner<Env:'static, ConnectionPool> { connection_pool: ConnectionPool, thread_pool: ThreadPool, environment: Arc<Env>, registry: Arc<Registry<Env>>, job_start_timeout: Duration, } impl<Env> Runner<Env, NoConnectionPoolGiven> { /// Create a builder for a job runner /// /// This method takes the two required configurations, the database /// connection pool, and the environment to pass to your jobs. If your /// environment contains a connection pool, it should be the same pool given /// here. 
pub fn builder(environment: Env) -> Builder<Env, NoConnectionPoolGiven> { Builder { connection_pool_or_builder: NoConnectionPoolGiven, environment, thread_count: None, job_start_timeout: None, } } } impl<Env, ConnectionPool> Runner<Env, ConnectionPool> { #[doc(hidden)] /// For use in integration tests pub fn connection_pool(&self) -> &ConnectionPool { &self.connection_pool } } impl<Env, ConnectionPool> Runner<Env, ConnectionPool> where Env: RefUnwindSafe + Send + Sync +'static, ConnectionPool: DieselPool +'static, { /// Runs all pending jobs in the queue /// /// This function will return once all jobs in the queue have begun running, /// but does not wait for them to complete. When this function returns, at /// least one thread will have tried to acquire a new job, and found there /// were none in the queue. pub fn run_all_pending_jobs(&self) -> Result<(), FetchError<ConnectionPool>> { use std::cmp::max; let max_threads = self.thread_pool.max_count(); let (sender, receiver) = channel::new(max_threads); let mut pending_messages = 0; loop { let available_threads = max_threads - self.thread_pool.active_count(); let jobs_to_queue = if pending_messages == 0 { // If we have no queued jobs talking to us, and there are no // available threads, we still need to queue at least one job // or we'll never receive a message max(available_threads, 1) } else { available_threads }; for _ in 0..jobs_to_queue { self.run_single_job(sender.clone()); } pending_messages += jobs_to_queue; match receiver.recv_timeout(self.job_start_timeout) { Ok(Event::Working) => pending_messages -= 1, Ok(Event::NoJobAvailable) => return Ok(()), Ok(Event::ErrorLoadingJob(e)) => return Err(FetchError::FailedLoadingJob(e)), Ok(Event::FailedToAcquireConnection(e)) => { return Err(FetchError::NoDatabaseConnection(e)); } Err(_) => return Err(FetchError::NoMessageReceived), } } } fn run_single_job(&self, sender: EventSender<ConnectionPool>) { let environment = Arc::clone(&self.environment); let registry = Arc::clone(&self.registry); // FIXME: https://github.com/sfackler/r2d2/pull/70 let connection_pool = AssertUnwindSafe(self.connection_pool().clone()); self.get_single_job(sender, move |job| { let perform_job = registry .get(&job.job_type) .ok_or_else(|| PerformError::from(format!("Unknown job type {}", job.job_type)))?; perform_job.perform(job.data, &environment, &connection_pool.0) }) } fn get_single_job<F>(&self, sender: EventSender<ConnectionPool>, f: F) where F: FnOnce(storage::BackgroundJob) -> Result<(), PerformError> + Send + UnwindSafe +'static, { use diesel::result::Error::RollbackTransaction; // The connection may not be `Send` so we need to clone the pool instead let pool = self.connection_pool.clone(); self.thread_pool.execute(move || { let conn = match pool.get() { Ok(conn) => conn, Err(e) => { sender.send(Event::FailedToAcquireConnection(e)); return; } }; let job_run_result = conn.transaction::<_, diesel::result::Error, _>(|| { let job = match storage::find_next_unlocked_job(&conn).optional() { Ok(Some(j)) => { sender.send(Event::Working); j } Ok(None) => { sender.send(Event::NoJobAvailable); return Ok(()); } Err(e) => { sender.send(Event::ErrorLoadingJob(e)); return Err(RollbackTransaction); } }; let job_id = job.id; let result = catch_unwind(|| f(job)) .map_err(|e| try_to_extract_panic_info(&e)) .and_then(|r| r); match result { Ok(_) => storage::delete_successful_job(&conn, job_id)?, Err(e) => { eprintln!("Job {} failed to run: {}", job_id, e); storage::update_failed_job(&conn, job_id); } } Ok(()) }); match 
job_run_result { Ok(_) | Err(RollbackTransaction) => {} Err(e) => { panic!("Failed to update job: {:?}", e); } } }) } fn connection(&self) -> Result<DieselPooledConn<ConnectionPool>, Box<dyn Error + Send + Sync>> { self.connection_pool.get().map_err(Into::into) } /// Waits for all running jobs to complete, and returns an error if any /// failed /// /// This function is intended for use in tests. If any jobs have failed, it /// will return `swirl::JobsFailed` with the number of jobs that failed. /// /// If any other unexpected errors occurred, such as panicked worker threads /// or an error loading the job count from the database, an opaque error /// will be returned. pub fn check_for_failed_jobs(&self) -> Result<(), FailedJobsError> { self.wait_for_jobs()?; let failed_jobs = storage::failed_job_count(&*self.connection()?)?; if failed_jobs == 0 { Ok(()) } else { Err(JobsFailed(failed_jobs)) } } fn wait_for_jobs(&self) -> Result<(), Box<dyn Error + Send + Sync>> { self.thread_pool.join(); let panic_count = self.thread_pool.panic_count(); if panic_count == 0 { Ok(()) } else { Err(format!("{} threads panicked", panic_count).into()) } } } /// Try to figure out what's in the box, and print it if we can. /// /// The actual error type we will get from `panic::catch_unwind` is really poorly documented. /// However, the `panic::set_hook` functions deal with a `PanicInfo` type, and its payload is /// documented as "commonly but not always `&'static str` or `String`". So we can try all of those, /// and give up if we didn't get one of those three types. fn try_to_extract_panic_info(info: &(dyn Any + Send +'static)) -> PerformError { if let Some(x) = info.downcast_ref::<PanicInfo>() { format!("job panicked: {}", x).into() } else if let Some(x) = info.downcast_ref::<&'static str>() { format!("job panicked: {}", x).into() } else if let Some(x) = info.downcast_ref::<String>() { format!("job panicked: {}", x).into() } else { "job panicked".into() } } #[cfg(test)] mod tests { use diesel::prelude::*; use diesel::r2d2; use super::*; use crate::schema::background_jobs::dsl::*; use std::panic::AssertUnwindSafe; use std::sync::{Arc, Barrier, Mutex, MutexGuard}; #[test] fn jobs_are_locked_when_fetched() { let _guard = TestGuard::lock(); let runner = runner(); let first_job_id = create_dummy_job(&runner).id; let second_job_id = create_dummy_job(&runner).id; let fetch_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let fetch_barrier2 = fetch_barrier.clone(); let return_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let return_barrier2 = return_barrier.clone(); runner.get_single_job(channel::dummy_sender(), move |job| { fetch_barrier.0.wait(); // Tell thread 2 it can lock its job assert_eq!(first_job_id, job.id); return_barrier.0.wait(); // Wait for thread 2 to lock its job Ok(()) }); fetch_barrier2.0.wait(); // Wait until thread 1 locks its job runner.get_single_job(channel::dummy_sender(), move |job| { assert_eq!(second_job_id, job.id); return_barrier2.0.wait(); // Tell thread 1 it can unlock its job Ok(()) }); runner.wait_for_jobs().unwrap(); } #[test] fn jobs_are_deleted_when_successfully_run() { let _guard = TestGuard::lock(); let runner = runner(); create_dummy_job(&runner); runner.get_single_job(channel::dummy_sender(), |_| Ok(())); runner.wait_for_jobs().unwrap(); let remaining_jobs = background_jobs .count() .get_result(&*runner.connection().unwrap()); assert_eq!(Ok(0), remaining_jobs); } #[test] fn failed_jobs_do_not_release_lock_before_updating_retry_time() { let _guard = TestGuard::lock(); 
let runner = runner(); create_dummy_job(&runner); let barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let barrier2 = barrier.clone(); runner.get_single_job(channel::dummy_sender(), move |_| { barrier.0.wait(); // error so the job goes back into the queue Err("nope".into()) }); let conn = runner.connection().unwrap(); // Wait for the first thread to acquire the lock barrier2.0.wait(); // We are intentionally not using `get_single_job` here. // `SKIP LOCKED` is intentionally omitted here, so we block until // the lock on the first job is released. // If there is any point where the row is unlocked, but the retry // count is not updated, we will get a row here. let available_jobs = background_jobs .select(id) .filter(retries.eq(0)) .for_update() .load::<i64>(&*conn) .unwrap(); assert_eq!(0, available_jobs.len()); // Sanity check to make sure the job actually is there let total_jobs_including_failed = background_jobs .select(id) .for_update() .load::<i64>(&*conn) .unwrap(); assert_eq!(1, total_jobs_including_failed.len()); runner.wait_for_jobs().unwrap(); } #[test] fn panicking_in_jobs_updates_retry_counter() { let _guard = TestGuard::lock(); let runner = runner(); let job_id = create_dummy_job(&runner).id; runner.get_single_job(channel::dummy_sender(), |_| panic!()); runner.wait_for_jobs().unwrap(); let tries = background_jobs .find(job_id) .select(retries) .for_update() .first::<i32>(&*runner.connection().unwrap()) .unwrap(); assert_eq!(1, tries); } lazy_static::lazy_static! { // Since these tests deal with behavior concerning multiple connections // running concurrently, they have to run outside of a transaction. // Therefore we can't run more than one at a time. // // Rather than forcing the whole suite to be run with `--test-threads 1`, // we just lock these tests instead. static ref TEST_MUTEX: Mutex<()> = Mutex::new(()); } struct TestGuard<'a>(MutexGuard<'a, ()>); impl<'a> TestGuard<'a> { fn lock() -> Self { TestGuard(TEST_MUTEX.lock().unwrap()) } } impl<'a> Drop for TestGuard<'a> { fn drop(&mut self) { ::diesel::sql_query("TRUNCATE TABLE background_jobs") .execute(&*runner().connection().unwrap()) .unwrap(); } } type Runner<Env> = crate::Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>>; fn runner() -> Runner<()> { let database_url = dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests"); crate::Runner::builder(()) .database_url(database_url) .thread_count(2) .build() } fn create_dummy_job(runner: &Runner<()>) -> storage::BackgroundJob { ::diesel::insert_into(background_jobs) .values((job_type.eq("Foo"), data.eq(serde_json::json!(null)))) .returning((id, job_type, data)) .get_result(&*runner.connection().unwrap()) .unwrap() } }
{ self.thread_count.unwrap_or(5) }
identifier_body
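The `identifier_body` row above masks `{ self.thread_count.unwrap_or(5) }`, which illustrates the builder's defaulting convention: optional settings are stored as `Option<T>` and only resolved when read. A self-contained sketch of the same pattern (the `Config` type here is illustrative, not from the crate):

```rust
// Illustrative only: mirrors the Builder's Option-based defaults.
struct Config {
    thread_count: Option<usize>,
    job_start_timeout_secs: Option<u64>,
}

impl Config {
    // Defaults are applied at read time, so an explicit setter always wins
    // and "unset" stays distinguishable from "set to the default".
    fn thread_count(&self) -> usize {
        self.thread_count.unwrap_or(5)
    }

    fn job_start_timeout_secs(&self) -> u64 {
        self.job_start_timeout_secs.unwrap_or(10)
    }
}
```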
runner.rs
use diesel::prelude::*; #[cfg(feature = "r2d2")] use diesel::r2d2; use std::any::Any; use std::error::Error; use std::panic::{catch_unwind, AssertUnwindSafe, PanicInfo, RefUnwindSafe, UnwindSafe}; use std::sync::Arc; use std::time::Duration; use threadpool::ThreadPool; use crate::db::*; use crate::errors::*; use crate::{storage, Registry}; use event::*; mod channel; mod event; pub struct NoConnectionPoolGiven; #[allow(missing_debug_implementations)] pub struct Builder<Env, ConnectionPoolBuilder> { connection_pool_or_builder: ConnectionPoolBuilder, environment: Env, thread_count: Option<usize>, job_start_timeout: Option<Duration>, } impl<Env, ConnectionPoolBuilder> Builder<Env, ConnectionPoolBuilder> { /// Set the number of threads to be used to run jobs concurrently. /// /// Defaults to 5 pub fn thread_count(mut self, thread_count: usize) -> Self { self.thread_count = Some(thread_count); self } fn get_thread_count(&self) -> usize { self.thread_count.unwrap_or(5) } /// The amount of time to wait for a job to start before assuming an error /// has occurred. /// /// Defaults to 10 seconds. pub fn job_start_timeout(mut self, timeout: Duration) -> Self { self.job_start_timeout = Some(timeout); self } /// Provide a connection pool to be used by the runner pub fn connection_pool<NewPool>(self, pool: NewPool) -> Builder<Env, NewPool> { Builder { connection_pool_or_builder: pool, environment: self.environment, thread_count: self.thread_count, job_start_timeout: self.job_start_timeout, } } } #[cfg(feature = "r2d2")] impl<Env, ConnectionPoolBuilder> Builder<Env, ConnectionPoolBuilder> { /// Build the runner with an r2d2 connection pool /// /// This will override any connection pool previously provided pub fn database_url<S: Into<String>>(self, database_url: S) -> Builder<Env, R2d2Builder> { self.connection_pool_builder(database_url, r2d2::Builder::new()) } /// Provide a connection pool builder. /// /// This will override any connection pool previously provided. /// /// You should call this method if you want to provide additional /// configuration for the database connection pool. The builder will be /// configured to have its max size set to the value given to `2 * thread_count`. /// To override this behavior, call [`connection_count`](Self::connection_count) pub fn connection_pool_builder<S: Into<String>>( self, database_url: S, builder: r2d2::Builder<r2d2::ConnectionManager<PgConnection>>, ) -> Builder<Env, R2d2Builder> { self.connection_pool(R2d2Builder::new(database_url.into(), builder)) } } #[cfg(feature = "r2d2")] impl<Env> Builder<Env, R2d2Builder> { /// Set the max size of the database connection pool pub fn connection_count(mut self, connection_count: u32) -> Self { self.connection_pool_or_builder .connection_count(connection_count); self } /// Build the runner with an r2d2 connection pool. 
pub fn build(self) -> Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>> { let thread_count = self.get_thread_count(); let connection_pool_size = thread_count as u32 * 2; let connection_pool = self.connection_pool_or_builder.build(connection_pool_size); Runner { connection_pool, thread_pool: ThreadPool::new(thread_count), environment: Arc::new(self.environment), registry: Arc::new(Registry::load()), job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)), } } } impl<Env, ConnectionPool> Builder<Env, ConnectionPool> where ConnectionPool: DieselPool, { /// Build the runner pub fn build(self) -> Runner<Env, ConnectionPool> { Runner { thread_pool: ThreadPool::new(self.get_thread_count()), connection_pool: self.connection_pool_or_builder, environment: Arc::new(self.environment), registry: Arc::new(Registry::load()), job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)), } } } #[allow(missing_debug_implementations)] /// The core runner responsible for locking and running jobs pub struct Runner<Env: 'static, ConnectionPool> { connection_pool: ConnectionPool, thread_pool: ThreadPool,
environment: Arc<Env>, registry: Arc<Registry<Env>>, job_start_timeout: Duration, } impl<Env> Runner<Env, NoConnectionPoolGiven> { /// Create a builder for a job runner /// /// This method takes the two required configurations, the database /// connection pool, and the environment to pass to your jobs. If your /// environment contains a connection pool, it should be the same pool given /// here. pub fn builder(environment: Env) -> Builder<Env, NoConnectionPoolGiven> { Builder { connection_pool_or_builder: NoConnectionPoolGiven, environment, thread_count: None, job_start_timeout: None, } } } impl<Env, ConnectionPool> Runner<Env, ConnectionPool> { #[doc(hidden)] /// For use in integration tests pub fn connection_pool(&self) -> &ConnectionPool { &self.connection_pool } } impl<Env, ConnectionPool> Runner<Env, ConnectionPool> where Env: RefUnwindSafe + Send + Sync +'static, ConnectionPool: DieselPool +'static, { /// Runs all pending jobs in the queue /// /// This function will return once all jobs in the queue have begun running, /// but does not wait for them to complete. When this function returns, at /// least one thread will have tried to acquire a new job, and found there /// were none in the queue. pub fn run_all_pending_jobs(&self) -> Result<(), FetchError<ConnectionPool>> { use std::cmp::max; let max_threads = self.thread_pool.max_count(); let (sender, receiver) = channel::new(max_threads); let mut pending_messages = 0; loop { let available_threads = max_threads - self.thread_pool.active_count(); let jobs_to_queue = if pending_messages == 0 { // If we have no queued jobs talking to us, and there are no // available threads, we still need to queue at least one job // or we'll never receive a message max(available_threads, 1) } else { available_threads }; for _ in 0..jobs_to_queue { self.run_single_job(sender.clone()); } pending_messages += jobs_to_queue; match receiver.recv_timeout(self.job_start_timeout) { Ok(Event::Working) => pending_messages -= 1, Ok(Event::NoJobAvailable) => return Ok(()), Ok(Event::ErrorLoadingJob(e)) => return Err(FetchError::FailedLoadingJob(e)), Ok(Event::FailedToAcquireConnection(e)) => { return Err(FetchError::NoDatabaseConnection(e)); } Err(_) => return Err(FetchError::NoMessageReceived), } } } fn run_single_job(&self, sender: EventSender<ConnectionPool>) { let environment = Arc::clone(&self.environment); let registry = Arc::clone(&self.registry); // FIXME: https://github.com/sfackler/r2d2/pull/70 let connection_pool = AssertUnwindSafe(self.connection_pool().clone()); self.get_single_job(sender, move |job| { let perform_job = registry .get(&job.job_type) .ok_or_else(|| PerformError::from(format!("Unknown job type {}", job.job_type)))?; perform_job.perform(job.data, &environment, &connection_pool.0) }) } fn get_single_job<F>(&self, sender: EventSender<ConnectionPool>, f: F) where F: FnOnce(storage::BackgroundJob) -> Result<(), PerformError> + Send + UnwindSafe +'static, { use diesel::result::Error::RollbackTransaction; // The connection may not be `Send` so we need to clone the pool instead let pool = self.connection_pool.clone(); self.thread_pool.execute(move || { let conn = match pool.get() { Ok(conn) => conn, Err(e) => { sender.send(Event::FailedToAcquireConnection(e)); return; } }; let job_run_result = conn.transaction::<_, diesel::result::Error, _>(|| { let job = match storage::find_next_unlocked_job(&conn).optional() { Ok(Some(j)) => { sender.send(Event::Working); j } Ok(None) => { sender.send(Event::NoJobAvailable); return Ok(()); } Err(e) => { 
sender.send(Event::ErrorLoadingJob(e)); return Err(RollbackTransaction); } }; let job_id = job.id; let result = catch_unwind(|| f(job)) .map_err(|e| try_to_extract_panic_info(&e)) .and_then(|r| r); match result { Ok(_) => storage::delete_successful_job(&conn, job_id)?, Err(e) => { eprintln!("Job {} failed to run: {}", job_id, e); storage::update_failed_job(&conn, job_id); } } Ok(()) }); match job_run_result { Ok(_) | Err(RollbackTransaction) => {} Err(e) => { panic!("Failed to update job: {:?}", e); } } }) } fn connection(&self) -> Result<DieselPooledConn<ConnectionPool>, Box<dyn Error + Send + Sync>> { self.connection_pool.get().map_err(Into::into) } /// Waits for all running jobs to complete, and returns an error if any /// failed /// /// This function is intended for use in tests. If any jobs have failed, it /// will return `swirl::JobsFailed` with the number of jobs that failed. /// /// If any other unexpected errors occurred, such as panicked worker threads /// or an error loading the job count from the database, an opaque error /// will be returned. pub fn check_for_failed_jobs(&self) -> Result<(), FailedJobsError> { self.wait_for_jobs()?; let failed_jobs = storage::failed_job_count(&*self.connection()?)?; if failed_jobs == 0 { Ok(()) } else { Err(JobsFailed(failed_jobs)) } } fn wait_for_jobs(&self) -> Result<(), Box<dyn Error + Send + Sync>> { self.thread_pool.join(); let panic_count = self.thread_pool.panic_count(); if panic_count == 0 { Ok(()) } else { Err(format!("{} threads panicked", panic_count).into()) } } } /// Try to figure out what's in the box, and print it if we can. /// /// The actual error type we will get from `panic::catch_unwind` is really poorly documented. /// However, the `panic::set_hook` functions deal with a `PanicInfo` type, and its payload is /// documented as "commonly but not always `&'static str` or `String`". So we can try all of those, /// and give up if we didn't get one of those three types. 
fn try_to_extract_panic_info(info: &(dyn Any + Send +'static)) -> PerformError { if let Some(x) = info.downcast_ref::<PanicInfo>() { format!("job panicked: {}", x).into() } else if let Some(x) = info.downcast_ref::<&'static str>() { format!("job panicked: {}", x).into() } else if let Some(x) = info.downcast_ref::<String>() { format!("job panicked: {}", x).into() } else { "job panicked".into() } } #[cfg(test)] mod tests { use diesel::prelude::*; use diesel::r2d2; use super::*; use crate::schema::background_jobs::dsl::*; use std::panic::AssertUnwindSafe; use std::sync::{Arc, Barrier, Mutex, MutexGuard}; #[test] fn jobs_are_locked_when_fetched() { let _guard = TestGuard::lock(); let runner = runner(); let first_job_id = create_dummy_job(&runner).id; let second_job_id = create_dummy_job(&runner).id; let fetch_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let fetch_barrier2 = fetch_barrier.clone(); let return_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let return_barrier2 = return_barrier.clone(); runner.get_single_job(channel::dummy_sender(), move |job| { fetch_barrier.0.wait(); // Tell thread 2 it can lock its job assert_eq!(first_job_id, job.id); return_barrier.0.wait(); // Wait for thread 2 to lock its job Ok(()) }); fetch_barrier2.0.wait(); // Wait until thread 1 locks its job runner.get_single_job(channel::dummy_sender(), move |job| { assert_eq!(second_job_id, job.id); return_barrier2.0.wait(); // Tell thread 1 it can unlock its job Ok(()) }); runner.wait_for_jobs().unwrap(); } #[test] fn jobs_are_deleted_when_successfully_run() { let _guard = TestGuard::lock(); let runner = runner(); create_dummy_job(&runner); runner.get_single_job(channel::dummy_sender(), |_| Ok(())); runner.wait_for_jobs().unwrap(); let remaining_jobs = background_jobs .count() .get_result(&*runner.connection().unwrap()); assert_eq!(Ok(0), remaining_jobs); } #[test] fn failed_jobs_do_not_release_lock_before_updating_retry_time() { let _guard = TestGuard::lock(); let runner = runner(); create_dummy_job(&runner); let barrier = Arc::new(AssertUnwindSafe(Barrier::new(2))); let barrier2 = barrier.clone(); runner.get_single_job(channel::dummy_sender(), move |_| { barrier.0.wait(); // error so the job goes back into the queue Err("nope".into()) }); let conn = runner.connection().unwrap(); // Wait for the first thread to acquire the lock barrier2.0.wait(); // We are intentionally not using `get_single_job` here. // `SKIP LOCKED` is intentionally omitted here, so we block until // the lock on the first job is released. // If there is any point where the row is unlocked, but the retry // count is not updated, we will get a row here. let available_jobs = background_jobs .select(id) .filter(retries.eq(0)) .for_update() .load::<i64>(&*conn) .unwrap(); assert_eq!(0, available_jobs.len()); // Sanity check to make sure the job actually is there let total_jobs_including_failed = background_jobs .select(id) .for_update() .load::<i64>(&*conn) .unwrap(); assert_eq!(1, total_jobs_including_failed.len()); runner.wait_for_jobs().unwrap(); } #[test] fn panicking_in_jobs_updates_retry_counter() { let _guard = TestGuard::lock(); let runner = runner(); let job_id = create_dummy_job(&runner).id; runner.get_single_job(channel::dummy_sender(), |_| panic!()); runner.wait_for_jobs().unwrap(); let tries = background_jobs .find(job_id) .select(retries) .for_update() .first::<i32>(&*runner.connection().unwrap()) .unwrap(); assert_eq!(1, tries); } lazy_static::lazy_static! 
{ // Since these tests deal with behavior concerning multiple connections // running concurrently, they have to run outside of a transaction. // Therefore we can't run more than one at a time. // // Rather than forcing the whole suite to be run with `--test-threads 1`, // we just lock these tests instead. static ref TEST_MUTEX: Mutex<()> = Mutex::new(()); } struct TestGuard<'a>(MutexGuard<'a, ()>); impl<'a> TestGuard<'a> { fn lock() -> Self { TestGuard(TEST_MUTEX.lock().unwrap()) } } impl<'a> Drop for TestGuard<'a> { fn drop(&mut self) { ::diesel::sql_query("TRUNCATE TABLE background_jobs") .execute(&*runner().connection().unwrap()) .unwrap(); } } type Runner<Env> = crate::Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>>; fn runner() -> Runner<()> { let database_url = dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests"); crate::Runner::builder(()) .database_url(database_url) .thread_count(2) .build() } fn create_dummy_job(runner: &Runner<()>) -> storage::BackgroundJob { ::diesel::insert_into(background_jobs) .values((job_type.eq("Foo"), data.eq(serde_json::json!(null)))) .returning((id, job_type, data)) .get_result(&*runner.connection().unwrap()) .unwrap() } }
random_line_split
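Unlike the syntax-aware splits above (`identifier_name`, `identifier_body`, `conditional_block`), a `random_line_split` row takes an arbitrary contiguous run of lines as the middle. A hypothetical generator for such a record might look like the following (the caller supplies the randomly chosen indices; bounds checking is elided and `start + len` is assumed to be at most the line count):

```rust
/// Carve an arbitrary contiguous run of lines out as the middle,
/// keeping the surrounding text as prefix/suffix.
fn line_split_at(src: &str, start: usize, len: usize) -> (String, String, String) {
    // split_inclusive keeps the trailing '\n' on each line, so the three
    // pieces concatenate back to the original source exactly.
    let lines: Vec<&str> = src.split_inclusive('\n').collect();
    let prefix = lines[..start].concat();
    let middle = lines[start..start + len].concat();
    let suffix = lines[start + len..].concat();
    (prefix, middle, suffix)
}
```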
lib.rs
//! The Pikelet Compiler //! //! # Compiler Architecture //! //! In order to create a separation of concerns, we break up our compiler into many //! small stages, beginning with a source string, and ultimately ending up with //! compiled machine code. //! //! Below is a rough flow chart showing how source strings are currently lexed, //! parsed, desugared, and type checked/elaborated: //! //! ```bob //! .------------. //! | String | //! '------------' //! | //! - - - - - - - - - - - | - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //! Frontend | //! | //! pikelet_concrete::parse::lexer //! | //! v //! .---------------------------------------. //! | pikelet_concrete::parse::lexer::Token | //! '---------------------------------------' //! | //! pikelet_concrete::parse //! | //! v //! .------------------------------------------. //! | pikelet_concrete::syntax::concrete::Term |---------> Code formatter (TODO) //! '------------------------------------------' //! | //! pikelet_concrete::desugar //! | //! v //! .-------------------------------------. //! | pikelet_concrete::syntax::raw::Term | //! '-------------------------------------' //! | .-------------------------------------. //! pikelet_concrete::elaborate::{check,infer} <---------- | pikelet_core::syntax::domain::Value | //! | '-------------------------------------' //! v ^ //! .----------------------------------. | //! | pikelet_core::syntax::core::Term | -- pikelet_core::normalize -------' //! '----------------------------------' //! | //! | //! - - - - - - - - - - - | - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //! Middle (TODO) | //! | //! v //! A-Normal Form (ANF) //! | //! v //! Closure Conversion (CC) //! | //! v //! Static Single Assignment (SSA) //! | //! | //! - - - - - - - - - - - | - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //! Backend (TODO) | //! | //! v //! Codegen //! | //! *-------> Bytecode? //! | //! *-------> WASM? //! | //! *-------> Cranelift IR? //! | //! '-------> LLVM IR? //! ``` //! //! As you can see we have only built the front-end as of the time of writing. When //! we begin to build out a [compiler back end](https://github.com/pikelet-lang/pikelet/issues/9), //! more stages will be added after type checking and elaboration. //! //! ## Name binding //! //! Name binding is a surprisingly challenging thing to implement in type checkers //! and compilers. We use the [`moniker` crate](https://github.com/brendanzab/moniker) //! for this. Unfortunately this uses a quite slow method of name binding, and could //! result in performance blowouts in the future. This is something to keep an eye on! //! //! ## Performance considerations //! //! As you can see from the diagram above, this architecture leads to an //! easy-to-reason about pipeline. It does however result in the creation of lots of //! intermediate allocations of heap-allocated tree data structures that will //! ultimately be discarded. This is quite similar to the problem we face with //! iterators: //! //! ```rust,ignore //! // 'internal' iteration //! vec![1, 2, 3].map(|x| x * x).filter(|x| x < 3) //! //! // 'external' iteration //! vec![1, 2, 3].iter().map(|x| x * x).filter(|x| x < 3).collect() //! ``` //! //! The first example, which uses 'internal' iteration allocates a new collection //! after each operation, resulting in three allocated collections. We can improve //! the performance however by using 'external' iteration - ie. returning a series //! 
of chained iterator adaptors, that only perform the allocation on the call to //! `collect`. This emulates the 'fusion' that languages like Haskell perform to //! reduce intermediate allocations. //! //! We could potentially get some fusion between the stages of our compiler by way //! of the [visitor pattern](https://github.com/pikelet-lang/pikelet/issues/75). //! //! ## Support for interactive development //! //! It would be interesting to see how Pikelet could be implemented using an //! [asynchronous, query-based architecture](https://github.com/pikelet-lang/pikelet/issues/103). //! This will become more important as the demands of interactive development //! and incremental compilation become more pressing. In this model we would //! have to think of compilation as less a pure function from source code to //! machine code, and more as interacting with a database. //! //! ### Resources //! //! - [Queries: demand-driven compilation (Rustc Book)](https://rust-lang-nursery.github.io/rustc-guide/query.html) //! - [Anders Hejlsberg on Modern Compiler Construction (YouTube)](https://www.youtube.com/watch?v=wSdV1M7n4gQ) use codespan::CodeMap; pub use codespan::FileName; pub use codespan_reporting::{termcolor, ColorArg, Diagnostic}; use std::io; use pikelet_concrete::desugar::{Desugar, DesugarEnv}; use pikelet_concrete::elaborate::Context; use pikelet_concrete::resugar::Resugar; use pikelet_concrete::syntax::raw; use pikelet_core::syntax::{core, domain, Import}; /// An environment that keeps track of the state of a Pikelet program during /// compilation or interactive sessions #[derive(Debug, Clone)] pub struct Driver { /// The base type checking context, containing the built-in definitions context: Context, /// The base desugar environment, using the definitions from the `context` desugar_env: DesugarEnv, /// A codemap that owns the source code for any terms that are currently loaded code_map: CodeMap, } impl Driver { /// Create a new Pikelet environment, containing only the built-in definitions pub fn
() -> Driver { let context = Context::default(); let desugar_env = DesugarEnv::new(context.mappings()); Driver { context, desugar_env, code_map: CodeMap::new(), } } /// Create a new Pikelet environment, with the prelude loaded as well pub fn with_prelude() -> Driver { let mut pikelet = Driver::new(); pikelet .register_file( "prim".to_owned(), FileName::virtual_("prim"), pikelet_library::PRIM.to_owned(), ) .unwrap(); pikelet .register_file( "prelude".to_owned(), FileName::virtual_("prelude"), pikelet_library::PRELUDE.to_owned(), ) .unwrap(); pikelet } /// Add a binding to the driver's top-level environment pub fn add_binding(&mut self, name: &str, term: core::RcTerm, ann: domain::RcType) { let fv = self.desugar_env.on_binding(&name); self.context.insert_declaration(fv.clone(), ann.clone()); self.context.insert_definition(fv.clone(), term.clone()); } /// Register a file with the driver pub fn register_file( &mut self, path: String, name: FileName, src: String, ) -> Result<(), Vec<Diagnostic>> { let (term, ty) = self.infer_file(name, src)?; // FIXME: Check if import already exists self.context.insert_import(path, Import::Term(term), ty); Ok(()) } /// Infer the type of a file pub fn infer_file( &mut self, name: FileName, src: String, ) -> Result<(core::RcTerm, domain::RcType), Vec<Diagnostic>> { let file_map = self.code_map.add_filemap(name, src); // TODO: follow import paths let (concrete_term, _import_paths, errors) = pikelet_concrete::parse::term(&file_map); if!errors.is_empty() { return Err(errors.iter().map(|error| error.to_diagnostic()).collect()); } let raw_term = self.desugar(&concrete_term)?; self.infer_term(&raw_term) } /// Normalize the contents of a file pub fn normalize_file( &mut self, name: FileName, src: String, ) -> Result<domain::RcValue, Vec<Diagnostic>> { use pikelet_concrete::elaborate::InternalError; let (term, _) = self.infer_file(name, src)?; pikelet_core::nbe::nf_term(&self.context, &term) .map_err(|err| vec![InternalError::from(err).to_diagnostic()]) } /// Infer the type of a term pub fn infer_term( &self, raw_term: &raw::RcTerm, ) -> Result<(core::RcTerm, domain::RcType), Vec<Diagnostic>> { pikelet_concrete::elaborate::infer_term(&self.context, &raw_term) .map_err(|err| vec![err.to_diagnostic()]) } /// Normalize a term pub fn normalize_term(&self, term: &core::RcTerm) -> Result<domain::RcValue, Vec<Diagnostic>> { use pikelet_concrete::elaborate::InternalError; pikelet_core::nbe::nf_term(&self.context, term) .map_err(|err| vec![InternalError::from(err).to_diagnostic()]) } /// Desugar a term pub fn desugar<T>(&self, src: &impl Desugar<T>) -> Result<T, Vec<Diagnostic>> { src.desugar(&self.desugar_env) .map_err(|e| vec![e.to_diagnostic()]) } /// Resugar a term pub fn resugar<T>(&self, src: &impl Resugar<T>) -> T { self.context.resugar(src) } /// Emit the diagnostics using the given writer pub fn emit<'a>( &self, mut writer: impl termcolor::WriteColor, diagnostics: impl IntoIterator<Item = &'a Diagnostic>, ) -> io::Result<()> { for diagnostic in diagnostics { codespan_reporting::emit(&mut writer, &self.code_map, diagnostic)?; } Ok(()) } }
new
identifier_name
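This `identifier_name` row masks the name `new` in `Driver::new`. For orientation, a minimal sketch of driving the `Driver` API end to end, assuming the crate is named `pikelet_driver` (an assumption; the real crate name is not given here) and that the source string is valid Pikelet syntax:

```rust
// Sketch only: `pikelet_driver` is an assumed crate name for the lib.rs above.
use codespan::FileName;
use pikelet_driver::{termcolor, Driver};

fn main() {
    // `with_prelude` registers the `prim` and `prelude` virtual files.
    let mut driver = Driver::with_prelude();

    // The term syntax here is an assumption; any valid Pikelet term works.
    let src = "\"hello\"".to_owned();
    match driver.infer_file(FileName::virtual_("repl"), src) {
        Ok(_) => println!("type checked"),
        Err(diagnostics) => {
            let writer = termcolor::StandardStream::stderr(termcolor::ColorChoice::Auto);
            driver.emit(writer.lock(), &diagnostics).unwrap();
        }
    }
}
```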
lib.rs
//! The Pikelet Compiler //! //! # Compiler Architecture //! //! In order to create a separation of concerns, we break up our compiler into many //! small stages, beginning with a source string, and ultimately ending up with //! compiled machine code. //! //! Below is a rough flow chart showing how source strings are currently lexed, //! parsed, desugared, and type checked/elaborated: //! //! ```bob //! .------------. //! | String | //! '------------' //! | //! - - - - - - - - - - - | - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //! Frontend | //! | //! pikelet_concrete::parse::lexer //! | //! v //! .---------------------------------------. //! | pikelet_concrete::parse::lexer::Token | //! '---------------------------------------' //! | //! pikelet_concrete::parse //! | //! v //! .------------------------------------------. //! | pikelet_concrete::syntax::concrete::Term |---------> Code formatter (TODO) //! '------------------------------------------' //! | //! pikelet_concrete::desugar //! | //! v //! .-------------------------------------. //! | pikelet_concrete::syntax::raw::Term | //! '-------------------------------------' //! | .-------------------------------------. //! pikelet_concrete::elaborate::{check,infer} <---------- | pikelet_core::syntax::domain::Value | //! | '-------------------------------------' //! v ^ //! .----------------------------------. | //! | pikelet_core::syntax::core::Term | -- pikelet_core::normalize -------' //! '----------------------------------' //! | //! | //! - - - - - - - - - - - | - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //! Middle (TODO) | //! | //! v //! A-Normal Form (ANF) //! | //! v //! Closure Conversion (CC) //! | //! v //! Static Single Assignment (SSA) //! | //! | //! - - - - - - - - - - - | - - - - - - - - - - - - - - - - - - - - - - - - - - - - - //! Backend (TODO) | //! | //! v //! Codegen //! | //! *-------> Bytecode? //! | //! *-------> WASM? //! | //! *-------> Cranelift IR? //! | //! '-------> LLVM IR? //! ``` //! //! As you can see we have only built the front-end as of the time of writing. When //! we begin to build out a [compiler back end](https://github.com/pikelet-lang/pikelet/issues/9), //! more stages will be added after type checking and elaboration. //! //! ## Name binding //! //! Name binding is a surprisingly challenging thing to implement in type checkers //! and compilers. We use the [`moniker` crate](https://github.com/brendanzab/moniker) //! for this. Unfortunately this uses a quite slow method of name binding, and could //! result in performance blowouts in the future. This is something to keep an eye on! //! //! ## Performance considerations //! //! As you can see from the diagram above, this architecture leads to an //! easy-to-reason about pipeline. It does however result in the creation of lots of //! intermediate allocations of heap-allocated tree data structures that will //! ultimately be discarded. This is quite similar to the problem we face with //! iterators: //! //! ```rust,ignore //! // 'internal' iteration //! vec![1, 2, 3].map(|x| x * x).filter(|x| x < 3) //! //! // 'external' iteration //! vec![1, 2, 3].iter().map(|x| x * x).filter(|x| x < 3).collect() //! ``` //! //! The first example, which uses 'internal' iteration allocates a new collection //! after each operation, resulting in three allocated collections. We can improve //! the performance however by using 'external' iteration - ie. returning a series //! 
of chained iterator adaptors, that only perform the allocation on the call to //! `collect`. This emulates the 'fusion' that languages like Haskell perform to //! reduce intermediate allocations. //! //! We could potentially get some fusion between the stages of our compiler by way //! of the [visitor pattern](https://github.com/pikelet-lang/pikelet/issues/75). //! //! ## Support for interactive development //! //! It would be interesting to see how Pikelet could be implemented using an //! [asynchronous, query-based architecture](https://github.com/pikelet-lang/pikelet/issues/103). //! This will become more important as the demands of interactive development //! and incremental compilation become more pressing. In this model we would //! have to think of compilation as less a pure function from source code to //! machine code, and more as interacting with a database. //! //! ### Resources //! //! - [Queries: demand-driven compilation (Rustc Book)](https://rust-lang-nursery.github.io/rustc-guide/query.html) //! - [Anders Hejlsberg on Modern Compiler Construction (YouTube)](https://www.youtube.com/watch?v=wSdV1M7n4gQ) use codespan::CodeMap; pub use codespan::FileName; pub use codespan_reporting::{termcolor, ColorArg, Diagnostic}; use std::io; use pikelet_concrete::desugar::{Desugar, DesugarEnv}; use pikelet_concrete::elaborate::Context; use pikelet_concrete::resugar::Resugar; use pikelet_concrete::syntax::raw; use pikelet_core::syntax::{core, domain, Import}; /// An environment that keeps track of the state of a Pikelet program during /// compilation or interactive sessions #[derive(Debug, Clone)] pub struct Driver { /// The base type checking context, containing the built-in definitions context: Context, /// The base desugar environment, using the definitions from the `context` desugar_env: DesugarEnv, /// A codemap that owns the source code for any terms that are currently loaded code_map: CodeMap, } impl Driver { /// Create a new Pikelet environment, containing only the built-in definitions pub fn new() -> Driver { let context = Context::default(); let desugar_env = DesugarEnv::new(context.mappings()); Driver { context, desugar_env, code_map: CodeMap::new(), } } /// Create a new Pikelet environment, with the prelude loaded as well pub fn with_prelude() -> Driver { let mut pikelet = Driver::new(); pikelet .register_file( "prim".to_owned(), FileName::virtual_("prim"), pikelet_library::PRIM.to_owned(), ) .unwrap(); pikelet .register_file( "prelude".to_owned(), FileName::virtual_("prelude"), pikelet_library::PRELUDE.to_owned(), ) .unwrap(); pikelet } /// Add a binding to the driver's top-level environment pub fn add_binding(&mut self, name: &str, term: core::RcTerm, ann: domain::RcType) { let fv = self.desugar_env.on_binding(&name); self.context.insert_declaration(fv.clone(), ann.clone()); self.context.insert_definition(fv.clone(), term.clone()); } /// Register a file with the driver pub fn register_file( &mut self, path: String, name: FileName, src: String, ) -> Result<(), Vec<Diagnostic>> { let (term, ty) = self.infer_file(name, src)?; // FIXME: Check if import already exists self.context.insert_import(path, Import::Term(term), ty); Ok(()) } /// Infer the type of a file pub fn infer_file( &mut self, name: FileName, src: String, ) -> Result<(core::RcTerm, domain::RcType), Vec<Diagnostic>> { let file_map = self.code_map.add_filemap(name, src); // TODO: follow import paths let (concrete_term, _import_paths, errors) = pikelet_concrete::parse::term(&file_map); if!errors.is_empty()
let raw_term = self.desugar(&concrete_term)?; self.infer_term(&raw_term) } /// Normalize the contents of a file pub fn normalize_file( &mut self, name: FileName, src: String, ) -> Result<domain::RcValue, Vec<Diagnostic>> { use pikelet_concrete::elaborate::InternalError; let (term, _) = self.infer_file(name, src)?; pikelet_core::nbe::nf_term(&self.context, &term) .map_err(|err| vec![InternalError::from(err).to_diagnostic()]) } /// Infer the type of a term pub fn infer_term( &self, raw_term: &raw::RcTerm, ) -> Result<(core::RcTerm, domain::RcType), Vec<Diagnostic>> { pikelet_concrete::elaborate::infer_term(&self.context, &raw_term) .map_err(|err| vec![err.to_diagnostic()]) } /// Normalize a term pub fn normalize_term(&self, term: &core::RcTerm) -> Result<domain::RcValue, Vec<Diagnostic>> { use pikelet_concrete::elaborate::InternalError; pikelet_core::nbe::nf_term(&self.context, term) .map_err(|err| vec![InternalError::from(err).to_diagnostic()]) } /// Desugar a term pub fn desugar<T>(&self, src: &impl Desugar<T>) -> Result<T, Vec<Diagnostic>> { src.desugar(&self.desugar_env) .map_err(|e| vec![e.to_diagnostic()]) } /// Resugar a term pub fn resugar<T>(&self, src: &impl Resugar<T>) -> T { self.context.resugar(src) } /// Emit the diagnostics using the given writer pub fn emit<'a>( &self, mut writer: impl termcolor::WriteColor, diagnostics: impl IntoIterator<Item = &'a Diagnostic>, ) -> io::Result<()> { for diagnostic in diagnostics { codespan_reporting::emit(&mut writer, &self.code_map, diagnostic)?; } Ok(()) } }
{ return Err(errors.iter().map(|error| error.to_diagnostic()).collect()); }
conditional_block
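Since `Driver` is the entry point for embedding the front end, a short usage sketch may help. This is a minimal sketch rather than code from the Pikelet repository: the crate name `pikelet_driver` and the example source string are assumptions, while `Driver`, `FileName`, `termcolor`, `infer_file`, and `emit` are the items defined above.

```rust
// Minimal sketch of embedding the front end via the Driver defined above.
// Assumptions: the crate is available as `pikelet_driver`, and the source
// string is a plausible (hypothetical) Pikelet identity function.
use pikelet_driver::termcolor::{ColorChoice, StandardStream};
use pikelet_driver::{Driver, FileName};

fn main() {
    // Built-in definitions plus the `prim` and `prelude` imports
    let mut driver = Driver::with_prelude();

    // Parse, desugar, and elaborate a virtual file, yielding a core term
    // and its type, or a list of diagnostics on failure
    let src = "\\(a : Type) (x : a) => x".to_owned();
    match driver.infer_file(FileName::virtual_("example"), src) {
        Ok((_core_term, _ty)) => println!("type checked"),
        Err(diagnostics) => {
            // Diagnostics are rendered against the driver's own code map
            let writer = StandardStream::stderr(ColorChoice::Auto);
            driver
                .emit(writer, &diagnostics)
                .expect("failed to emit diagnostics");
        }
    }
}
```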
ws.rs
// Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! The [`ws_server`](#ws_server) and [`ws_client`](#ws_client) connectors provide support for the `WebSocket` protocol specification.
//!
//! Tremor can expose a client or server connection.
//!
//! Text and binary frames can be used.
//!
//! ## `ws_server`
//!
//! This connector is a websocket server. It opens a TCP listening socket, and for each incoming connection it initiates the WebSocket handshake. Then websocket frames can flow
//! and are processed with the given `preprocessors` and `codec` and sent to the `out` port of the connector.
//!
//! Each incoming connection creates a new stream of events. Events from a websocket connection bear the following metadata record at `$ws_server`:
//!
//! ```js
//! {
//!   "tls": true, // whether or not TLS is configured
//!   "peer": {
//!     "host": "127.0.0.1", // ip of the connection peer
//!     "port": 12345 // port of the connection peer
//!   }
//! }
//! ```
//!
//! When a connection is established and events are received, it is possible to send events to any open connection. In order to achieve this, a pipeline needs to be connected to the `in` port of this connector and send events to it. There are multiple ways to target a certain connection with a specific event:
//!
//! * Send the event you just received from the `ws_server` right back to it. It will be able to track the event to its websocket connection. You can even do this with an aggregate event coming from a select with a window. If an event is the result of events from multiple websocket connections, it will send the event back down to each websocket connection.
//! * Attach the same metadata you receive on the connection under `$ws_server` to the event you want to send to that connection.
//!
//! ### Configuration
//!
//! | Option           | Description                                                                                    | Type             | Required | Default value                                                  |
//! |------------------|------------------------------------------------------------------------------------------------|------------------|----------|----------------------------------------------------------------|
//! | `url`            | The host and port, as a URL, to listen on.                                                     | string           | yes      |                                                                |
//! | `tls`            | Optional Transport Layer Security configuration. See [TLS configuration](./index.md#server).   | record           | no       | No TLS configured.                                             |
//! | `backlog`        | The maximum size of the queue of pending connections not yet `accept`ed.                       | positive integer | no       | 128                                                            |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options).                                       | record           | no       | See [TCP socket options defaults](./index#tcp-socket-options)  |
//!
//! ### Examples
//!
//! An annotated example of a plain WS client configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define connector in from ws_server
//! with
//!   preprocessors = [
//!     {
//!       "name": "separate",
//!       "config": {
//!         "buffered": false
//!       }
//!     }
//!   ],
//!   codec = "json",
//!   config = {
//!     "url": "127.0.0.1:4242",
//!   }
//! end;
//! ```
//!
//!
An annotated example of a secure WS server configuration:
//!
//! ```tremor title="config.troy"
//! define connector ws_server from ws_server
//! with
//!   preprocessors = ["separate"],
//!   codec = "json",
//!   config = {
//!     "url": "0.0.0.0:65535",
//!     "tls": {
//!       # Security certificate for this service endpoint
//!       "cert": "./before/localhost.cert",
//!       # Security key
//!       "key": "./before/localhost.key",
//!     }
//!   }
//! end;
//! ```
//!
//! ## `ws_client`
//!
//! This connector is a websocket client that establishes one connection to the host and port configured in `url`. Events sent to the `in` port of this connector will be processed by the configured `codec` and `postprocessors` and turned into a text or binary frame, depending on the event's boolean metadata value `$ws_server.binary`. If you want to send a binary frame, you need to set:
//!
//! ```tremor
//! let $ws_server["binary"] = true;
//! ```
//!
//! If nothing is provided a text frame is sent.
//!
//! Data received on the open connection is processed frame by frame by the configured `preprocessors` and `codec` and sent as an event via the `out` port of the connector. Each event contains a metadata record of the following form via `$ws_server`:
//!
//! ```js
//! {
//!   "tls": false, // whether or not tls is enabled on the connection
//!   "peer": {
//!     "host": "192.168.0.1", // ip of the connection peer
//!     "port": 56431 // port of the connection peer
//!   }
//! }
//! ```
//!
//! ### Configuration
//!
//! | Option           | Description                                                                                    | Type              | Required | Default value                                                  |
//! |------------------|------------------------------------------------------------------------------------------------|-------------------|----------|----------------------------------------------------------------|
//! | `url`            | The URL to connect to in order to initiate the websocket connection.                           | string            | yes      |                                                                |
//! | `tls`            | Optional Transport Layer Security configuration. See [TLS configuration](./index.md#client).   | record or boolean | no       | No TLS configured.                                             |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options).                                       | record            | no       | See [TCP socket options defaults](./index#tcp-socket-options)  |
//!
//! ### Examples
//!
//! An annotated example of a non-tls plain WS client configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define connector my_wsc from ws_client
//! with
//!   postprocessors = ["separate"],
//!   codec = "json",
//!   config = {
//!     # Connect to port 4242 on the loopback device
//!     "url": "ws://127.0.0.1:4242/"
//!
//!     # Optional Transport Layer Security configuration
//!     # "tls" = { ... }
//!
//!     # Optional tuning of the Nagle algorithm (default: true)
//!     # - By default no delay is preferred
//!     # "no_delay" = false
//!   }
//! end;
//! ```
//!
//! An annotated example of a secure WS client configuration with
//! reconnection quality of service configured:
//!
//! ```tremor title="config.troy"
//! define connector ws_client from ws_client
//! with
//!   postprocessors = ["separate"],
//!   codec = "json",
//!   config = {
//!     # Connect to TCP port 65535
//!     "url": "wss://0.0.0.0:65535",
//!
//!     # Prefer delay and enable the TCP Nagle algorithm
//!     "no_delay": false,
//!
//!     # Enable SSL/TLS
//!     "tls": {
//!       # CA certificate
//!       "cafile": "./before/localhost.cert",
//!       # Domain
//!       "domain": "localhost",
//!     }
//!   },
//!   # Reconnect starting at half a second, backoff by doubling, maximum of 3 tries before circuit breaking
//!   reconnect = {
//!     "retry": {
//!
"interval_ms": 500, //! "growth_rate": 2, //! "max_retries": 3, //! } //! } //! end; //! ``` pub(crate) mod client; pub(crate) mod server; use crate::connectors::prelude::*; use futures::prelude::*; use futures::stream::{SplitSink, SplitStream}; use tokio::net::TcpStream; use tokio_rustls::server::TlsStream; use tokio_tungstenite::tungstenite::Message; use tokio_tungstenite::WebSocketStream; pub(crate) struct WsDefaults; impl Defaults for WsDefaults { const SCHEME: &'static str = "ws"; const HOST: &'static str = "localhost"; const PORT: u16 = 80; } struct WsReader<Stream, Ctx, Runtime> where Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin, Ctx: Context + Send, Runtime: SinkRuntime, { stream: SplitStream<WebSocketStream<Stream>>, // we keep this around for closing the writing part if the reader is done sink_runtime: Option<Runtime>, origin_uri: EventOriginUri, meta: Value<'static>, ctx: Ctx, } impl<Stream, Ctx, Runtime> WsReader<Stream, Ctx, Runtime> where Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin, Ctx: Context + Send + Sync, Runtime: SinkRuntime, { fn new( stream: SplitStream<WebSocketStream<Stream>>, sink_runtime: Option<Runtime>, origin_uri: EventOriginUri, meta: Value<'static>, ctx: Ctx, ) -> Self { Self { stream, sink_runtime, origin_uri, meta, ctx, } } } #[async_trait::async_trait] impl<Stream, Ctx, Runtime> StreamReader for WsReader<Stream, Ctx, Runtime> where Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin, Ctx: Context + Send + Sync, Runtime: SinkRuntime, { async fn quiesce(&mut self, stream: u64) -> Option<SourceReply> { Some(SourceReply::EndStream { origin_uri: self.origin_uri.clone(), stream, meta: Some(self.meta.clone()), }) } async fn read(&mut self, stream: u64) -> Result<SourceReply> { let mut is_binary = false; match self.stream.next().await { Some(Ok(message)) => { let data = match message { Message::Text(text) => text.into_bytes(), Message::Binary(binary) => { is_binary = true; binary }
let after_close = self.stream.next().await; debug_assert!( after_close.is_none(), "WS reader not behaving as expected after receiving a close message" ); return Ok(SourceReply::EndStream { origin_uri: self.origin_uri.clone(), stream, meta: Some(self.meta.clone()), }); } Message::Ping(_) | Message::Pong(_) | Message::Frame(_) => { // ignore those, but don't let the source wait return self.read(stream).await; } }; let mut meta = self.meta.clone(); if is_binary { meta.insert("binary", Value::const_true())?; }; Ok(SourceReply::Data { origin_uri: self.origin_uri.clone(), stream: Some(stream), meta: Some(meta), data, port: None, codec_overwrite: None, }) } Some(Err(_)) | None => Ok(SourceReply::EndStream { origin_uri: self.origin_uri.clone(), stream, meta: Some(self.meta.clone()), }), } } async fn on_done(&mut self, stream: u64) -> StreamDone { // make the writer stop, otherwise the underlying socket will never be closed if let Some(sink_runtime) = self.sink_runtime.as_mut() { self.ctx.swallow_err( sink_runtime.unregister_stream_writer(stream).await, "Error unregistering stream", ); } StreamDone::StreamClosed } } struct WsWriter<S> where S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Sync + Send, { sink: SplitSink<WebSocketStream<S>, Message>, } impl WsWriter<TcpStream> { fn new(sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self { Self { sink } } } impl WsWriter<TlsStream<TcpStream>> { fn new_tls_server(sink: SplitSink<WebSocketStream<TlsStream<TcpStream>>, Message>) -> Self { Self { sink } } } impl WsWriter<TcpStream> { fn new_tungstenite_client(sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self { Self { sink } } } impl WsWriter<tokio_rustls::client::TlsStream<TcpStream>> { fn new_tls_client( sink: SplitSink<WebSocketStream<tokio_rustls::client::TlsStream<TcpStream>>, Message>, ) -> Self { Self { sink } } } #[async_trait::async_trait] impl<S> StreamWriter for WsWriter<S> where S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Sync + Send + Unpin, { async fn write(&mut self, data: Vec<Vec<u8>>, meta: Option<&Value>) -> Result<()> { for chunk in data { if let Some(meta) = &meta { // If metadata is set, check for a binary framing flag if let Some(true) = meta.get_bool("binary") { let message = Message::Binary(chunk); self.sink.send(message).await?; } else { let message = std::str::from_utf8(&chunk)?; let message = Message::Text(message.to_string()); self.sink.send(message).await?; } } else { // No metadata, default to text ws framing let message = std::str::from_utf8(&chunk)?; let message = Message::Text(message.to_string()); self.sink.send(message).await?; }; } Ok(()) } async fn on_done(&mut self, _stream: u64) -> Result<StreamDone> { self.sink.close().await?; Ok(StreamDone::StreamClosed) } }
Message::Close(_) => { // read from the stream once again to drive the closing handshake
random_line_split
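With the reader and writer halves in place, the server side can be exercised with any off-the-shelf websocket client. The following is a minimal sketch, assuming a `ws_server` instance listening on `127.0.0.1:4242` with the `json` codec (as in the first example config) and an echo pipeline attached; the payload values are placeholders.

```rust
// Minimal sketch of a tokio-tungstenite client exercising the ws_server
// connector: send one JSON text frame, read one reply, then start the
// closing handshake that `WsReader::read` above drives to completion.
use futures::{SinkExt, StreamExt};
use tokio_tungstenite::{connect_async, tungstenite::Message};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let (mut ws, _response) = connect_async("ws://127.0.0.1:4242").await?;

    // One JSON document per text frame, matching the configured "json" codec
    ws.send(Message::Text(r#"{"snot": "badger"}"#.to_string())).await?;

    // Whatever the attached pipeline routes back arrives as a frame here
    if let Some(reply) = ws.next().await {
        println!("received: {:?}", reply?);
    }

    // Initiate the close handshake; the server replies with its own Close
    ws.close(None).await?;
    Ok(())
}
```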
cabi.rs
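Every export in the module below funnels through the same `landingpad`/`CError` convention: on failure the error is recorded in `err_out` and a zeroed value is returned, so callers check `CError` after each fallible call. A minimal sketch of what a caller-side smoke test could look like, written against the signatures below; the source map JSON is a placeholder and the test itself is hypothetical.

```rust
// Hypothetical smoke test of the C ABI below, driven from Rust: build a
// view from source map JSON, query it, and free it, checking CError after
// each fallible call.
use std::mem;
use std::os::raw::c_uint;

unsafe fn smoke_test() {
    let json = br#"{"version":3,"sources":[],"names":[],"mappings":""}"#;
    let mut err: CError = mem::zeroed();

    let view = lsm_view_from_json(json.as_ptr(), json.len() as c_uint, &mut err);
    if err.failed != 0 {
        // `message` points at a NUL-terminated string set by `notify_err`
        eprintln!("lsm_view_from_json failed with code {}", err.code);
        return;
    }

    let tokens = lsm_view_get_token_count(view, &mut err);
    println!("token count: {}", tokens);

    // Views created through the ABI must be released through it as well
    lsm_view_free(view);
}
```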
use std::ptr; use std::mem; use std::slice; use std::panic; use std::ffi::{CStr, OsStr}; use std::borrow::Cow; use std::os::raw::{c_int, c_uint, c_char}; use std::os::unix::ffi::OsStrExt; use proguard::MappingView; use sourcemap::Error as SourceMapError; use errors::{Error, ErrorKind, Result}; use unified::{View, TokenMatch, Index, ViewOrIndex}; use memdb::DumpOptions; fn resultbox<T>(val: T) -> Result<*mut T> { Ok(Box::into_raw(Box::new(val))) } #[derive(Debug)] #[repr(C)] pub struct Token { pub dst_line: c_uint, pub dst_col: c_uint, pub src_line: c_uint, pub src_col: c_uint, pub name: *const u8, pub name_len: c_uint, pub src: *const u8, pub src_len: c_uint, pub src_id: c_uint, } #[derive(Debug)] #[repr(C)] pub struct Str { pub data: *const u8, pub len: c_uint, } #[derive(Debug)] #[repr(C)] pub struct CError { pub message: *const u8, pub failed: c_int, pub code: c_int, } fn get_error_code_from_kind(kind: &ErrorKind) -> c_int { match *kind { ErrorKind::SourceMap(SourceMapError::IndexedSourcemap) => 2, ErrorKind::SourceMap(SourceMapError::BadJson(_)) => 3, ErrorKind::SourceMap(SourceMapError::CannotFlatten(_)) => 4, ErrorKind::UnsupportedMemDbVersion => 5, ErrorKind::Io(_) => 6, ErrorKind::TooManySources => 20, ErrorKind::TooManyNames => 21, ErrorKind::LocationOverflow => 22, ErrorKind::AlreadyMemDb => 23, _ => 1, } } unsafe fn set_token<'a>(out: *mut Token, tm: &'a TokenMatch<'a>) { (*out).dst_line = tm.dst_line; (*out).dst_col = tm.dst_col; (*out).src_line = tm.src_line; (*out).src_col = tm.src_col; (*out).name = match tm.name { Some(name) => name.as_ptr(), None => ptr::null() }; (*out).name_len = tm.name.map(|x| x.as_bytes().len()).unwrap_or(0) as c_uint; (*out).src = match tm.src { Some(src) => src.as_ptr(), None => ptr::null() }; (*out).src_len = tm.src.map(|x| x.as_bytes().len()).unwrap_or(0) as c_uint; (*out).src_id = tm.src_id; } unsafe fn notify_err(err: Error, err_out: *mut CError) { if!err_out.is_null() { let s = format!("{}\x00", err); (*err_out).failed = 1; (*err_out).message = Box::into_raw(s.into_boxed_str()) as *mut u8; (*err_out).code = get_error_code_from_kind(err.kind()); } } unsafe fn landingpad<F: FnOnce() -> Result<T> + panic::UnwindSafe, T>( f: F, err_out: *mut CError) -> T { match panic::catch_unwind(f) { Ok(rv) => rv.map_err(|err| notify_err(err, err_out)).unwrap_or(mem::zeroed()), Err(err) => { use std::any::Any; let err = &*err as &Any; let msg = match err.downcast_ref::<&'static str>() { Some(s) => *s, None => { match err.downcast_ref::<String>() { Some(s) => &**s, None => "Box<Any>", } } }; notify_err(ErrorKind::InternalError(msg.to_string()).into(), err_out); mem::zeroed() } } } macro_rules! export ( ( $name:ident($($aname:ident: $aty:ty),*) -> Result<$rv:ty> $body:block ) => ( #[no_mangle] pub unsafe extern "C" fn $name($($aname: $aty,)* err_out: *mut CError) -> $rv { landingpad(|| $body, err_out) } ); ( $name:ident($($aname:ident: $aty:ty),*) $body:block ) => { #[no_mangle] pub unsafe extern "C" fn $name($($aname: $aty,)*) { // this silences panics and stuff landingpad(|| { $body; Ok(0 as c_int)}, ptr::null_mut()); } } ); export!(lsm_init() { fn silent_panic_handler(_pi: &panic::PanicInfo) { // don't do anything here. This disables the default printing of // panics to stderr which we really don't care about here. } panic::set_hook(Box::new(silent_panic_handler)); }); export!(lsm_view_from_json(bytes: *const u8, len: c_uint) -> Result<*mut View> { resultbox(View::json_from_slice(slice::from_raw_parts(bytes, len as usize))?) 
}); export!(lsm_view_from_memdb( bytes: *const u8, len: c_uint) -> Result<*mut View> { // XXX: this currently copies because that's safer. Consider improving this? resultbox(View::memdb_from_vec(slice::from_raw_parts( bytes, len as usize ).to_vec())?) }); export!(lsm_view_from_memdb_file(path: *const c_char) -> Result<*mut View> { resultbox(View::memdb_from_path(CStr::from_ptr(path).to_str()?)?) }); export!(lsm_view_free(view: *mut View) { if!view.is_null() { Box::from_raw(view); } }); export!(lsm_view_get_token_count(view: *const View) -> Result<c_uint> { Ok((*view).get_token_count() as c_uint) }); export!(lsm_view_get_token(view: *const View, idx: c_uint, out: *mut Token) -> Result<c_int> { Ok(match (*view).get_token(idx as u32) { None => 0, Some(tm) => { set_token(out, &tm); 1 } }) }); export!(lsm_view_lookup_token( view: *const View, line: c_uint, col: c_uint, out: *mut Token) -> Result<c_int> { Ok(match (*view).lookup_token(line, col) { None => 0, Some(tm) => { set_token(out, &tm); 1 } }) }); export!(lsm_view_get_original_function_name( view: *const View, line: c_uint, col: c_uint, minified_name: *const c_char, minified_source: *const c_char, name_out: *mut *const c_char) -> Result<c_uint> { Ok(match (*view).get_original_function_name( line as u32, col as u32, CStr::from_ptr(minified_name).to_str()?, CStr::from_ptr(minified_source).to_str()?) { Some(name) => { *name_out = name.as_ptr() as *const c_char; name.len() as c_uint } None => 0 }) }); export!(lsm_view_get_source_count(view: *const View) -> Result<c_uint> { Ok((*view).get_source_count() as c_uint) }); export!(lsm_view_has_source_contents(view: *const View, src_id: c_uint) -> Result<c_int> { Ok(if (*view).get_source_contents(src_id as u32).is_some() { 1 } else { 0 }) }); export!(lsm_view_get_source_contents( view: *const View, src_id: c_uint, len_out: *mut c_uint, must_free: *mut c_int) -> Result<*mut u8> { *must_free = 0; Ok(match (*view).get_source_contents(src_id as u32) { None => ptr::null_mut(), Some(contents) => { *len_out = contents.len() as c_uint; match contents { Cow::Borrowed(s) => s.as_ptr() as *mut u8, Cow::Owned(val) => { *must_free = 1; Box::into_raw(val.into_boxed_str()) as *mut u8 } } } }) }); export!(lsm_view_get_source_name( view: *const View, src_id: c_uint, len_out: *mut c_uint) -> Result<*const u8> { Ok(match (*view).get_source(src_id as u32) { None => ptr::null(), Some(name) => { *len_out = name.len() as c_uint; name.as_ptr() } }) }); export!(lsm_view_dump_memdb( view: *mut View, len_out: *mut c_uint, with_source_contents: c_int, with_names: c_int) -> Result<*mut u8> { let memdb = (*view).dump_memdb(DumpOptions { with_source_contents: with_source_contents!= 0, with_names: with_names!= 0, })?; *len_out = memdb.len() as c_uint; Ok(Box::into_raw(memdb.into_boxed_slice()) as *mut u8) }); export!(lsm_buffer_free(buf: *mut u8) { if!buf.is_null() { Box::from_raw(buf); } }); export!(lsm_index_from_json(bytes: *const u8, len: c_uint) -> Result<*mut Index> { resultbox(Index::json_from_slice(slice::from_raw_parts( bytes, len as usize ))?) }); export!(lsm_index_free(idx: *mut Index) { if!idx.is_null() { Box::from_raw(idx); } }); export!(lsm_index_can_flatten(idx: *const Index) -> Result<c_int> { Ok(if (*idx).can_flatten() { 1 } else { 0 }) }); export!(lsm_index_into_view(idx: *mut Index) -> Result<*mut View> { resultbox(Box::from_raw(idx).into_view()?) 
}); export!(lsm_view_or_index_from_json( bytes: *const u8, len: c_uint, view_out: *mut *mut View, idx_out: *mut *mut Index) -> Result<c_int> { match ViewOrIndex::from_slice(slice::from_raw_parts( bytes, len as usize ))? { ViewOrIndex::View(view) => { *view_out = Box::into_raw(Box::new(view)); *idx_out = ptr::null_mut(); Ok(1) } ViewOrIndex::Index(idx) => { *view_out = ptr::null_mut(); *idx_out = Box::into_raw(Box::new(idx)); Ok(2) } } }); export!(lsm_proguard_mapping_from_bytes(bytes: *const u8, len: c_uint) -> Result<*mut MappingView<'static>> { resultbox(MappingView::from_vec(slice::from_raw_parts(bytes, len as usize).to_vec())?) }); export!(lsm_proguard_mapping_from_path(filename: *const c_char) -> Result<*mut MappingView<'static>> { resultbox(MappingView::from_path( OsStr::from_bytes(CStr::from_ptr(filename).to_bytes()))?) }); export!(lsm_proguard_mapping_free(view: *mut MappingView) { if!view.is_null() { Box::from_raw(view); } }); export!(lsm_proguard_mapping_has_line_info(view: *const MappingView) -> Result<c_int> { Ok(if (*view).has_line_info() { 1 } else { 0 }) }); export!(lsm_proguard_mapping_convert_dotted_path( view: *const MappingView, path: *const c_char, lineno: c_int) -> Result<*mut u8> { let path = CStr::from_ptr(path).to_str()?; let mut iter = path.splitn(2, ':'); let cls_name = iter.next().unwrap_or(""); let meth_name = iter.next(); let s = if let Some(cls) = (*view).find_class(cls_name) { let class_name = cls.class_name(); if let Some(meth_name) = meth_name { let methods = cls.get_methods(meth_name, if lineno == 0 { None } else { Some(lineno as u32) }); if!methods.is_empty() { format!("{}:{}\x00", class_name, methods[0].name()) } else { format!("{}:{}\x00", class_name, meth_name) } } else { format!("{}\x00", class_name) }
format!("{}\x00", path) }; Ok(Box::into_raw(s.into_boxed_str()) as *mut u8) });
} else {
random_line_split
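A note on the `export!` macro that dominates this record: its fallible arm appends a trailing `err_out: *mut CError` parameter and routes the body through `landingpad`, which catches panics and maps `Err` into a populated `CError`. As a rough sketch (a paraphrase of the macro's expansion, not actual compiler output), `lsm_view_get_token_count` becomes:

```
// Illustrative expansion only -- paraphrased from the export! macro in this
// record, not taken from real compiler output.
#[no_mangle]
pub unsafe extern "C" fn lsm_view_get_token_count(
    view: *const View,
    err_out: *mut CError,
) -> c_uint {
    // landingpad() catches panics, converts Err into a populated CError via
    // notify_err(), and hands mem::zeroed() back to the C caller on failure.
    landingpad(|| Ok((*view).get_token_count() as c_uint), err_out)
}
```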
cabi.rs
use std::ptr; use std::mem; use std::slice; use std::panic; use std::ffi::{CStr, OsStr}; use std::borrow::Cow; use std::os::raw::{c_int, c_uint, c_char}; use std::os::unix::ffi::OsStrExt; use proguard::MappingView; use sourcemap::Error as SourceMapError; use errors::{Error, ErrorKind, Result}; use unified::{View, TokenMatch, Index, ViewOrIndex}; use memdb::DumpOptions; fn resultbox<T>(val: T) -> Result<*mut T> { Ok(Box::into_raw(Box::new(val))) } #[derive(Debug)] #[repr(C)] pub struct Token { pub dst_line: c_uint, pub dst_col: c_uint, pub src_line: c_uint, pub src_col: c_uint, pub name: *const u8, pub name_len: c_uint, pub src: *const u8, pub src_len: c_uint, pub src_id: c_uint, } #[derive(Debug)] #[repr(C)] pub struct Str { pub data: *const u8, pub len: c_uint, } #[derive(Debug)] #[repr(C)] pub struct CError { pub message: *const u8, pub failed: c_int, pub code: c_int, } fn get_error_code_from_kind(kind: &ErrorKind) -> c_int { match *kind { ErrorKind::SourceMap(SourceMapError::IndexedSourcemap) => 2, ErrorKind::SourceMap(SourceMapError::BadJson(_)) => 3, ErrorKind::SourceMap(SourceMapError::CannotFlatten(_)) => 4, ErrorKind::UnsupportedMemDbVersion => 5, ErrorKind::Io(_) => 6, ErrorKind::TooManySources => 20, ErrorKind::TooManyNames => 21, ErrorKind::LocationOverflow => 22, ErrorKind::AlreadyMemDb => 23, _ => 1, } } unsafe fn
<'a>(out: *mut Token, tm: &'a TokenMatch<'a>) { (*out).dst_line = tm.dst_line; (*out).dst_col = tm.dst_col; (*out).src_line = tm.src_line; (*out).src_col = tm.src_col; (*out).name = match tm.name { Some(name) => name.as_ptr(), None => ptr::null() }; (*out).name_len = tm.name.map(|x| x.as_bytes().len()).unwrap_or(0) as c_uint; (*out).src = match tm.src { Some(src) => src.as_ptr(), None => ptr::null() }; (*out).src_len = tm.src.map(|x| x.as_bytes().len()).unwrap_or(0) as c_uint; (*out).src_id = tm.src_id; } unsafe fn notify_err(err: Error, err_out: *mut CError) { if!err_out.is_null() { let s = format!("{}\x00", err); (*err_out).failed = 1; (*err_out).message = Box::into_raw(s.into_boxed_str()) as *mut u8; (*err_out).code = get_error_code_from_kind(err.kind()); } } unsafe fn landingpad<F: FnOnce() -> Result<T> + panic::UnwindSafe, T>( f: F, err_out: *mut CError) -> T { match panic::catch_unwind(f) { Ok(rv) => rv.map_err(|err| notify_err(err, err_out)).unwrap_or(mem::zeroed()), Err(err) => { use std::any::Any; let err = &*err as &Any; let msg = match err.downcast_ref::<&'static str>() { Some(s) => *s, None => { match err.downcast_ref::<String>() { Some(s) => &**s, None => "Box<Any>", } } }; notify_err(ErrorKind::InternalError(msg.to_string()).into(), err_out); mem::zeroed() } } } macro_rules! export ( ( $name:ident($($aname:ident: $aty:ty),*) -> Result<$rv:ty> $body:block ) => ( #[no_mangle] pub unsafe extern "C" fn $name($($aname: $aty,)* err_out: *mut CError) -> $rv { landingpad(|| $body, err_out) } ); ( $name:ident($($aname:ident: $aty:ty),*) $body:block ) => { #[no_mangle] pub unsafe extern "C" fn $name($($aname: $aty,)*) { // this silences panics and stuff landingpad(|| { $body; Ok(0 as c_int)}, ptr::null_mut()); } } ); export!(lsm_init() { fn silent_panic_handler(_pi: &panic::PanicInfo) { // don't do anything here. This disables the default printing of // panics to stderr which we really don't care about here. } panic::set_hook(Box::new(silent_panic_handler)); }); export!(lsm_view_from_json(bytes: *const u8, len: c_uint) -> Result<*mut View> { resultbox(View::json_from_slice(slice::from_raw_parts(bytes, len as usize))?) }); export!(lsm_view_from_memdb( bytes: *const u8, len: c_uint) -> Result<*mut View> { // XXX: this currently copies because that's safer. Consider improving this? resultbox(View::memdb_from_vec(slice::from_raw_parts( bytes, len as usize ).to_vec())?) }); export!(lsm_view_from_memdb_file(path: *const c_char) -> Result<*mut View> { resultbox(View::memdb_from_path(CStr::from_ptr(path).to_str()?)?) }); export!(lsm_view_free(view: *mut View) { if!view.is_null() { Box::from_raw(view); } }); export!(lsm_view_get_token_count(view: *const View) -> Result<c_uint> { Ok((*view).get_token_count() as c_uint) }); export!(lsm_view_get_token(view: *const View, idx: c_uint, out: *mut Token) -> Result<c_int> { Ok(match (*view).get_token(idx as u32) { None => 0, Some(tm) => { set_token(out, &tm); 1 } }) }); export!(lsm_view_lookup_token( view: *const View, line: c_uint, col: c_uint, out: *mut Token) -> Result<c_int> { Ok(match (*view).lookup_token(line, col) { None => 0, Some(tm) => { set_token(out, &tm); 1 } }) }); export!(lsm_view_get_original_function_name( view: *const View, line: c_uint, col: c_uint, minified_name: *const c_char, minified_source: *const c_char, name_out: *mut *const c_char) -> Result<c_uint> { Ok(match (*view).get_original_function_name( line as u32, col as u32, CStr::from_ptr(minified_name).to_str()?, CStr::from_ptr(minified_source).to_str()?) 
{ Some(name) => { *name_out = name.as_ptr() as *const c_char; name.len() as c_uint } None => 0 }) }); export!(lsm_view_get_source_count(view: *const View) -> Result<c_uint> { Ok((*view).get_source_count() as c_uint) }); export!(lsm_view_has_source_contents(view: *const View, src_id: c_uint) -> Result<c_int> { Ok(if (*view).get_source_contents(src_id as u32).is_some() { 1 } else { 0 }) }); export!(lsm_view_get_source_contents( view: *const View, src_id: c_uint, len_out: *mut c_uint, must_free: *mut c_int) -> Result<*mut u8> { *must_free = 0; Ok(match (*view).get_source_contents(src_id as u32) { None => ptr::null_mut(), Some(contents) => { *len_out = contents.len() as c_uint; match contents { Cow::Borrowed(s) => s.as_ptr() as *mut u8, Cow::Owned(val) => { *must_free = 1; Box::into_raw(val.into_boxed_str()) as *mut u8 } } } }) }); export!(lsm_view_get_source_name( view: *const View, src_id: c_uint, len_out: *mut c_uint) -> Result<*const u8> { Ok(match (*view).get_source(src_id as u32) { None => ptr::null(), Some(name) => { *len_out = name.len() as c_uint; name.as_ptr() } }) }); export!(lsm_view_dump_memdb( view: *mut View, len_out: *mut c_uint, with_source_contents: c_int, with_names: c_int) -> Result<*mut u8> { let memdb = (*view).dump_memdb(DumpOptions { with_source_contents: with_source_contents!= 0, with_names: with_names!= 0, })?; *len_out = memdb.len() as c_uint; Ok(Box::into_raw(memdb.into_boxed_slice()) as *mut u8) }); export!(lsm_buffer_free(buf: *mut u8) { if!buf.is_null() { Box::from_raw(buf); } }); export!(lsm_index_from_json(bytes: *const u8, len: c_uint) -> Result<*mut Index> { resultbox(Index::json_from_slice(slice::from_raw_parts( bytes, len as usize ))?) }); export!(lsm_index_free(idx: *mut Index) { if!idx.is_null() { Box::from_raw(idx); } }); export!(lsm_index_can_flatten(idx: *const Index) -> Result<c_int> { Ok(if (*idx).can_flatten() { 1 } else { 0 }) }); export!(lsm_index_into_view(idx: *mut Index) -> Result<*mut View> { resultbox(Box::from_raw(idx).into_view()?) }); export!(lsm_view_or_index_from_json( bytes: *const u8, len: c_uint, view_out: *mut *mut View, idx_out: *mut *mut Index) -> Result<c_int> { match ViewOrIndex::from_slice(slice::from_raw_parts( bytes, len as usize ))? { ViewOrIndex::View(view) => { *view_out = Box::into_raw(Box::new(view)); *idx_out = ptr::null_mut(); Ok(1) } ViewOrIndex::Index(idx) => { *view_out = ptr::null_mut(); *idx_out = Box::into_raw(Box::new(idx)); Ok(2) } } }); export!(lsm_proguard_mapping_from_bytes(bytes: *const u8, len: c_uint) -> Result<*mut MappingView<'static>> { resultbox(MappingView::from_vec(slice::from_raw_parts(bytes, len as usize).to_vec())?) }); export!(lsm_proguard_mapping_from_path(filename: *const c_char) -> Result<*mut MappingView<'static>> { resultbox(MappingView::from_path( OsStr::from_bytes(CStr::from_ptr(filename).to_bytes()))?) 
}); export!(lsm_proguard_mapping_free(view: *mut MappingView) { if!view.is_null() { Box::from_raw(view); } }); export!(lsm_proguard_mapping_has_line_info(view: *const MappingView) -> Result<c_int> { Ok(if (*view).has_line_info() { 1 } else { 0 }) }); export!(lsm_proguard_mapping_convert_dotted_path( view: *const MappingView, path: *const c_char, lineno: c_int) -> Result<*mut u8> { let path = CStr::from_ptr(path).to_str()?; let mut iter = path.splitn(2, ':'); let cls_name = iter.next().unwrap_or(""); let meth_name = iter.next(); let s = if let Some(cls) = (*view).find_class(cls_name) { let class_name = cls.class_name(); if let Some(meth_name) = meth_name { let methods = cls.get_methods(meth_name, if lineno == 0 { None } else { Some(lineno as u32) }); if!methods.is_empty() { format!("{}:{}\x00", class_name, methods[0].name()) } else { format!("{}:{}\x00", class_name, meth_name) } } else { format!("{}\x00", class_name) } } else { format!("{}\x00", path) }; Ok(Box::into_raw(s.into_boxed_str()) as *mut u8) });
set_token
identifier_name
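`set_token` encodes the optional strings in its out-parameter as (pointer, length) pairs, with a null pointer meaning "absent". A hypothetical Rust-side reader (not part of this record's API surface) that inverts the encoding:

```
use std::slice;
use std::str;

// Hypothetical consumer of the Token out-parameter filled in by set_token():
// a null pointer means "no name"; otherwise (name, name_len) is a UTF-8
// byte slice borrowed from the underlying View.
unsafe fn token_name(tok: &Token) -> Option<&str> {
    if tok.name.is_null() {
        return None;
    }
    let bytes = slice::from_raw_parts(tok.name, tok.name_len as usize);
    // Safe in practice: the bytes originate from Rust &str values.
    Some(str::from_utf8_unchecked(bytes))
}
```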
main.rs
/** * Rust's module/package system is *very* fully-featured and rich. * It's worth revisiting the rust book chapter, which is chock full of * special-case details, synonyms, tricks and tips. * * https://doc.rust-lang.org/book/ch07-00-packages-crates-and-modules.html * * * Here are a few of the highest-level details before we start: * * - Cargo expects 0 or 1 `src/main.rs` per project (i.e. per Cargo.toml) * - Cargo expects 0 or 1 `src/lib.rs` per project (i.e. per Cargo.toml) * - The `main.rs` becomes an executable / binary * - The `lib.rs` becomes an exportable library * - Additional top-level executable files can go into `src/bin/foo.rs` * Each such `*.rs` file will become one (1) top-level binary * - Other top-level files in `src/` are importable within the project, * but do not become exportable automatically. * - You can combine keywords in your lib.rs to re-export everything, * but you only get that one anointed lib.rs file for that purpose. * * Until now we have never used the `mod` keyword, which has made it easy * to do simple `hello world` style examples. That has meant that all the * code in each demo file is in the same namespace. Everything is visible * to everything else, which has made everything easy, hooray! But at the * same time, *nothing* has been exportable outside of the single main.rs * files, which means *none* of that code can ever be re-used, booo! * * The `mod` keyword is thus a double-edged sword. As soon as you start * using it: * * - you suddenly have to deal with public vs private and _access_ issues * - entities can now be *re-used* by other files that import the module * * Obviously that's a necessary trade-off for anything non-trivial, so let's * roll up our sleeves and get familiar with it! * * This file is our one (1) `main.rs` for the project, so it will be our * one (1) executable. However, we will make reference to four (4) other * _modules_ for this project, with differing access strategies. * * - `foo`, a module defined right here inline with this file. * But defining modules within `main.rs` is too trivial to be useful. * - `spam`, a module defined in a sibling file (./spam.rs) all by itself. * This pattern is probably all you would need for small projects. * - `sound`, a module defined in a sibling file (./sound.rs) but which * also has associated subdirectories. This example comes from the * official Rust book, but I don't like the dual use of a sound.rs file * with a ./sound/ directory. There is implicit magic here which I dislike. * - `things`, a module defined in a sibling directory with an internal `mod.rs` * file. This pattern comes from the Blandy & Orendorff book, and I like * the fact that everything about it is explicit. This is the one I would * use in my own projects, but you have to be familiar with all of them! * * Finally, we'll show the use of completely external modules inside `things`. * It uses the *external* crate `rand`, the de facto standard way to generate * random values. This library is not part of the rust core, but it *was* a * part of the core long ago, and it is still maintained by the same devs who * *do* maintain the rust core. So it's as anointed as you can get without * being bundled with standard rust. * * All of those target modules are *somewhere* in the project, but none of them * are part of the default public scope of this `main.rs` file. Therefore, we * have to announce that we will be using each of them, and then provide the * correct implementation target for each. Both steps are necessary!
You must * _declare_ that you're going to use a module, and then you must _implement_ * that module. The declarations take the same shape for all four modules, * but their implementations are all different. * */ // For inline examples *only*, the `declaration` and the `implementation` // take place in the same location. That's what the word "inline" means! mod foo { // you are free to define as many nested submodules as you like // but remember that *everything* is private by default! // So we have to explicitly declare the submodules as public pub mod bar { // but eventually you will need a leaf node or why bother? pub fn zug(path: &str) { println!("I am Zug; hear me roar (via a {} path!)", path); } // this function demonstrates the use of `super::` // this is the only way to reach *up* and *over* pub fn qux() { // without super::, qux cannot see up to blort // blort("qux cannot see blort directly"); // compiler error > blort not found in this scope // but this works: super::blort("message from qux"); } } // this fn is part of foo, so it can be reached from main or, via super::, from bar pub fn
(msg: &str) { println!("Blort says: {}", msg); } } // next, the series of *declarations*, each of which points to one of the // module implementations discussed above. The declaration phase is easy to // forget, because the implementations are all part of the project, and so // their source files are very close by. But you are *required* to make an // explicit declaration nonetheless. If you throw in references to `crate::x::y` // without having preceded them with one of these declarations (e.g. `mod x;`), // the compiler will error out with a "failed to resolve" error message. // (So remember!!): even though these modules are local to the project, you // cannot make *undeclared* relative or absolute path references to them!! // Declare that we are looking for a `spam` module as a peer of some kind // Note that this differs from `use`, which would mean we were expecting // Cargo to find the installed library in whatever cache directory it uses. // NB: this means that a peer/sibling directory is _not_ automatically treated // as a module by default! Only peers that you declare in this way are modules. mod spam; // treat a spam peer (of some kind!) as a module // in this case, it's a file: spam.rs // and that file is self-contained, with no further path-based shenanigans // Declare that we are looking for a `sound` module as a peer of some kind. mod sound; // treat a sound peer (of some kind!) as a module // ending in semicolon instead of braces tells the compiler to find this module // In this case it is a `./sound.rs` file, which *happens* to include its own // submodule in its own subdirectory. The peer file is *definitive*, but nested // subdirectories are a *maybe*. I don't like maybe! Therefore, this pattern // bothers me, and I much prefer the next and final one, in which we have a // top-level directory matching the module name, plus an explicit barrel file. // Declare that we are looking for a `things` module as a peer of some kind mod things; // treat a things peer (of some kind!) as a module. // in this case, it's a directory, which has an inner ./mod.rs file, which // acts as the one-and-only barrel file for that module. I like this approach // _much_ better than the weird one used for sound, above. Everything here is // explicit, and there is no compiler magic going on anywhere. fn main() { // module `foo` is the first and simplest example, since it is inline. // we can get to the inline `foo` module two ways: // Via an absolute path, starting with the language-level keyword `crate` crate::foo::bar::zug("absolute"); // Or via *relative* path, where anything that is a peer of `main` // can be used as the top of the path. foo::bar::zug("relative"); // NB: you could also start with `super::` to back up one level // There is no need for a `sub::`, because that's what you're doing with `::`! foo::blort("message from main"); // call blort directly foo::bar::qux(); // qux also calls blort, via super:: shenanigans // module `spam` is the second-simplest example. It points to an all-in-one // peer file whose contents are eerily similar to our inline foo, above. crate::spam::eggs::toast("absolute"); spam::eggs::toast("relative"); // and here is the same super:: demo we did with foo, but for spam spam::beans("message from main"); spam::eggs::ham(); // Then the `sound` module uses a weird pattern where there is // both a `./sound.rs` peer file, and a `./sound/` peer directory.
// The weirdest part is that the `sound.rs` peer file is allowed to // refer to the `instrument` file without specifying the true path: // there's just an implicit automagic compiler leap where it knows to // look for a `./sound/instrument.rs` file. This bothers me a lot! crate::sound::instrument::clarinet("absolute"); // But once you get past that irritant, you can do the same absolute vs // relative thing that we've demonstrated for everyone else. sound::instrument::clarinet("relative"); // module `things` shows a more-scalable approach to modules // There is no `things.rs`, but there *is* a ./things/ peer directory // and that directory has a `mod.rs` file, which acts as the top level // file for the module, much like `index.js` does in a node project. crate::things::greet(); // use things via absolute path let stuff = things::assortment(); println!("An assortment of things: {:?}", stuff); // accessing nested modules can get verbose! let dog = things::animal::Animal::new("Rover"); println!("Rover says 'ruff ruff': {:?}", dog); // use the `use` keyword to allow terser access use crate::things::mineral::Mineral; // the final segment is now in scope as-is let coal = Mineral::new("Coal, ick!"); println!("Hi! I cause global warming!: {:?}", coal); // the `as` option allows you to avoid namespace collisions if necessary use crate::things::vegetable::Vegetable as Plant; let oak = Plant::new("oak"); println!("From a tiny acorn did I grow: {:?}", oak); } // there are still plenty of other little details to review in the article // in the main Rust book. This is a big topic, because it is _important_! // For example, you can `pub use` to re-export under shorter names, and there // are import syntaxes to condense multiple imports from sub-branches of the // same overall module. And there's a wildcard glob '*' to import everything // from a module, complete with the usual warnings about how that can be a bad // thing, because it makes it much harder to trace relationships. // TODO: go back and re-read the whole chapter, seriously!
blort
identifier_name
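This record's main() calls into spam but never shows ./spam.rs itself. A minimal sketch consistent with those calls (spam::eggs::toast, spam::eggs::ham, spam::beans) might look like the following; note the file needs no `mod spam { ... }` wrapper, since the `mod spam;` declaration in main.rs makes the whole file the module:

```
// Hypothetical ./spam.rs, reconstructed from the calls in main(); the real
// file is not included in this record.
pub mod eggs {
    pub fn toast(path: &str) {
        println!("I am toast, reached via a {} path!", path);
    }

    // mirrors foo::bar::qux: super:: is how eggs reaches its parent's items
    pub fn ham() {
        super::beans("message from ham");
    }
}

pub fn beans(msg: &str) {
    println!("Beans says: {}", msg);
}
```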
main.rs
/** * Rust's module/package system is *very* fully-featured and rich. * It's worth revisiting the rust book chapter, which is chock full of * special-case details, synonyms, tricks and tips. * * https://doc.rust-lang.org/book/ch07-00-packages-crates-and-modules.html * * * Here are a few of the highest-level details before we start: * * - Cargo expects 0 or 1 `src/main.rs` per project (i.e. per Cargo.toml) * - Cargo expects 0 or 1 `src/lib.rs` per project (i.e. per Cargo.toml) * - The `main.rs` becomes an executable / binary * - The `lib.rs` becomes an exportable library * - Additional top-level executable files can go into `src/bin/foo.rs` * Each such `*.rs` file will become one (1) top-level binary * - Other top-level files in `src/` are importable within the project, * but do not become exportable automatically. * - You can combine keywords in your lib.rs to re-export everything, * but you only get that one anointed lib.rs file for that purpose. * * Until now we have never used the `mod` keyword, which has made it easy * to do simple `hello world` style examples. That has meant that all the * code in each demo file is in the same namespace. Everything is visible * to everything else, which has made everything easy, hooray! But at the * same time, *nothing* has been exportable outside of the single main.rs * files, which means *none* of that code can ever be re-used, booo! * * The `mod` keyword is thus a double-edged sword. As soon as you start * using it: * * - you suddenly have to deal with public vs private and _access_ issues * - entities can now be *re-used* by other files that import the module * * Obviously that's a necessary trade-off for anything non-trivial, so let's * roll up our sleeves and get familiar with it! * * This file is our one (1) `main.rs` for the project, so it will be our * one (1) executable. However, we will make reference to four (4) other * _modules_ for this project, with differing access strategies. * * - `foo`, a module defined right here inline with this file. * But defining modules within `main.rs` is too trivial to be useful. * - `spam`, a module defined in a sibling file (./spam.rs) all by itself. * This pattern is probably all you would need for small projects. * - `sound`, a module defined in a sibling file (./sound.rs) but which * also has associated subdirectories. This example comes from the * official Rust book, but I don't like the dual use of a sound.rs file * with a ./sound/ directory. There is implicit magic here which I dislike. * - `things`, a module defined in a sibling directory with an internal `mod.rs` * file. This pattern comes from the Blandy & Orendorff book, and I like * the fact that everything about it is explicit. This is the one I would * use in my own projects, but you have to be familiar with all of them! * * Finally, we'll show the use of completely external modules inside `things`. * It uses the *external* crate `rand`, the de facto standard way to generate * random values. This library is not part of the rust core, but it *was* a * part of the core long ago, and it is still maintained by the same devs who * *do* maintain the rust core. So it's as anointed as you can get without * being bundled with standard rust. * * All of those target modules are *somewhere* in the project, but none of them * are part of the default public scope of this `main.rs` file. Therefore, we * have to announce that we will be using each of them, and then provide the * correct implementation target for each. Both steps are necessary!
You must * _declare_ that you're going to use a module, and then you must _implement_ * that module. The declarations take the same shape for all four modules, * but their implementations are all different. * */ // For inline examples *only*, the `declaration` and the `implementation` // take place in the same location. That's what the word "inline" means! mod foo { // you are free to define as many nested submodules as you like // but remember that *everything* is private by default! // So we have to explicitly declare the submodules as public pub mod bar { // but eventually you will need a leaf node or why bother? pub fn zug(path: &str) { println!("I am Zug; hear me roar (via a {} path!)", path); } // this function demonstrates the use of `super::` // this is the only way to reach *up* and *over* pub fn qux() { // without super::, qux cannot see up to blort // blort("qux cannot see blort directly"); // compiler error > blort not found in this scope // but this works: super::blort("message from qux"); } } // this fn is part of foo, so it can be reached from main or, via super::, from bar pub fn blort(msg: &str)
} // next, the series of *declarations*, each of which points to one of the // module implementations discussed above. The declaration phase is easy to // forget, because the implementations are all part of the project, and so // their source files are very close by. But you are *required* to make an // explicit declaration nonetheless. If you throw in references to `crate::x::y` // without having preceded them with one of these declarations (e.g. `mod x;`), // the compiler will error out with a "failed to resolve" error message. // (So remember!!): even though these modules are local to the project, you // cannot make *undeclared* relative or absolute path references to them!! // Declare that we are looking for a `spam` module as a peer of some kind // Note that this differs from `use`, which would mean we were expecting // Cargo to find the installed library in whatever cache directory it uses. // NB: this means that a peer/sibling directory is _not_ automatically treated // as a module by default! Only peers that you declare in this way are modules. mod spam; // treat a spam peer (of some kind!) as a module // in this case, it's a file: spam.rs // and that file is self-contained, with no further path-based shenanigans // Declare that we are looking for a `sound` module as a peer of some kind. mod sound; // treat a sound peer (of some kind!) as a module // ending in semicolon instead of braces tells the compiler to find this module // In this case it is a `./sound.rs` file, which *happens* to include its own // submodule in its own subdirectory. The peer file is *definitive*, but nested // subdirectories are a *maybe*. I don't like maybe! Therefore, this pattern // bothers me, and I much prefer the next and final one, in which we have a // top-level directory matching the module name, plus an explicit barrel file. // Declare that we are looking for a `things` module as a peer of some kind mod things; // treat a things peer (of some kind!) as a module. // in this case, it's a directory, which has an inner ./mod.rs file, which // acts as the one-and-only barrel file for that module. I like this approach // _much_ better than the weird one used for sound, above. Everything here is // explicit, and there is no compiler magic going on anywhere. fn main() { // module `foo` is the first and simplest example, since it is inline. // we can get to the inline `foo` module two ways: // Via an absolute path, starting with the language-level keyword `crate` crate::foo::bar::zug("absolute"); // Or via *relative* path, where anything that is a peer of `main` // can be used as the top of the path. foo::bar::zug("relative"); // NB: you could also start with `super::` to back up one level // There is no need for a `sub::`, because that's what you're doing with `::`! foo::blort("message from main"); // call blort directly foo::bar::qux(); // qux also calls blort, via super:: shenanigans // module `spam` is the second-simplest example. It points to an all-in-one // peer file whose contents are eerily similar to our inline foo, above. crate::spam::eggs::toast("absolute"); spam::eggs::toast("relative"); // and here is the same super:: demo we did with foo, but for spam spam::beans("message from main"); spam::eggs::ham(); // Then the `sound` module uses a weird pattern where there is // both a `./sound.rs` peer file, and a `./sound/` peer directory.
// The weirdest part is that the `sound.rs` peer file is allowed to // refer to the `instrument` file without specifying the true path: // there's just an implicit automagic compiler leap where it knows to // look for a `./sound/instrument.rs` file. This bothers me a lot! crate::sound::instrument::clarinet("absolute"); // But once you get past that irritant, you can do the same absolute vs // relative thing that we've demonstrated for everyone else. sound::instrument::clarinet("relative"); // module `things` shows a more-scalable approach to modules // There is no `things.rs`, but there *is* a ./things/ peer directory // and that directory has a `mod.rs` file, which acts as the top level // file for the module, much like `index.js` does in a node project. crate::things::greet(); // use things via absolute path let stuff = things::assortment(); println!("An assortment of things: {:?}", stuff); // accessing nested modules can get verbose! let dog = things::animal::Animal::new("Rover"); println!("Rover says 'ruff ruff': {:?}", dog); // use the `use` keyword to allow terser access use crate::things::mineral::Mineral; // the final segment is now in scope as-is let coal = Mineral::new("Coal, ick!"); println!("Hi! I cause global warming!: {:?}", coal); // the `as` option allows you to avoid namespace collisions if necessary use crate::things::vegetable::Vegetable as Plant; let oak = Plant::new("oak"); println!("From a tiny acorn did I grow: {:?}", oak); } // there are still plenty of other little details to review in the article // in the main Rust book. This is a big topic, because it is _important_! // For example, you can `pub use` to re-export under shorter names, and there // are import syntaxes to condense multiple imports from sub-branches of the // same overall module. And there's a wildcard glob '*' to import everything // from a module, complete with the usual warnings about how that can be a bad // thing, because it makes it much harder to trace relationships. // TODO: go back and re-read the whole chapter, seriously!
{ println!("Blort says: {}", msg); }
identifier_body
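Likewise, ./things/mod.rs is referenced in this record but never shown. A hypothetical barrel file consistent with main()'s calls (things::greet, things::assortment, things::animal::Animal::new) could be sketched as below, with the animal submodule inlined for self-containment; mineral and vegetable would follow the same shape:

```
// Hypothetical ./things/mod.rs, inferred from the calls in main(); not the
// record's actual file. mineral and vegetable would mirror the animal module.
pub mod animal {
    #[derive(Debug)]
    pub struct Animal {
        name: String,
    }
    impl Animal {
        pub fn new(name: &str) -> Self {
            Animal { name: name.to_string() }
        }
    }
}

pub fn greet() {
    println!("greetings from the things module");
}

// main() only debug-prints the result, so any Debug-able collection will do
pub fn assortment() -> Vec<&'static str> {
    vec!["animal", "mineral", "vegetable"]
}
```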
main.rs
/** * Rust's module/package system is *very* fully-featured and rich. * It's worth revisiting the rust book chapter, which is chock full of * special-case details, synonyms, tricks and tips. * * https://doc.rust-lang.org/book/ch07-00-packages-crates-and-modules.html * * * Here are a few of the highest-level details before we start: * * - Cargo expects 0 or 1 `src/main.rs` per project (i.e. per Cargo.toml) * - Cargo expects 0 or 1 `src/lib.rs` per project (i.e. per Cargo.toml) * - The `main.rs` becomes an executable / binary * - The `lib.rs` becomes an exportable library * - Additional top-level executable files can go into `src/bin/foo.rs` * Each such `*.rs` file will become one (1) top-level binary * - Other top-level files in `src/` are importable within the project, * but do not become exportable automatically. * - You can combine keywords in your lib.rs to re-export everything, * but you only get that one anointed lib.rs file for that purpose. * * Until now we have never used the `mod` keyword, which has made it easy * to do simple `hello world` style examples. That has meant that all the * code in each demo file is in the same namespace. Everything is visible * to everything else, which has made everything easy, hooray! But at the * same time, *nothing* has been exportable outside of the single main.rs * files, which means *none* of that code can ever be re-used, booo! * * The `mod` keyword is thus a double-edged sword. As soon as you start * using it: * * - you suddenly have to deal with public vs private and _access_ issues * - entities can now be *re-used* by other files that import the module * * Obviously that's a necessary trade-off for anything non-trivial, so let's * roll up our sleeves and get familiar with it! * * This file is our one (1) `main.rs` for the project, so it will be our * one (1) executable. However, we will make reference to four (4) other * _modules_ for this project, with differing access strategies. * * - `foo`, a module defined right here inline with this file. * But defining modules within `main.rs` is too trivial to be useful. * - `spam`, a module defined in a sibling file (./spam.rs) all by itself. * This pattern is probably all you would need for small projects. * - `sound`, a module defined in a sibling file (./sound.rs) but which * also has associated subdirectories. This example comes from the * official Rust book, but I don't like the dual use of a sound.rs file * with a ./sound/ directory. There is implicit magic here which I dislike. * - `things`, a module defined in a sibling directory with an internal `mod.rs` * file. This pattern comes from the Blandy & Orendorff book, and I like * the fact that everything about it is explicit. This is the one I would * use in my own projects, but you have to be familiar with all of them! * * Finally, we'll show the use of completely external modules inside `things`. * It uses the *external* crate `rand`, the de facto standard way to generate * random values. This library is not part of the rust core, but it *was* a * part of the core long ago, and it is still maintained by the same devs who * *do* maintain the rust core. So it's as anointed as you can get without * being bundled with standard rust. * * All of those target modules are *somewhere* in the project, but none of them * are part of the default public scope of this `main.rs` file. Therefore, we * have to announce that we will be using each of them, and then provide the * correct implementation target for each. Both steps are necessary!
You must * _declare_ that you're going to use a module, and then you must _implement_ * that module. The declarations take the same shape for all four modules, * but their implementations are all different. * */ // For inline examples *only*, the `declaration` and the `implementation` // take place in the same location. That's what the word "inline" means! mod foo { // you are free to define as many nested submodules as you like // but remember that *everything* is private by default! // So we have to explicitly declare the submodules as public pub mod bar { // but eventually you will need a leaf node or why bother? pub fn zug(path: &str) { println!("I am Zug; hear me roar (via a {} path!)", path); } // this function demonstrates the use of `super::` // this is the only way to reach *up* and *over* pub fn qux() { // without super::, qux cannot see up to blort // blort("qux cannot see blort directly"); // compiler error > blort not found in this scope // but this works: super::blort("message from qux"); } } // this fn is part of foo, so it can be reached from main or, via super::, from bar pub fn blort(msg: &str) {
// module implementations discussed above. The declaration phase is easy to // forget, because the implementations are all part of the project, and so // their source files are very close by. But you are *required* to make an // explicit declaration nonetheless. If you throw in references to `crate::x::y` // without having preceded them with one of these declarations (e.g. `mod x;`), // the compiler will error out with a "failed to resolve" error message. // (So remember!!): even though these modules are local to the project, you // cannot make *undeclared* relative or absolute path references to them!! // Declare that we are looking for a `spam` module as a peer of some kind // Note that this differs from `use`, which would mean we were expecting // Cargo to find the installed library in whatever cache directory it uses. // NB: this means that a peer/sibling directory is _not_ automatically treated // as a module by default! Only peers that you declare in this way are modules. mod spam; // treat a spam peer (of some kind!) as a module // in this case, it's a file: spam.rs // and that file is self-contained, with no further path-based shenanigans // Declare that we are looking for a `sound` module as a peer of some kind. mod sound; // treat a sound peer (of some kind!) as a module // ending in semicolon instead of braces tells the compiler to find this module // In this case it is a `./sound.rs` file, which *happens* to include its own // submodule in its own subdirectory. The peer file is *definitive*, but nested // subdirectories are a *maybe*. I don't like maybe! Therefore, this pattern // bothers me, and I much prefer the next and final one, in which we have a // top-level directory matching the module name, plus an explicit barrel file. // Declare that we are looking for a `things` module as a peer of some kind mod things; // treat a things peer (of some kind!) as a module. // in this case, it's a directory, which has an inner ./mod.rs file, which // acts as the one-and-only barrel file for that module. I like this approach // _much_ better than the weird one used for sound, above. Everything here is // explicit, and there is no compiler magic going on anywhere. fn main() { // module `foo` is the first and simplest example, since it is inline. // we can get to the inline `foo` module two ways: // Via an absolute path, starting with the language-level keyword `crate` crate::foo::bar::zug("absolute"); // Or via *relative* path, where anything that is a peer of `main` // can be used as the top of the path. foo::bar::zug("relative"); // NB: you could also start with `super::` to back up one level // There is no need for a `sub::`, because that's what you're doing with `::`! foo::blort("message from main"); // call blort directly foo::bar::qux(); // qux also calls blort, via super:: shenanigans // module `spam` is the second-simplest example. It points to an all-in-one // peer file whose contents are eerily similar to our inline foo, above. crate::spam::eggs::toast("absolute"); spam::eggs::toast("relative"); // and here is the same super:: demo we did with foo, but for spam spam::beans("message from main"); spam::eggs::ham(); // Then the `sound` module uses a weird pattern where there is // both a `./sound.rs` peer file, and a `./sound/` peer directory.
// The weirdest part is that the `sound.rs` peer file is allowed to // refer to the `instrument` file without specifying the true path: // there's just an implicit automagic compiler leap where it knows to // look for a `./sound/instrument.rs` file. This bothers me a lot! crate::sound::instrument::clarinet("absolute"); // But once you get past that irritant, you can do the same absolute vs // relative thing that we've demonstrated for everyone else. sound::instrument::clarinet("relative"); // module `things` shows a more-scalable approach to modules // There is no `things.rs`, but there *is* a ./things/ peer directory // and that directory has a `mod.rs` file, which acts as the top level // file for the module, much like `index.js` does in a node project. crate::things::greet(); // use things via absolute path let stuff = things::assortment(); println!("An assortment of things: {:?}", stuff); // accessing nested modules can get verbose! let dog = things::animal::Animal::new("Rover"); println!("Rover says 'ruff ruff': {:?}", dog); // use the `use` keyword to allow terser access use crate::things::mineral::Mineral; // the final segment is now in scope as-is let coal = Mineral::new("Coal, ick!"); println!("Hi! I cause global warming!: {:?}", coal); // the `as` option allows you to avoid namespace collisions if necessary use crate::things::vegetable::Vegetable as Plant; let oak = Plant::new("oak"); println!("From a tiny acorn did I grow: {:?}", oak); } // there are still plenty of other little details to review in the article // in the main Rust book. This is a big topic, because it is _important_! // For example, you can `pub use` to re-export under shorter names, and there // are import syntaxes to condense multiple imports from sub-branches of the // same overall module. And there's a wildcard glob '*' to import everything // from a module, complete with the usual warnings about how that can be a bad // thing, because it makes it much harder to trace relationships. // TODO: go back and re-read the whole chapter, seriously!
println!("Blort says: {}", msg); } } // next, the series of *declarations*, each of which points to one of the
random_line_split
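The sound pattern this record complains about involves two files, neither of which is shown. A hedged sketch of the minimal pair that would satisfy main()'s crate::sound::instrument::clarinet call:

```
// Hypothetical contents for the file-plus-directory pattern; neither file is
// included in the record.

// ./sound.rs -- the "definitive" peer file; this one declaration is the
// implicit magic: the compiler resolves it to ./sound/instrument.rs
pub mod instrument;

// ./sound/instrument.rs
pub fn clarinet(path: &str) {
    println!("I am a clarinet, reached via a {} path!", path);
}
```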
validate.rs
use crate::{ config::{self, Config, ConfigDiff}, topology::{self, builder::Pieces}, }; use colored::*; use exitcode::ExitCode; use std::collections::HashMap; use std::{fmt, fs::remove_dir_all, path::PathBuf}; use structopt::StructOpt; const TEMPORARY_DIRECTORY: &str = "validate_tmp"; #[derive(StructOpt, Debug)] #[structopt(rename_all = "kebab-case")] pub struct Opts { /// Disables environment checks. That includes component checks and health checks. #[structopt(long)] no_environment: bool, /// Fail validation on warnings that are probably a mistake in the configuration /// or are recommended to be fixed. #[structopt(short, long)] deny_warnings: bool, /// Vector config files in TOML format to validate. #[structopt( name = "config-toml", long, env = "VECTOR_CONFIG_TOML", use_delimiter(true) )] paths_toml: Vec<PathBuf>, /// Vector config files in JSON format to validate. #[structopt( name = "config-json", long, env = "VECTOR_CONFIG_JSON", use_delimiter(true) )] paths_json: Vec<PathBuf>, /// Vector config files in YAML format to validate. #[structopt( name = "config-yaml", long, env = "VECTOR_CONFIG_YAML", use_delimiter(true) )] paths_yaml: Vec<PathBuf>, /// Any number of Vector config files to validate. /// Format is detected from the file name. /// If none are specified the default config path `/etc/vector/vector.toml` /// will be targeted. #[structopt(env = "VECTOR_CONFIG", use_delimiter(true))] paths: Vec<PathBuf>, /// Read configuration from files in one or more directories. /// File format is detected from the file name. /// /// Files not ending in.toml,.json,.yaml, or.yml will be ignored. #[structopt( name = "config-dir", short = "C", long, env = "VECTOR_CONFIG_DIR", use_delimiter(true) )] pub config_dirs: Vec<PathBuf>, } impl Opts { fn paths_with_formats(&self) -> Vec<config::ConfigPath>
} /// Performs topology, component, and health checks. pub async fn validate(opts: &Opts, color: bool) -> ExitCode { let mut fmt = Formatter::new(color); let mut validated = true; let mut config = match validate_config(opts, &mut fmt) { Some(config) => config, None => return exitcode::CONFIG, }; if!opts.no_environment { if let Some(tmp_directory) = create_tmp_directory(&mut config, &mut fmt) { validated &= validate_environment(opts, &config, &mut fmt).await; remove_tmp_directory(tmp_directory); } else { validated = false; } } if validated { fmt.validated(); exitcode::OK } else { exitcode::CONFIG } } fn validate_config(opts: &Opts, fmt: &mut Formatter) -> Option<Config> { // Prepare paths let paths = opts.paths_with_formats(); let paths = if let Some(paths) = config::process_paths(&paths) { paths } else { fmt.error("No config file paths"); return None; }; // Load let paths_list: Vec<_> = paths.iter().map(<&PathBuf>::from).collect(); let mut report_error = |errors| { fmt.title(format!("Failed to load {:?}", &paths_list)); fmt.sub_error(errors); }; config::init_log_schema(&paths, true) .map_err(&mut report_error) .ok()?; let (builder, load_warnings) = config::load_builder_from_paths(&paths) .map_err(&mut report_error) .ok()?; // Build let (config, build_warnings) = builder .build_with_warnings() .map_err(&mut report_error) .ok()?; // Warnings let warnings = load_warnings .into_iter() .chain(build_warnings) .collect::<Vec<_>>(); if!warnings.is_empty() { if opts.deny_warnings { report_error(warnings); return None; } fmt.title(format!("Loaded with warnings {:?}", &paths_list)); fmt.sub_warning(warnings); } else { fmt.success(format!("Loaded {:?}", &paths_list)); } Some(config) } async fn validate_environment(opts: &Opts, config: &Config, fmt: &mut Formatter) -> bool { let diff = ConfigDiff::initial(config); let mut pieces = if let Some(pieces) = validate_components(config, &diff, fmt).await { pieces } else { return false; }; validate_healthchecks(opts, config, &diff, &mut pieces, fmt).await } async fn validate_components( config: &Config, diff: &ConfigDiff, fmt: &mut Formatter, ) -> Option<Pieces> { match topology::builder::build_pieces(config, diff, HashMap::new()).await { Ok(pieces) => { fmt.success("Component configuration"); Some(pieces) } Err(errors) => { fmt.title("Component errors"); fmt.sub_error(errors); None } } } async fn validate_healthchecks( opts: &Opts, config: &Config, diff: &ConfigDiff, pieces: &mut Pieces, fmt: &mut Formatter, ) -> bool { if!config.healthchecks.enabled { fmt.warning("Health checks are disabled"); return!opts.deny_warnings; } let healthchecks = topology::take_healthchecks(diff, pieces); // We are running health checks in serial so it's easier for the users // to parse which errors/warnings/etc. belong to which healthcheck. 
let mut validated = true; for (name, healthcheck) in healthchecks { let mut failed = |error| { validated = false; fmt.error(error); }; match tokio::spawn(healthcheck).await { Ok(Ok(_)) => { if config .sinks .get(&name) .expect("Sink not present") .healthcheck() .enabled { fmt.success(format!("Health check `{}`", name.as_str())); } else { fmt.warning(format!("Health check disabled for `{}`", name)); validated &=!opts.deny_warnings; } } Ok(Err(())) => failed(format!("Health check for `{}` failed", name.as_str())), Err(error) if error.is_cancelled() => failed(format!( "Health check for `{}` was cancelled", name.as_str() )), Err(_) => failed(format!("Health check for `{}` panicked", name.as_str())), } } validated } /// For data directory that we write to: /// 1. Create a tmp directory in it. /// 2. Change config to point to that tmp directory. fn create_tmp_directory(config: &mut Config, fmt: &mut Formatter) -> Option<PathBuf> { match config .global .resolve_and_make_data_subdir(None, TEMPORARY_DIRECTORY) { Ok(path) => { config.global.data_dir = Some(path.clone()); Some(path) } Err(error) => { fmt.error(format!("{}", error)); None } } } fn remove_tmp_directory(path: PathBuf) { if let Err(error) = remove_dir_all(&path) { error!(message = "Failed to remove temporary directory.", path =?path, %error); } } struct Formatter { /// Width of largest printed line max_line_width: usize, /// Can empty line be printed print_space: bool, color: bool, // Intros error_intro: String, warning_intro: String, success_intro: String, } impl Formatter { fn new(color: bool) -> Self { Self { max_line_width: 0, print_space: false, error_intro: if color { format!("{}", "x".red()) } else { "x".to_owned() }, warning_intro: if color { format!("{}", "~".yellow()) } else { "~".to_owned() }, success_intro: if color { format!("{}", "√".green()) } else { "√".to_owned() }, color, } } /// Final confirmation that validation process was successful. fn validated(&self) { println!("{:-^width$}", "", width = self.max_line_width); if self.color { // Coloring needs to be used directly so that print // infrastructure correctly determines length of the // "Validated". Otherwise, ansi escape coloring is // calculated into the length. println!( "{:>width$}", "Validated".green(), width = self.max_line_width ); } else { println!("{:>width$}", "Validated", width = self.max_line_width) } } /// Standalone line fn success(&mut self, msg: impl AsRef<str>) { self.print(format!("{} {}\n", self.success_intro, msg.as_ref())) } /// Standalone line fn warning(&mut self, warning: impl AsRef<str>) { self.print(format!("{} {}\n", self.warning_intro, warning.as_ref())) } /// Standalone line fn error(&mut self, error: impl AsRef<str>) { self.print(format!("{} {}\n", self.error_intro, error.as_ref())) } /// Marks sub fn title(&mut self, title: impl AsRef<str>) { self.space(); self.print(format!( "{}\n{:-<width$}\n", title.as_ref(), "", width = title.as_ref().len() )) } /// A list of warnings that go with a title. fn sub_warning<I: IntoIterator>(&mut self, warnings: I) where I::Item: fmt::Display, { self.sub(self.warning_intro.clone(), warnings) } /// A list of errors that go with a title. fn sub_error<I: IntoIterator>(&mut self, errors: I) where I::Item: fmt::Display, { self.sub(self.error_intro.clone(), errors) } fn sub<I: IntoIterator>(&mut self, intro: impl AsRef<str>, msgs: I) where I::Item: fmt::Display, { for msg in msgs { self.print(format!("{} {}\n", intro.as_ref(), msg)); } self.space(); } /// Prints empty space if necessary. 
fn space(&mut self) { if self.print_space { self.print_space = false; println!(); } } fn print(&mut self, print: impl AsRef<str>) { let width = print .as_ref() .lines() .map(|line| { String::from_utf8_lossy(&strip_ansi_escapes::strip(line).unwrap()) .chars() .count() }) .max() .unwrap_or(0); self.max_line_width = width.max(self.max_line_width); self.print_space = true; print!("{}", print.as_ref()) } }
{ config::merge_path_lists(vec![ (&self.paths, None), (&self.paths_toml, Some(config::Format::Toml)), (&self.paths_json, Some(config::Format::Json)), (&self.paths_yaml, Some(config::Format::Yaml)), ]) .map(|(path, hint)| config::ConfigPath::File(path, hint)) .chain( self.config_dirs .iter() .map(|dir| config::ConfigPath::Dir(dir.to_path_buf())), ) .collect() }
identifier_body
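This record defines Opts and validate() but not the entry point that connects them. A hypothetical driver would look roughly like the sketch below; Vector's real binary routes `vector validate` through its own subcommand dispatch, so this is wiring for illustration only:

```
// Hypothetical wiring only -- not the actual entry point.
#[tokio::main]
async fn main() {
    // StructOpt derives the parser from the Opts struct above
    let opts = Opts::from_args();
    let code = validate(&opts, /* color */ true).await;
    std::process::exit(code);
}
```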
validate.rs
use crate::{ config::{self, Config, ConfigDiff}, topology::{self, builder::Pieces}, }; use colored::*; use exitcode::ExitCode; use std::collections::HashMap; use std::{fmt, fs::remove_dir_all, path::PathBuf}; use structopt::StructOpt; const TEMPORARY_DIRECTORY: &str = "validate_tmp"; #[derive(StructOpt, Debug)] #[structopt(rename_all = "kebab-case")] pub struct Opts { /// Disables environment checks. That includes component checks and health checks. #[structopt(long)] no_environment: bool, /// Fail validation on warnings that are probably a mistake in the configuration /// or are recommended to be fixed. #[structopt(short, long)] deny_warnings: bool, /// Vector config files in TOML format to validate. #[structopt( name = "config-toml", long, env = "VECTOR_CONFIG_TOML", use_delimiter(true) )] paths_toml: Vec<PathBuf>, /// Vector config files in JSON format to validate. #[structopt( name = "config-json", long, env = "VECTOR_CONFIG_JSON", use_delimiter(true) )] paths_json: Vec<PathBuf>, /// Vector config files in YAML format to validate. #[structopt( name = "config-yaml", long, env = "VECTOR_CONFIG_YAML", use_delimiter(true) )] paths_yaml: Vec<PathBuf>, /// Any number of Vector config files to validate. /// Format is detected from the file name. /// If none are specified the default config path `/etc/vector/vector.toml` /// will be targeted. #[structopt(env = "VECTOR_CONFIG", use_delimiter(true))] paths: Vec<PathBuf>, /// Read configuration from files in one or more directories. /// File format is detected from the file name. /// /// Files not ending in.toml,.json,.yaml, or.yml will be ignored. #[structopt( name = "config-dir", short = "C", long, env = "VECTOR_CONFIG_DIR", use_delimiter(true) )] pub config_dirs: Vec<PathBuf>, } impl Opts { fn paths_with_formats(&self) -> Vec<config::ConfigPath> { config::merge_path_lists(vec![ (&self.paths, None), (&self.paths_toml, Some(config::Format::Toml)), (&self.paths_json, Some(config::Format::Json)), (&self.paths_yaml, Some(config::Format::Yaml)), ]) .map(|(path, hint)| config::ConfigPath::File(path, hint)) .chain( self.config_dirs .iter() .map(|dir| config::ConfigPath::Dir(dir.to_path_buf())), ) .collect() } } /// Performs topology, component, and health checks. 
pub async fn validate(opts: &Opts, color: bool) -> ExitCode { let mut fmt = Formatter::new(color); let mut validated = true; let mut config = match validate_config(opts, &mut fmt) { Some(config) => config, None => return exitcode::CONFIG, }; if!opts.no_environment { if let Some(tmp_directory) = create_tmp_directory(&mut config, &mut fmt) { validated &= validate_environment(opts, &config, &mut fmt).await; remove_tmp_directory(tmp_directory); } else { validated = false; } } if validated { fmt.validated(); exitcode::OK } else { exitcode::CONFIG } } fn validate_config(opts: &Opts, fmt: &mut Formatter) -> Option<Config> { // Prepare paths let paths = opts.paths_with_formats(); let paths = if let Some(paths) = config::process_paths(&paths) { paths } else { fmt.error("No config file paths"); return None; }; // Load let paths_list: Vec<_> = paths.iter().map(<&PathBuf>::from).collect(); let mut report_error = |errors| { fmt.title(format!("Failed to load {:?}", &paths_list)); fmt.sub_error(errors); }; config::init_log_schema(&paths, true) .map_err(&mut report_error) .ok()?; let (builder, load_warnings) = config::load_builder_from_paths(&paths) .map_err(&mut report_error) .ok()?; // Build let (config, build_warnings) = builder .build_with_warnings() .map_err(&mut report_error) .ok()?; // Warnings let warnings = load_warnings .into_iter() .chain(build_warnings) .collect::<Vec<_>>(); if!warnings.is_empty() { if opts.deny_warnings { report_error(warnings); return None; } fmt.title(format!("Loaded with warnings {:?}", &paths_list)); fmt.sub_warning(warnings); } else { fmt.success(format!("Loaded {:?}", &paths_list)); } Some(config) } async fn validate_environment(opts: &Opts, config: &Config, fmt: &mut Formatter) -> bool { let diff = ConfigDiff::initial(config); let mut pieces = if let Some(pieces) = validate_components(config, &diff, fmt).await { pieces } else { return false; }; validate_healthchecks(opts, config, &diff, &mut pieces, fmt).await } async fn validate_components( config: &Config, diff: &ConfigDiff, fmt: &mut Formatter, ) -> Option<Pieces> { match topology::builder::build_pieces(config, diff, HashMap::new()).await { Ok(pieces) => { fmt.success("Component configuration"); Some(pieces) } Err(errors) => { fmt.title("Component errors"); fmt.sub_error(errors); None } } } async fn validate_healthchecks( opts: &Opts, config: &Config, diff: &ConfigDiff, pieces: &mut Pieces, fmt: &mut Formatter, ) -> bool { if!config.healthchecks.enabled { fmt.warning("Health checks are disabled"); return!opts.deny_warnings; } let healthchecks = topology::take_healthchecks(diff, pieces); // We are running health checks in serial so it's easier for the users // to parse which errors/warnings/etc. belong to which healthcheck. let mut validated = true; for (name, healthcheck) in healthchecks { let mut failed = |error| { validated = false; fmt.error(error); }; match tokio::spawn(healthcheck).await { Ok(Ok(_)) => { if config .sinks .get(&name) .expect("Sink not present") .healthcheck() .enabled { fmt.success(format!("Health check `{}`", name.as_str())); } else { fmt.warning(format!("Health check disabled for `{}`", name)); validated &=!opts.deny_warnings; } } Ok(Err(())) => failed(format!("Health check for `{}` failed", name.as_str())), Err(error) if error.is_cancelled() => failed(format!( "Health check for `{}` was cancelled", name.as_str() )), Err(_) => failed(format!("Health check for `{}` panicked", name.as_str())), } } validated } /// For data directory that we write to: /// 1. Create a tmp directory in it. /// 2. 
/// For the data directory that we write to:
/// 1. Create a tmp directory in it.
/// 2. Change config to point to that tmp directory.
fn create_tmp_directory(config: &mut Config, fmt: &mut Formatter) -> Option<PathBuf> {
    match config
        .global
        .resolve_and_make_data_subdir(None, TEMPORARY_DIRECTORY)
    {
        Ok(path) => {
            config.global.data_dir = Some(path.clone());
            Some(path)
        }
        Err(error) => {
            fmt.error(format!("{}", error));
            None
        }
    }
}

fn remove_tmp_directory(path: PathBuf) {
    if let Err(error) = remove_dir_all(&path) {
        error!(message = "Failed to remove temporary directory.", path = ?path, %error);
    }
}

struct Formatter {
    /// Width of the largest printed line
    max_line_width: usize,
    /// Whether an empty line may be printed
    print_space: bool,
    color: bool,
    // Intros
    error_intro: String,
    warning_intro: String,
    success_intro: String,
}

impl Formatter {
    fn new(color: bool) -> Self {
        Self {
            max_line_width: 0,
            print_space: false,
            error_intro: if color {
                format!("{}", "x".red())
            } else {
                "x".to_owned()
            },
            warning_intro: if color {
                format!("{}", "~".yellow())
            } else {
                "~".to_owned()
            },
            success_intro: if color {
                format!("{}", "√".green())
            } else {
                "√".to_owned()
            },
            color,
        }
    }

    /// Final confirmation that the validation process was successful.
    fn validated(&self) {
        println!("{:-^width$}", "", width = self.max_line_width);
        if self.color {
            // Coloring needs to be applied directly so that the print
            // infrastructure correctly determines the length of
            // "Validated". Otherwise, the ANSI escape codes are
            // counted toward the length.
            println!(
                "{:>width$}",
                "Validated".green(),
                width = self.max_line_width
            );
        } else {
            println!("{:>width$}", "Validated", width = self.max_line_width)
        }
    }

    /// Standalone line
    fn success(&mut self, msg: impl AsRef<str>) {
        self.print(format!("{} {}\n", self.success_intro, msg.as_ref()))
    }

    /// Standalone line
    fn warning(&mut self, warning: impl AsRef<str>) {
        self.print(format!("{} {}\n", self.warning_intro, warning.as_ref()))
    }

    /// Standalone line
    fn error(&mut self, error: impl AsRef<str>) {
        self.print(format!("{} {}\n", self.error_intro, error.as_ref()))
    }

    /// Marks a titled sub-section
    fn title(&mut self, title: impl AsRef<str>) {
        self.space();
        self.print(format!(
            "{}\n{:-<width$}\n",
            title.as_ref(),
            "",
            width = title.as_ref().len()
        ))
    }

    /// A list of warnings that go with a title.
    fn sub_warning<I: IntoIterator>(&mut self, warnings: I)
    where
        I::Item: fmt::Display,
    {
        self.sub(self.warning_intro.clone(), warnings)
    }

    /// A list of errors that go with a title.
    fn sub_error<I: IntoIterator>(&mut self, errors: I)
    where
        I::Item: fmt::Display,
    {
        self.sub(self.error_intro.clone(), errors)
    }

    fn sub<I: IntoIterator>(&mut self, intro: impl AsRef<str>, msgs: I)
    where
        I::Item: fmt::Display,
    {
        for msg in msgs {
            self.print(format!("{} {}\n", intro.as_ref(), msg));
        }
        self.space();
    }

    /// Prints an empty line if necessary.
    fn space(&mut self) {
        if self.print_space {
            self.print_space = false;
            println!();
        }
    }

    fn print(&mut self, print: impl AsRef<str>) {
        let width = print
            .as_ref()
            .lines()
            .map(|line| {
                String::from_utf8_lossy(&strip_ansi_escapes::strip(line).unwrap())
                    .chars()
                    .count()
            })
            .max()
            .unwrap_or(0);
        self.max_line_width = width.max(self.max_line_width);
        self.print_space = true;
        print!("{}", print.as_ref())
    }
}
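// A small test-style sketch of why `Formatter::print` strips ANSI escapes
// before measuring line width: a colored string is longer in bytes, but has
// the same visible width as its plain form. Assumes the same `colored` and
// `strip_ansi_escapes` crates used above.
#[cfg(test)]
mod ansi_width_sketch {
    use colored::*;

    fn visible_width(line: &str) -> usize {
        // strip() removes escape sequences such as "\x1b[32m".
        String::from_utf8_lossy(&strip_ansi_escapes::strip(line).unwrap())
            .chars()
            .count()
    }

    #[test]
    fn colored_and_plain_have_equal_visible_width() {
        colored::control::set_override(true); // force color even without a tty
        let plain = "Validated";
        let colored = format!("{}", "Validated".green());
        assert!(colored.len() > plain.len()); // escape codes add bytes
        assert_eq!(visible_width(&colored), visible_width(plain));
    }
}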
mod.rs
#![allow(clippy::pub_enum_variant_names)]

use std::collections::HashMap;
use serde::{Serialize, Serializer};
extern crate snowflake;
pub use std::sync::Arc;
use crate::ast::*;
use crate::externals::{External, ArgumentType, EXTERNALS};

mod intexp;
mod opexp;
mod recordexp;
mod seqexp;
mod assignexp;
mod ifexp;
mod whileexp;
mod forexp;
mod letexp;
mod arrayexp;
mod varexp;
mod nilexp;
mod unitexp;
mod stringexp;
mod callexp;
mod breakexp;

#[derive(Debug, PartialEq, Clone, Serialize)]
/// Write permissions for an int value
pub enum R {
    /// Read-only
    RO,
    /// Read-write
    RW
}

/// Unique identifier for Records and Arrays
pub type TypeId = snowflake::ProcessUniqueId;

/// Generate a new type id for a Record or Array
pub fn newtypeid() -> TypeId {
    snowflake::ProcessUniqueId::new()
}

/// Types in the Tiger language
#[derive(Debug, Clone)]
pub enum TigerType {
    /// as in `()`
    TUnit,
    /// as in `nil`
    TNil,
    /// as in `3`
    TInt(R),
    /// as in `"perro"`
    TString,
    /// as in `arrtype1 [10] of 0`
    TArray(Arc<TigerType>, TypeId),
    /// as in `{name : string, address : string, id : int, age : int}`
    TRecord(Vec<(Symbol, RecordFieldType, i32)>, TypeId),
    /// Type synonym
    Internal(String),
    /// This node has not been typed yet. The parser gives this type to all nodes in the AST.
    Untyped,
}

#[derive(Debug, Clone)]
pub enum RecordFieldType {
    Record(TypeId),
    Type(Arc<TigerType>)
}

impl PartialEq for RecordFieldType {
    fn eq(&self, other: &Self) -> bool {
        use RecordFieldType::*;
        match (self, other) {
            (Record(id1), Record(id2)) => id1 == id2,
            (Record(..), Type(t2)) => matches!(**t2, TigerType::TNil),
            (Type(t1), Record(..)) => matches!(**t1, TigerType::TNil),
            (Type(t1), Type(t2)) => t1 == t2,
        }
    }
}

impl Serialize for TigerType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            TigerType::TUnit => serializer.serialize_str("Unit"),
            TigerType::TNil => serializer.serialize_str("Nil"),
            TigerType::TString => serializer.serialize_str("String"),
            TigerType::TInt(..) => serializer.serialize_str("Int"),
            TigerType::TArray(..) => serializer.serialize_str("Array"),
            TigerType::TRecord(..) => serializer.serialize_str("Record"),
            TigerType::Internal(..) => serializer.serialize_str("Internal"),
            TigerType::Untyped => serializer.serialize_str("Untyped"),
        }
    }
}

/// Resolves an internal (synonym) type to the real type it names
pub fn tipo_real(t: Arc<TigerType>, tenv: &TypeEnviroment) -> Arc<TigerType> {
    match &*t {
        TigerType::Internal(s) => match tenv.get(s) {
            Some(tipo) => Arc::clone(&tipo),
            None => panic!("Undefined")
        },
        _ => t
    }
}

/// Returns true iff the type is an Int
pub fn es_int(t: &TigerType) -> bool {
    matches!(*t, TigerType::TInt(_))
}

/// An entry in our `TypeEnviroment` table.
#[derive(Clone, Debug)]
pub enum EnvEntry {
    /// A declared variable
    Var {
        /// The type of the variable
        ty: Arc<TigerType>,
    },
    /// A declared function
    Func {
        /// The types of the arguments of the function
        formals: Vec<Arc<TigerType>>,
        /// The type of the return value of the function
        result: Arc<TigerType>,
    }
}

/// A table where we store the types that are declared at this point in typechecking.
///
/// When a type is used in a declaration, we look in this table and raise a `TypeError` if it's not found.
type TypeEnviroment = HashMap<Symbol, Arc<TigerType>>;

/// A table where we store the values that are declared at this point in typechecking.
///
/// When a variable or function is used somewhere in the code, we check this table and raise a `TypeError` if it's not found.
type ValueEnviroment = HashMap<Symbol, EnvEntry>;

/// Generate a `TypeEnv` that contains integers and strings
fn initial_type_env() -> TypeEnviroment {
    vec![
        (Symbol::from("int"), Arc::new(TigerType::TInt(R::RW))),
        (Symbol::from("string"), Arc::new(TigerType::TString))
    ]
    .into_iter()
    .collect()
}

impl From<ArgumentType> for TigerType {
    fn from(arg: ArgumentType) -> Self {
        match arg {
            ArgumentType::String => TigerType::TString,
            ArgumentType::Int => TigerType::TInt(R::RO)
        }
    }
}

fn initial_value_env() -> ValueEnviroment {
    EXTERNALS
        .iter()
        .filter(|External { is_runtime, .. }| !is_runtime)
        .map(|External { name, arguments, return_value, .. }| {
            ((*name).to_string(), EnvEntry::Func {
                formals: arguments
                    .iter()
                    .map(|arg| Arc::new(TigerType::from(*arg)))
                    .collect(),
                result: if let Some(rt) = return_value {
                    Arc::new(TigerType::from(*rt))
                } else {
                    Arc::new(TigerType::TUnit)
                }
            })
        })
        .collect()
}

/// Errors that the typechecker can fail with.
#[derive(Debug, Clone, Serialize)]
pub enum TypeError {
    /// Using a variable that was not declared.
    UndeclaredSimpleVar(Pos),
    /// Using a function that was not declared.
    UndeclaredFunction(Pos),
    /// Using a type that was not declared.
    UndeclaredType(Pos),
    /// Using a field from a record that was not declared.
    UndeclaredField(Pos),
    /// Tried to use an array or record as a simple variable.
    NotSimpleVar(Pos),
    /// Tried to do a function call on a variable.
    NotFunctionVar(Pos),
    /// Tried to access a record field on something other than a record.
    NotRecordType(Pos),
    /// Tried to index something other than an array.
    NotArrayType(Pos),
    /// Called a function with too many arguments.
    TooManyArguments(Pos),
    /// Called a function with too few arguments.
    TooFewArguments(Pos),
    /// Expected a different type.
    TypeMismatch(Pos),
    /// An if-then-else with different types for each branch.
    ThenElseTypeMismatch(Pos),
    /// Assigning to an Int with `R::RO`.
    ReadOnlyAssignment(Pos),
    /// The bodies of for, while, or if-without-else statements should type to Unit.
    NonUnitBody(Pos),
    /// Type mismatch in a function call argument.
    InvalidCallArgument(Pos),
    /// A definition is not defining values for all record fields.
    MissingRecordField(Pos),
    /// The sizes of array definitions should type to Int.
    NonIntegerSize(Pos),
    /// All conditionals should type to Int.
    NonIntegerCondition(Pos),
    /// The range boundaries of for expressions should type to Int.
    NonIntegerForRange(Pos),
    /// Integer operation over non-integer operands.
    NonIntegerOperand(Pos),
    /// The subscript of a field variable should type to Int.
    NonIntegerSubscript(Pos),
    /// Type declarations form an illegal cycle.
    TypeCycle(Pos),
    /// Something is declared twice in the same block.
    DuplicatedDeclarations(Pos),
    /// You can only assign nil to variables with an explicit type.
    UnconstrainedNilInitialization(Pos),
    /// All Tiger programs should return something of Int type.
    NonIntegerProgram(Pos)
}

impl PartialEq for TigerType {
    fn eq(&self, other: &Self) -> bool {
        use TigerType::*;
        match (self, other) {
            (TUnit, TUnit)
            | (TString, TString)
            | (TRecord(_, _), TNil)
            | (TNil, TRecord(_, _))
            | (TInt(_), TInt(_)) => true,
            (TRecord(_, uid1), TRecord(_, uid2))
            | (TArray(_, uid1), TArray(_, uid2)) => uid1 == uid2,
            (Internal(s), Internal(t)) => s == t,
            (Internal(_), _) => panic!("We are comparing an Internal"),
            (_, Internal(_)) => panic!("We are comparing an Internal"),
            (_, _) => false,
        }
    }
}

/// Rebuild an `AST` with the correct types given the context in the environments, or return a `TypeError`
fn type_exp(ast: AST, type_env: &TypeEnviroment, value_env: &ValueEnviroment) -> Result<AST, TypeError> {
    let AST { node, .. } = &ast;
    match node {
        Exp::Var(..) => varexp::typecheck(ast, type_env, value_env),
        Exp::Unit => unitexp::typecheck(ast, type_env, value_env),
        Exp::Nil => nilexp::typecheck(ast, type_env, value_env),
        Exp::Int(..) => intexp::typecheck(ast, type_env, value_env),
        Exp::String(..) => stringexp::typecheck(ast, type_env, value_env),
        Exp::Call { .. } => callexp::typecheck(ast, type_env, value_env),
        Exp::Op { .. } => opexp::typecheck(ast, type_env, value_env),
        Exp::Assign { .. } => assignexp::typecheck(ast, type_env, value_env),
        Exp::Record { .. } => recordexp::typecheck(ast, type_env, value_env),
        Exp::Seq(..) => seqexp::typecheck(ast, type_env, value_env),
        Exp::If { .. } => ifexp::typecheck(ast, type_env, value_env),
        Exp::While { .. } => whileexp::typecheck(ast, type_env, value_env),
        Exp::For { .. } => forexp::typecheck(ast, type_env, value_env),
        Exp::Let { .. } => letexp::typecheck(ast, type_env, value_env),
        Exp::Break => breakexp::typecheck(ast, type_env, value_env),
        Exp::Array { .. } => arrayexp::typecheck(ast, type_env, value_env),
    }
}

/// Typecheck the program
pub fn typecheck(ast: AST) -> Result<AST, TypeError> {
    let typed_ast = type_exp(ast, &initial_type_env(), &initial_value_env())?;
    if *typed_ast.typ == TigerType::TInt(R::RW) {
        Ok(typed_ast)
    } else {
        Err(TypeError::NonIntegerProgram(typed_ast.pos))
    }
}
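// A test-style sketch of the nominal typing that `TypeId` provides: two array
// types with the same element type are only equal when they share the same
// unique id, per the `PartialEq` impl above. Uses the module's own types and
// `newtypeid`.
#[cfg(test)]
mod typeid_sketch {
    use super::*;

    #[test]
    fn structurally_equal_arrays_are_distinct_types() {
        let a = TigerType::TArray(Arc::new(TigerType::TInt(R::RW)), newtypeid());
        let b = TigerType::TArray(Arc::new(TigerType::TInt(R::RW)), newtypeid());
        assert!(a != b); // same shape, different declarations: different ids
        let c = a.clone();
        assert!(a == c); // a clone keeps the id, so it's the same type
    }
}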
main.rs
use bulletproofs::r1cs::{
    ConstraintSystem, LinearCombination, Prover, R1CSError, Variable, Verifier,
};
use bulletproofs::{BulletproofGens, PedersenGens};
use curve25519_dalek::scalar::Scalar;
use merlin::Transcript;
use rand::Rng;

use std::u64;

struct TaxBrackets(Vec<(u64, u64)>);

fn negate_bit<T>(x: T) -> LinearCombination
where
    T: Into<LinearCombination>,
{
    LinearCombination::from(Variable::One()) - x
}

fn scalar_to_bits_le<CS: ConstraintSystem>(
    cs: &mut CS,
    n_bits: usize,
    var: LinearCombination,
) -> Result<Vec<Variable>, R1CSError> {
    // This is a helper function that caches the evaluation of the input variable so that it
    // doesn't get recomputed and verified for each bit allocation.
    let mut cache_evaluation = {
        let get_bit = |scalar: &Scalar, i: usize| (scalar.as_bytes()[i >> 3] >> (i & 7)) & 1;
        let local_var = var.clone();
        let mut val_cache = None;
        move |eval: &dyn Fn(&LinearCombination) -> Scalar, i: usize| -> Result<u8, R1CSError> {
            if val_cache.is_none() {
                let val = eval(&local_var);
                // The value fits in n_bits only if every higher bit is zero.
                let valid = (n_bits..256).all(|i| get_bit(&val, i) == 0);
                val_cache = Some(if valid {
                    Ok(val)
                } else {
                    Err(R1CSError::GadgetError {
                        description: format!("Value is not represented in {} bits", n_bits),
                    })
                });
            }
            val_cache
                .as_ref()
                .expect("the value must have been computed and cached by the block above")
                .as_ref()
                .map(|scalar| get_bit(scalar, i))
                .map_err(|e| e.clone())
        }
    };

    let bit_vars = (0..n_bits)
        .map(|i| {
            let (lhs, rhs, out) = cs.allocate(|eval| {
                let bit = cache_evaluation(eval, i)?;
                Ok((bit.into(), (1 - bit).into(), Scalar::zero()))
            })?;

            // Enforce that the lhs variable represents a bit.
            // b (1 - b) = 0
            cs.constrain(LinearCombination::default() + rhs + lhs - Variable::One());
            cs.constrain(out.into());

            Ok(lhs)
        })
        .collect::<Result<Vec<_>, _>>()?;

    let two_powers = (0..n_bits).map(|i| {
        let mut two_power_repr = [0u8; 32];
        two_power_repr[i >> 3] |= 1 << (i & 7);
        Scalar::from_bits(two_power_repr)
    });
    let bit_sum = bit_vars
        .iter()
        .cloned()
        .zip(two_powers)
        .collect::<LinearCombination>();

    // Enforce that var is equal to the inner product of the bits with powers of two.
    cs.constrain(var - bit_sum);

    Ok(bit_vars)
}

fn lt_gate<CS: ConstraintSystem>(
    cs: &mut CS,
    n_bits: usize,
    lhs: LinearCombination,
    rhs: LinearCombination,
) -> Result<LinearCombination, R1CSError> {
    let lhs_bits = scalar_to_bits_le(cs, n_bits, lhs)?;
    let rhs_bits = scalar_to_bits_le(cs, n_bits, rhs)?;
    let zero = LinearCombination::default();

    // Iterate through bits from most significant to least, comparing each pair.
    let (lt, _) = lhs_bits
        .into_iter()
        .zip(rhs_bits.into_iter())
        .rev()
        .fold((zero.clone(), zero.clone()), |(lt, gt), (l_bit, r_bit)| {
            // lt and gt are boolean LinearCombinations that are 1 if lhs < rhs or lhs > rhs
            // respectively after the first i most significant bits.
            // Invariant: lt & gt will never both be 1, so (lt || gt) = (lt + gt).

            // eq = !(lt || gt)
            let eq = negate_bit(lt.clone() + gt.clone());

            // Whether left bit i is < or > right bit i.
            // bit_lt = !l_bit && r_bit = (1 - l_bit) * r_bit
            // bit_gt = l_bit && !r_bit = l_bit * (1 - r_bit)
            let (_, _, bit_lt) = cs.multiply(negate_bit(l_bit), r_bit.into());
            let (_, _, bit_gt) = cs.multiply(l_bit.into(), negate_bit(r_bit));

            // new_lt = lt + eq && bit_lt
            //   -> lt_diff = new_lt - lt = eq * bit_lt
            // new_gt = gt + eq && bit_gt
            //   -> gt_diff = new_gt - gt = eq * bit_gt
            let (_, _, lt_diff) = cs.multiply(eq.clone(), bit_lt.into());
            let (_, _, gt_diff) = cs.multiply(eq.clone(), bit_gt.into());

            (lt + lt_diff, gt + gt_diff)
        });

    Ok(lt)
}
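// A plain, non-zero-knowledge reference version of the MSB-first comparison
// fold that `lt_gate` encodes as constraints: scan bits from most significant
// to least and let the first differing bit decide, maintaining the same
// lt/gt/eq invariant described in the comments above.
#[cfg(test)]
mod lt_fold_sketch {
    fn lt_by_bits(lhs: u64, rhs: u64) -> bool {
        let (mut lt, mut gt) = (false, false);
        for i in (0..64).rev() {
            let l_bit = (lhs >> i) & 1 == 1;
            let r_bit = (rhs >> i) & 1 == 1;
            let eq = !(lt || gt); // no higher bit has differed yet
            lt = lt || (eq && !l_bit && r_bit);
            gt = gt || (eq && l_bit && !r_bit);
        }
        lt
    }

    #[test]
    fn matches_native_comparison() {
        for &(a, b) in &[(3u64, 7u64), (7, 3), (5, 5), (0, u64::MAX)] {
            assert_eq!(lt_by_bits(a, b), a < b);
        }
    }
}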
fn synthesize<CS: ConstraintSystem>(
    cs: &mut CS,
    brackets: &TaxBrackets,
    values: &[Variable],
    expected: &Variable,
) -> Result<(), R1CSError> {
    // Compute Σ values.
    let total = values
        .iter()
        .map(|val| (val.clone(), Scalar::one()))
        .collect::<LinearCombination>();

    let mut last_cutoff = Scalar::zero();
    let mut cumulative = LinearCombination::default();
    for (cutoff, rate) in brackets.0.iter() {
        let next_cutoff = Scalar::from(*cutoff);
        let rate_scalar = Scalar::from(*rate);

        let gt_last = lt_gate(cs, 64, last_cutoff.into(), total.clone())?;
        let gt_next = lt_gate(cs, 64, next_cutoff.into(), total.clone())?;
        let (_, _, between_last_next) = cs.multiply(gt_last.clone(), negate_bit(gt_next.clone()));

        let (_, _, between_value) = cs.multiply(
            total.clone() - last_cutoff,
            LinearCombination::from(between_last_next) * rate_scalar,
        );
        let (_, _, exceeds_value) = cs.multiply(
            LinearCombination::from(next_cutoff - last_cutoff),
            gt_next * rate_scalar,
        );

        cumulative = cumulative + between_value + exceeds_value;
        last_cutoff = next_cutoff;
    }

    cumulative = cumulative - expected.clone();
    cs.constrain(cumulative);

    Ok(())
}

fn compute_taxes(brackets: &TaxBrackets, total: u64) -> u64 {
    (0..brackets.0.len())
        .map(|i| {
            let last_cutoff = if i == 0 { 0u64 } else { brackets.0[i - 1].0 };
            let (next_cutoff, rate) = brackets.0[i];
            let amount = if total > next_cutoff {
                next_cutoff - last_cutoff
            } else if total > last_cutoff {
                total - last_cutoff
            } else {
                0
            };
            amount * rate
        })
        .fold(0, |sum, v| sum + v)
}
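// A worked check of the marginal-bracket arithmetic in `compute_taxes`: with
// a hypothetical two-bracket schedule (rate 10 up to 1000, rate 20 above),
// a total of 1500 is charged 1000 at the first rate plus 500 at the second.
// As in `compute_taxes` above, rates are applied as plain multipliers.
#[cfg(test)]
mod bracket_arithmetic_sketch {
    use super::*;

    #[test]
    fn taxes_each_slice_at_its_own_rate() {
        let brackets = TaxBrackets(vec![(1000, 10), (u64::MAX, 20)]);
        // 1000 * 10 + (1500 - 1000) * 20 = 10_000 + 10_000
        assert_eq!(compute_taxes(&brackets, 1500), 20_000);
    }
}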
fn main() {
    let brackets = TaxBrackets(vec![
        (952500, 10),
        (3870000, 12),
        (8250000, 22),
        (15750000, 24),
        (20000000, 32),
        (50000000, 35),
        (u64::MAX, 37),
    ]);

    let pc_gens = PedersenGens::default();
    let bp_gens = BulletproofGens::new(8192, 1);

    let mut prover_transcript = Transcript::new(b"zk taxes");
    let mut prover = Prover::new(&bp_gens, &pc_gens, &mut prover_transcript);

    let mut rng = rand::thread_rng();
    let income_amounts = (0..4)
        // Multiply by 100 cents to ensure there is no rounding necessary.
        .map(|_| rng.gen_range(0, 100000) * 100)
        .collect::<Vec<_>>();
    let total_income = income_amounts.iter().fold(0, |sum, v| sum + v);
    let total_tax = compute_taxes(&brackets, total_income);

    println!("Total: {}, Taxes: {}", total_income, total_tax);

    let inputs = income_amounts
        .iter()
        .map(|value| (Scalar::from(*value), Scalar::random(&mut rng)))
        .collect::<Vec<_>>();
    let output_v = Scalar::from(total_tax);
    let output_r = Scalar::random(&mut rng);

    let (input_pts, input_vars) = inputs
        .iter()
        .map(|(v, r)| prover.commit(*v, *r))
        .unzip::<_, _, Vec<_>, Vec<_>>();
    let (output_pt, output_var) = prover.commit(output_v, output_r);

    synthesize(&mut prover, &brackets, &input_vars, &output_var).unwrap();

    let proof = prover.prove().unwrap();

    let mut verifier_transcript = Transcript::new(b"zk taxes");
    let mut verifier = Verifier::new(&bp_gens, &pc_gens, &mut verifier_transcript);

    let input_vars = input_pts
        .iter()
        .map(|pt| verifier.commit(*pt))
        .collect::<Vec<_>>();
    let output_var = verifier.commit(output_pt);

    synthesize(&mut verifier, &brackets, &input_vars, &output_var).unwrap();

    assert!(verifier.verify(&proof).is_ok());
    println!("Success!");
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_to_bits_gadget() {
        let pc_gens = PedersenGens::default();
        let bp_gens = BulletproofGens::new(128, 1);

        let mut rng = rand::thread_rng();

        for _ in 0..100 {
            let x = rng.gen::<u64>();

            let mut prover_transcript = Transcript::new(b"test");
            let mut prover = Prover::new(&bp_gens, &pc_gens, &mut prover_transcript);

            let (in_pt, in_var) = prover.commit(x.into(), Scalar::random(&mut rng));
            let (out_pts, out_vars) = (0..64)
                .map(|i| prover.commit(((x >> i) & 1).into(), Scalar::random(&mut rng)))
                .unzip::<_, _, Vec<_>, Vec<_>>();

            let result = scalar_to_bits_le(&mut prover, 64, in_var.into()).unwrap();
            for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
                prover.constrain(wire1 - wire2);
            }

            let proof = prover.prove().unwrap();

            let mut verifier_transcript = Transcript::new(b"test");
            let mut verifier = Verifier::new(&bp_gens, &pc_gens, &mut verifier_transcript);

            let in_var = verifier.commit(in_pt);
            let out_vars = out_pts
                .into_iter()
                .map(|out_pt| verifier.commit(out_pt))
                .collect::<Vec<_>>();

            let result = scalar_to_bits_le(&mut verifier, 64, in_var.into()).unwrap();
            for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
                verifier.constrain(wire1 - wire2);
            }

            assert!(verifier.verify(&proof).is_ok());
        }
    }

    #[test]
    fn test_lt_gadget() {
        let pc_gens = PedersenGens::default();
        let bp_gens = BulletproofGens::new(512, 1);

        let mut rng = rand::thread_rng();

        for _ in 0..100 {
            let x1 = rng.gen::<u64>();
            let x2 = rng.gen::<u64>();
            let expected_out = if x1 < x2 { 1u64 } else { 0u64 };

            let mut prover_transcript = Transcript::new(b"test");
            let mut prover = Prover::new(&bp_gens, &pc_gens, &mut prover_transcript);

            let (in1_pt, in1_var) = prover.commit(x1.into(), Scalar::random(&mut rng));
            let (in2_pt, in2_var) = prover.commit(x2.into(), Scalar::random(&mut rng));
            let (out_pt, out_var) = prover.commit(expected_out.into(), Scalar::random(&mut rng));

            let result = lt_gate(&mut prover, 64, in1_var.into(), in2_var.into()).unwrap();
            prover.constrain(result - out_var);

            let proof = prover.prove().unwrap();

            let mut verifier_transcript = Transcript::new(b"test");
            let mut verifier = Verifier::new(&bp_gens, &pc_gens, &mut verifier_transcript);

            let in1_var = verifier.commit(in1_pt);
            let in2_var = verifier.commit(in2_pt);
            let out_var = verifier.commit(out_pt);

            let result = lt_gate(&mut verifier, 64, in1_var.into(), in2_var.into()).unwrap();
            verifier.constrain(result - out_var);

            assert!(verifier.verify(&proof).is_ok());
        }
    }
}
in1_var.into(), in2_var.into() ).unwrap(); verifier.constrain(result - out_var); assert!(verifier.verify(&proof).is_ok()); } } }
{ let lhs_bits = scalar_to_bits_le(cs, n_bits, lhs)?; let rhs_bits = scalar_to_bits_le(cs, n_bits, rhs)?; let zero = LinearCombination::default(); // Iterate through bits from most significant to least, comparing each pair. let (lt, _) = lhs_bits.into_iter().zip(rhs_bits.into_iter()) .rev() .fold((zero.clone(), zero.clone()), |(lt, gt), (l_bit, r_bit)| { // lt and gt are boolean LinearCombinations that are 1 if lhs < rhs or lhs > rhs // respectively after the first i most significant bits. // Invariant: lt & gt will never both be 1, so (lt || gt) = (lt + gt). // eq = !(lt || gt) let eq = negate_bit(lt.clone() + gt.clone()); // Whether left bit i is < or > right bit i. // bit_lt = !l_bit && r_bit = (1 - l_bit) * r_bit // bit_gt = l_bit && !r_bit = l_bit * (1 - r_bit)
identifier_body
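The witness-validity test inside `scalar_to_bits_le` is easy to check in isolation: a little-endian 256-bit representation fits in `n_bits` exactly when every bit from `n_bits` up to 255 is zero, so the check has to quantify over all of the high bits. A standalone sketch over plain byte arrays (no `curve25519_dalek` types; the helper names are ours):

```rust
// Little-endian bit access over a 32-byte (256-bit) representation,
// mirroring the `get_bit` closure in `scalar_to_bits_le`.
fn get_bit(repr: &[u8; 32], i: usize) -> u8 {
    (repr[i >> 3] >> (i & 7)) & 1
}

// A value is representable in `n_bits` iff all higher bits are zero.
fn fits_in_bits(repr: &[u8; 32], n_bits: usize) -> bool {
    (n_bits..256).all(|i| get_bit(repr, i) == 0)
}

fn main() {
    let mut repr = [0u8; 32];
    repr[8] = 1; // bit 64 set, i.e. the value 2^64
    assert!(fits_in_bits(&repr, 65));
    assert!(!fits_in_bits(&repr, 64)); // 2^64 does not fit in 64 bits
}
```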
main.rs
extern crate rand; extern crate palette; use std::fs::File; use std::io::prelude::*; use std::path::Path; extern crate clap; use std::thread; use std::sync::mpsc; extern crate rayon; use rayon::prelude::*; mod kohonen_neuron; //use kohonen_neuron::rgb_vector_neuron; mod kohonen; use kohonen::Kohonen; mod sphere_of_influence; /** note: the energy coefficient should be from [0, 1] and should take into account both * distance from the BMU and color disparity */ pub fn get_within_radius<T>(net: &Kohonen<T>, pos: (usize, usize), radius: i32) -> std::vec::Vec<(usize, usize, f64)> where T: kohonen_neuron::KohonenNeuron { let mut rv = Vec::new(); let (r, c) = pos; let bmu = &net[r][c]; for r2 in 0..net.rows { for c2 in 0..net.cols { let comp1 = (r as f64) - (r2 as f64); let comp2 = (c as f64) - (c2 as f64); let distance = ((comp1 * comp1) + (comp2 * comp2)).sqrt(); if distance < (radius as f64) { let color_dist = bmu.distance(&net[r2][c2]); let energy = (distance / radius as f64) * (1.0 - color_dist); rv.push((r2, c2, energy)) } } } rv } pub fn get_neuron_neighbors<T>(net: &Kohonen<T>, pos: (usize, usize)) -> [(usize, usize); 8] where T: kohonen_neuron::KohonenNeuron { let (r, c) = pos; let rows = net.rows; let cols = net.cols; assert_eq!(rows, cols); let prev = |x| { if x as i32 - 1 < 0 { rows - 1 } else { x - 1 } }; let next = |x| (x + 1) % rows; [ (prev(r), prev(c)), (prev(r), c), (prev(r), next(c)), (r, prev(c)), (r, next(c)), (next(r), prev(c)), (next(r), c), (next(r), next(c)) ] } /** * @returns a vector of triples consisting of (row, col, energy coefficient from [0, 1]) */ pub fn get_within_radius_fluid<T>( net: &Kohonen<T>, pos: (usize, usize), radius: i32, bucket_decay: f64) -> std::vec::Vec<(usize, usize, f64)> where T: kohonen_neuron::KohonenNeuron { use std::collections::{HashSet, HashMap}; fn fluid_collect<T: kohonen_neuron::KohonenNeuron>( net: &Kohonen<T>, pos: (usize, usize), range: i32, pow_exp: f64) -> Vec<(usize, usize, f64)> { let (ro, co) = pos; // use variant of Dijkstra's algorithm to produce the shortest-path tree, then // prune that tree let mut unvisited_nodes = HashSet::new(); for r in 0..net.rows { for c in 0..net.cols { unvisited_nodes.insert((r, c)); } } let inf = 0.0; let mut energies: HashMap<(usize, usize), f64> = unvisited_nodes.clone().into_iter() .map(|cur_pos| if cur_pos!= pos { (cur_pos, inf) } else { let (cur_r, cur_c) = cur_pos; (pos, (1.0 - net[ro][co].distance(&net[cur_r][cur_c]).powf(pow_exp))) }) .collect(); let mut current = pos; while unvisited_nodes.len() > 0 { let neighbours = get_neuron_neighbors(net, current); let unvisited_neighbours: Vec<(usize, usize)> = neighbours.iter() .filter(|neighbour| unvisited_nodes.contains(*neighbour)) .map(|pos| *pos) .collect(); let current_dist = *energies.get(&current).unwrap(); { let _res: Vec<(usize, usize)> = unvisited_neighbours.clone().into_iter().map( |(r, c)| { let decay = 1.0 - 1.0 / range as f64; let new_dist = (1.0 - net[ro][co].distance(&net[r][c]).powf(pow_exp)) * current_dist * decay; let old_dist = *energies.get(&(r, c)).unwrap(); if new_dist > old_dist { energies.remove(&(r, c)); energies.insert((r, c), new_dist); }; (r, c) }) .collect(); }; let old_len = unvisited_nodes.len(); unvisited_nodes.remove(&current); assert!(old_len > unvisited_nodes.len()); if unvisited_nodes.len() > 0 { let old_cur = current; current = unvisited_nodes.clone().into_iter().fold( None, |acc, cand| match acc { None => Some(cand), Some(pos) => if energies.get(&cand) > energies.get(&pos) { Some(cand) } else { acc }, }) 
.unwrap(); assert!(old_cur!= current); }; } energies.into_iter() .filter(|(_pos, energy)| range as f64 * energy >= 1.0) .map(|((r, c), energy)| (r, c, /*range as f64 * */energy)) .collect() } let collected = fluid_collect(net, pos, radius, bucket_decay); collected .into_iter() /*.map(|(r, c, local_range)| { (r, c, radius - local_range) })*/ .collect() } pub fn feed_sample<T>( net: &mut Kohonen<T>, sample: &T, rate: f64, radius: i32, associate: sphere_of_influence::AssociationKind) -> () where T: kohonen_neuron::KohonenNeuron { let (r, c, bmu_dist) = kohonen::get_bmu(net, sample); let bmu_pos = (r, c); let items = match associate { sphere_of_influence::AssociationKind::Bucket(bucket_decay) => get_within_radius_fluid(net, (r, c), radius, bucket_decay), sphere_of_influence::AssociationKind::Euclidean => get_within_radius(net, (r, c), radius), }; let mut displaced = 0.0; for i in 0..items.len() { let (r, c, item_dist) = items[i]; let dist = item_dist as f64 / radius as f64; let weight = (1.0 - dist).sqrt() * rate; let old = &net[r][c].clone(); let _ = (&mut net[r][c]).shift(&sample, weight); displaced = displaced + old.distance(&net[r][c]); if (r, c) == bmu_pos { //println!("\tweighting with {} at the BMU.", weight); } else { //println!("\tweighting with {} as {:?}.", weight, (r, c)); } } println!("\tDisplaced total of {} from {} items on a BMU of distance {}.", displaced, items.len(), bmu_dist); std::io::stdout().flush().unwrap(); thread::yield_now(); () } pub fn train<T>( net: Kohonen<T>, samples: &Vec<T>, rate: f64, radius: i32, associate: sphere_of_influence::AssociationKind) -> Kohonen<T> where T: kohonen_neuron::KohonenNeuron + Send + Sync + Clone +'static, Kohonen<T>: Send + Sync { let mut descs = Vec::new(); for i in 0..samples.len() { descs.push((net.clone(), samples[i].clone())); //feed_sample(net, &samples[i], rate, radius); } let nets: Vec<Kohonen<T>> = descs .par_iter() .map(|(my_net, sample)| { let associate = associate.clone(); let mut net = my_net.clone(); feed_sample(&mut net, &sample, rate, radius, associate); net }) .collect(); std::io::stdout().flush().unwrap(); kohonen::combine(net, nets) } pub fn
<T>( net: &Kohonen<T>, samples: &std::vec::Vec<T>, its: u32, associate: sphere_of_influence::AssociationKind) -> Kohonen<T> where T: kohonen_neuron::KohonenNeuron + Send + Sync +'static { let mut rv = net.clone(); let width = net.cols as f64; // training with a large fixed radius for a bit should help things get into // the right general places /*for _i in 0..(its / 2) { let radius = width / 2.0; let rate = 0.5; rv = train(rv.clone(), samples, rate, radius as i32, associate.clone()); } let its = its / 2 + (its % 2);*/ let time_constant = (its + 1) as f64 / width.ln(); for i in 0..its { let radius = width * (0.0 - (i as f64 + 1.0) / time_constant).exp(); //let radius = width / 2.0; let rate = (0.0 - (i as f64 + 1.0) / time_constant).exp().sqrt(); //let rate = 0.75; println!("Radius: {radius}, rate: {rate}", radius=radius, rate=rate); std::io::stdout().flush().unwrap(); let net2 = rv.clone(); rv = train(net2, samples, rate, radius.ceil() as i32, associate.clone()) } rv } pub fn show<T: kohonen_neuron::KohonenNeuron>(net: &Kohonen<T>, path: &str) { let rows = net.rows; let cols = net.cols; let path = Path::new(path); let mut os = match File::create(&path) { Err(why) => panic!("couldn't make file pls halp: {}", why), Ok(file) => file, }; let _ = os.write_all("P6\n".as_bytes()); let _ = os.write_all((cols as u64).to_string().as_bytes()); let _ = os.write_all(" ".as_bytes()); let _ = os.write_all((rows as u64).to_string().as_bytes()); let _ = os.write_all("\n255\n".as_bytes()); for r in 0..rows { for c in 0..cols { let (r, g, b) = net[r][c].get_rgb(); let _ = os.write_all(&[r, g, b]); } } } /*pub fn show_csv<T: kohonen_neuron::KohonenNeuron>(net: &kohonen<T>, path: &str) { }*/ fn main() { use clap::{Arg, App}; let matches = App::new("kohonen") .version("0.1.0") .about("A Kohonen SOFM") .author("Fuck off") .arg(Arg::with_name("iterations") .short("its") .long("iterations") .help("How many training iterations to do") .takes_value(true)) .arg(Arg::with_name("dim") .short("dim") .long("dimension") .help("The size of the network") .takes_value(true)) .arg(Arg::with_name("associate") .short("a") .long("associate") .help("The association method") .default_value("bucket") .possible_values(&["bucket", "euclidean"]) .takes_value(true)) .arg(Arg::with_name("bucket decay") .long("bucket-decay") .help( "Exponentially affects how much energy it takes to overcome a higher \ difference. 
Lower values will keep spheres of influence small and \ tight, while higher ones (above 1.0) will allow greater spread.") .default_value("0.7") .takes_value(true)) .arg(Arg::with_name("colour model") .long("colour-model") .help("The color model to use.") .default_value("hsl") .possible_values(&["hsl", "rgb"]) .takes_value(true)) .arg(Arg::with_name("centroids") .long("centroids") .help("A list of centroids") .takes_value(true)) .get_matches(); let net_dim = str::parse::<u32>(matches.value_of("dim").unwrap()).unwrap(); let train_its = str::parse::<u32>(matches.value_of("iterations").unwrap()).unwrap(); let associate = sphere_of_influence::from_str(matches.value_of("associate").unwrap()).unwrap(); let bucket_decay = str::parse::<f64>(matches.value_of("bucket decay").unwrap()).unwrap(); let associate = match associate { sphere_of_influence::AssociationKind::Bucket(_) => sphere_of_influence::AssociationKind::Bucket(bucket_decay), _ => associate }; println!( "Building a Kohonen net of {dim}x{dim} and training it for {its} iterations.", dim=net_dim, its=train_its); let colors: Vec<[f64; 3]> = vec![ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.8, 0.8, 0.0], [0.0, 0.8, 0.8], [0.8, 0.0, 0.8], [0.4, 0.4, 0.4], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.66, 0.75], ]; match matches.value_of("colour model").unwrap() { "hsl" => { let mut net = kohonen::new(net_dim as usize); let old_net = net.clone(); let colors = colors .into_iter() .map(|[r, g, b]| palette::Hsl::from(palette::Srgb::new(r as f32, g as f32, b as f32))) .rev() .collect(); net = iter_train(&net, &colors, train_its, associate); println!("Overall displacement: {}", kohonen::disp(&old_net, &net)); let file = format!("./map_{its}its.ppm", its=train_its); show(&net, &file) }, "rgb" => { let mut net = kohonen::new(net_dim as usize); let old_net = net.clone(); net = iter_train(&net, &colors, train_its, associate); println!("Overall displacement: {}", kohonen::disp(&old_net, &net)); let file = format!("./map_{its}its.ppm", its=train_its); show(&net, &file) }, _ => () }; }
iter_train
identifier_name
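`get_neuron_neighbors` treats the grid as a torus: indices wrap at both edges, so every neuron has exactly eight Moore neighbours, including the ones in the corners. The wrapping arithmetic in isolation (a standalone sketch; the function names are ours):

```rust
// Toroidal index wrapping on an n-cell axis: decrementing 0 wraps to n - 1
// and incrementing n - 1 wraps to 0, matching `prev`/`next` in the source.
fn wrap_prev(x: usize, n: usize) -> usize {
    if x == 0 { n - 1 } else { x - 1 }
}

fn wrap_next(x: usize, n: usize) -> usize {
    (x + 1) % n
}

fn main() {
    let n = 5;
    assert_eq!(wrap_prev(0, n), 4); // left edge wraps to the right edge
    assert_eq!(wrap_next(4, n), 0); // right edge wraps to the left edge
    // The corner cell (0, 0) therefore still has a full 8-cell neighbourhood,
    // e.g. its upper-left neighbour is (4, 4).
    assert_eq!((wrap_prev(0, n), wrap_prev(0, n)), (4, 4));
}
```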
main.rs
extern crate rand; extern crate palette; use std::fs::File; use std::io::prelude::*; use std::path::Path; extern crate clap; use std::thread; use std::sync::mpsc; extern crate rayon; use rayon::prelude::*; mod kohonen_neuron; //use kohonen_neuron::rgb_vector_neuron; mod kohonen; use kohonen::Kohonen; mod sphere_of_influence; /** note: the energy coefficient should be from [0, 1] and should take into account both * distance from the BMU and color disparity */ pub fn get_within_radius<T>(net: &Kohonen<T>, pos: (usize, usize), radius: i32) -> std::vec::Vec<(usize, usize, f64)> where T: kohonen_neuron::KohonenNeuron { let mut rv = Vec::new(); let (r, c) = pos; let bmu = &net[r][c]; for r2 in 0..net.rows { for c2 in 0..net.cols { let comp1 = (r as f64) - (r2 as f64); let comp2 = (c as f64) - (c2 as f64); let distance = ((comp1 * comp1) + (comp2 * comp2)).sqrt(); if distance < (radius as f64) { let color_dist = bmu.distance(&net[r2][c2]); let energy = (distance / radius as f64) * (1.0 - color_dist); rv.push((r2, c2, energy)) } } } rv } pub fn get_neuron_neighbors<T>(net: &Kohonen<T>, pos: (usize, usize)) -> [(usize, usize); 8] where T: kohonen_neuron::KohonenNeuron { let (r, c) = pos; let rows = net.rows; let cols = net.cols; assert_eq!(rows, cols); let prev = |x| { if x as i32 - 1 < 0 { rows - 1 } else { x - 1 } }; let next = |x| (x + 1) % rows; [ (prev(r), prev(c)), (prev(r), c), (prev(r), next(c)), (r, prev(c)), (r, next(c)), (next(r), prev(c)), (next(r), c), (next(r), next(c)) ] } /** * @returns a vector of triples consisting of (row, col, energy coefficient from [0, 1]) */ pub fn get_within_radius_fluid<T>( net: &Kohonen<T>, pos: (usize, usize), radius: i32, bucket_decay: f64) -> std::vec::Vec<(usize, usize, f64)> where T: kohonen_neuron::KohonenNeuron { use std::collections::{HashSet, HashMap}; fn fluid_collect<T: kohonen_neuron::KohonenNeuron>( net: &Kohonen<T>, pos: (usize, usize), range: i32, pow_exp: f64) -> Vec<(usize, usize, f64)> { let (ro, co) = pos; // use variant of Dijkstra's algorithm to produce the shortest-path tree, then // prune that tree let mut unvisited_nodes = HashSet::new(); for r in 0..net.rows { for c in 0..net.cols { unvisited_nodes.insert((r, c)); } } let inf = 0.0; let mut energies: HashMap<(usize, usize), f64> = unvisited_nodes.clone().into_iter() .map(|cur_pos| if cur_pos!= pos { (cur_pos, inf) } else { let (cur_r, cur_c) = cur_pos; (pos, (1.0 - net[ro][co].distance(&net[cur_r][cur_c]).powf(pow_exp))) }) .collect(); let mut current = pos; while unvisited_nodes.len() > 0 { let neighbours = get_neuron_neighbors(net, current); let unvisited_neighbours: Vec<(usize, usize)> = neighbours.iter() .filter(|neighbour| unvisited_nodes.contains(*neighbour)) .map(|pos| *pos) .collect(); let current_dist = *energies.get(&current).unwrap(); { let _res: Vec<(usize, usize)> = unvisited_neighbours.clone().into_iter().map( |(r, c)| { let decay = 1.0 - 1.0 / range as f64; let new_dist = (1.0 - net[ro][co].distance(&net[r][c]).powf(pow_exp)) * current_dist * decay; let old_dist = *energies.get(&(r, c)).unwrap(); if new_dist > old_dist { energies.remove(&(r, c)); energies.insert((r, c), new_dist); }; (r, c) }) .collect(); }; let old_len = unvisited_nodes.len(); unvisited_nodes.remove(&current); assert!(old_len > unvisited_nodes.len()); if unvisited_nodes.len() > 0 { let old_cur = current; current = unvisited_nodes.clone().into_iter().fold( None, |acc, cand| match acc { None => Some(cand), Some(pos) => if energies.get(&cand) > energies.get(&pos) { Some(cand) } else { acc }, }) 
.unwrap(); assert!(old_cur!= current); }; } energies.into_iter() .filter(|(_pos, energy)| range as f64 * energy >= 1.0) .map(|((r, c), energy)| (r, c, /*range as f64 * */energy)) .collect() } let collected = fluid_collect(net, pos, radius, bucket_decay); collected .into_iter() /*.map(|(r, c, local_range)| { (r, c, radius - local_range) })*/ .collect() } pub fn feed_sample<T>( net: &mut Kohonen<T>, sample: &T, rate: f64, radius: i32, associate: sphere_of_influence::AssociationKind) -> () where T: kohonen_neuron::KohonenNeuron { let (r, c, bmu_dist) = kohonen::get_bmu(net, sample); let bmu_pos = (r, c); let items = match associate { sphere_of_influence::AssociationKind::Bucket(bucket_decay) => get_within_radius_fluid(net, (r, c), radius, bucket_decay), sphere_of_influence::AssociationKind::Euclidean => get_within_radius(net, (r, c), radius), }; let mut displaced = 0.0; for i in 0..items.len() { let (r, c, item_dist) = items[i]; let dist = item_dist as f64 / radius as f64; let weight = (1.0 - dist).sqrt() * rate; let old = &net[r][c].clone(); let _ = (&mut net[r][c]).shift(&sample, weight); displaced = displaced + old.distance(&net[r][c]); if (r, c) == bmu_pos { //println!("\tweighting with {} at the BMU.", weight); } else { //println!("\tweighting with {} as {:?}.", weight, (r, c)); } } println!("\tDisplaced total of {} from {} items on a BMU of distance {}.", displaced, items.len(), bmu_dist); std::io::stdout().flush().unwrap(); thread::yield_now(); () } pub fn train<T>( net: Kohonen<T>, samples: &Vec<T>, rate: f64, radius: i32, associate: sphere_of_influence::AssociationKind) -> Kohonen<T> where T: kohonen_neuron::KohonenNeuron + Send + Sync + Clone +'static,
} let nets: Vec<Kohonen<T>> = descs .par_iter() .map(|(my_net, sample)| { let associate = associate.clone(); let mut net = my_net.clone(); feed_sample(&mut net, &sample, rate, radius, associate); net }) .collect(); std::io::stdout().flush().unwrap(); kohonen::combine(net, nets) } pub fn iter_train<T>( net: &Kohonen<T>, samples: &std::vec::Vec<T>, its: u32, associate: sphere_of_influence::AssociationKind) -> Kohonen<T> where T: kohonen_neuron::KohonenNeuron + Send + Sync +'static { let mut rv = net.clone(); let width = net.cols as f64; // training with a large fixed radius for a bit should help things get into // the right general places /*for _i in 0..(its / 2) { let radius = width / 2.0; let rate = 0.5; rv = train(rv.clone(), samples, rate, radius as i32, associate.clone()); } let its = its / 2 + (its % 2);*/ let time_constant = (its + 1) as f64 / width.ln(); for i in 0..its { let radius = width * (0.0 - (i as f64 + 1.0) / time_constant).exp(); //let radius = width / 2.0; let rate = (0.0 - (i as f64 + 1.0) / time_constant).exp().sqrt(); //let rate = 0.75; println!("Radius: {radius}, rate: {rate}", radius=radius, rate=rate); std::io::stdout().flush().unwrap(); let net2 = rv.clone(); rv = train(net2, samples, rate, radius.ceil() as i32, associate.clone()) } rv } pub fn show<T: kohonen_neuron::KohonenNeuron>(net: &Kohonen<T>, path: &str) { let rows = net.rows; let cols = net.cols; let path = Path::new(path); let mut os = match File::create(&path) { Err(why) => panic!("couldn't make file pls halp: {}", why), Ok(file) => file, }; let _ = os.write_all("P6\n".as_bytes()); let _ = os.write_all((cols as u64).to_string().as_bytes()); let _ = os.write_all(" ".as_bytes()); let _ = os.write_all((rows as u64).to_string().as_bytes()); let _ = os.write_all("\n255\n".as_bytes()); for r in 0..rows { for c in 0..cols { let (r, g, b) = net[r][c].get_rgb(); let _ = os.write_all(&[r, g, b]); } } } /*pub fn show_csv<T: kohonen_neuron::KohonenNeuron>(net: &kohonen<T>, path: &str) { }*/ fn main() { use clap::{Arg, App}; let matches = App::new("kohonen") .version("0.1.0") .about("A Kohonen SOFM") .author("Fuck off") .arg(Arg::with_name("iterations") .short("its") .long("iterations") .help("How many training iterations to do") .takes_value(true)) .arg(Arg::with_name("dim") .short("dim") .long("dimension") .help("The size of the network") .takes_value(true)) .arg(Arg::with_name("associate") .short("a") .long("associate") .help("The association method") .default_value("bucket") .possible_values(&["bucket", "euclidean"]) .takes_value(true)) .arg(Arg::with_name("bucket decay") .long("bucket-decay") .help( "Exponentially affects how much energy it takes to overcome a higher \ difference. 
Lower values will keep spheres of influence small and \ tight, while higher ones (above 1.0) will allow greater spread.") .default_value("0.7") .takes_value(true)) .arg(Arg::with_name("colour model") .long("colour-model") .help("The color model to use.") .default_value("hsl") .possible_values(&["hsl", "rgb"]) .takes_value(true)) .arg(Arg::with_name("centroids") .long("centroids") .help("A list of centroids") .takes_value(true)) .get_matches(); let net_dim = str::parse::<u32>(matches.value_of("dim").unwrap()).unwrap(); let train_its = str::parse::<u32>(matches.value_of("iterations").unwrap()).unwrap(); let associate = sphere_of_influence::from_str(matches.value_of("associate").unwrap()).unwrap(); let bucket_decay = str::parse::<f64>(matches.value_of("bucket decay").unwrap()).unwrap(); let associate = match associate { sphere_of_influence::AssociationKind::Bucket(_) => sphere_of_influence::AssociationKind::Bucket(bucket_decay), _ => associate }; println!( "Building a Kohonen net of {dim}x{dim} and training it for {its} iterations.", dim=net_dim, its=train_its); let colors: Vec<[f64; 3]> = vec![ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.8, 0.8, 0.0], [0.0, 0.8, 0.8], [0.8, 0.0, 0.8], [0.4, 0.4, 0.4], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.66, 0.75], ]; match matches.value_of("colour model").unwrap() { "hsl" => { let mut net = kohonen::new(net_dim as usize); let old_net = net.clone(); let colors = colors .into_iter() .map(|[r, g, b]| palette::Hsl::from(palette::Srgb::new(r as f32, g as f32, b as f32))) .rev() .collect(); net = iter_train(&net, &colors, train_its, associate); println!("Overall displacement: {}", kohonen::disp(&old_net, &net)); let file = format!("./map_{its}its.ppm", its=train_its); show(&net, &file) }, "rgb" => { let mut net = kohonen::new(net_dim as usize); let old_net = net.clone(); net = iter_train(&net, &colors, train_its, associate); println!("Overall displacement: {}", kohonen::disp(&old_net, &net)); let file = format!("./map_{its}its.ppm", its=train_its); show(&net, &file) }, _ => () }; }
Kohonen<T>: Send + Sync { let mut descs = Vec::new(); for i in 0..samples.len() { descs.push((net.clone(), samples[i].clone())); //feed_sample(net, &samples[i], rate, radius);
random_line_split
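`train` is a map-reduce over the sample set rather than sequential online training: every sample gets its own clone of the network, the clones are shifted independently on rayon workers, and `kohonen::combine` merges them back into one net. The shape of that pattern, reduced to a scalar "network" so it runs without the crate's types (all names here are illustrative):

```rust
use rayon::prelude::*;

// Map-reduce training skeleton: each sample perturbs its own copy of the
// state in parallel; the copies are then averaged back together, standing
// in for `kohonen::combine`.
fn train_step(state: f64, samples: &[f64], rate: f64) -> f64 {
    let shifted: Vec<f64> = samples
        .par_iter()
        .map(|s| state + rate * (s - state)) // per-sample clone + shift
        .collect();
    shifted.iter().sum::<f64>() / shifted.len() as f64
}

fn main() {
    let samples = [0.0, 1.0, 2.0, 3.0];
    println!("combined state: {}", train_step(10.0, &samples, 0.5));
}
```

The trade-off mirrors the source: cloning per sample costs memory and a merge step, but removes write contention on the shared net.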
main.rs
extern crate rand; extern crate palette; use std::fs::File; use std::io::prelude::*; use std::path::Path; extern crate clap; use std::thread; use std::sync::mpsc; extern crate rayon; use rayon::prelude::*; mod kohonen_neuron; //use kohonen_neuron::rgb_vector_neuron; mod kohonen; use kohonen::Kohonen; mod sphere_of_influence; /** note: the energy coefficient should be from [0, 1] and should take into account both * distance from the BMU and color disparity */ pub fn get_within_radius<T>(net: &Kohonen<T>, pos: (usize, usize), radius: i32) -> std::vec::Vec<(usize, usize, f64)> where T: kohonen_neuron::KohonenNeuron { let mut rv = Vec::new(); let (r, c) = pos; let bmu = &net[r][c]; for r2 in 0..net.rows { for c2 in 0..net.cols { let comp1 = (r as f64) - (r2 as f64); let comp2 = (c as f64) - (c2 as f64); let distance = ((comp1 * comp1) + (comp2 * comp2)).sqrt(); if distance < (radius as f64) { let color_dist = bmu.distance(&net[r2][c2]); let energy = (distance / radius as f64) * (1.0 - color_dist); rv.push((r2, c2, energy)) } } } rv } pub fn get_neuron_neighbors<T>(net: &Kohonen<T>, pos: (usize, usize)) -> [(usize, usize); 8] where T: kohonen_neuron::KohonenNeuron { let (r, c) = pos; let rows = net.rows; let cols = net.cols; assert_eq!(rows, cols); let prev = |x| { if x as i32 - 1 < 0 { rows - 1 } else { x - 1 } }; let next = |x| (x + 1) % rows; [ (prev(r), prev(c)), (prev(r), c), (prev(r), next(c)), (r, prev(c)), (r, next(c)), (next(r), prev(c)), (next(r), c), (next(r), next(c)) ] } /** * @returns a vector of triples consisting of (row, col, energy coefficient from [0, 1]) */ pub fn get_within_radius_fluid<T>( net: &Kohonen<T>, pos: (usize, usize), radius: i32, bucket_decay: f64) -> std::vec::Vec<(usize, usize, f64)> where T: kohonen_neuron::KohonenNeuron { use std::collections::{HashSet, HashMap}; fn fluid_collect<T: kohonen_neuron::KohonenNeuron>( net: &Kohonen<T>, pos: (usize, usize), range: i32, pow_exp: f64) -> Vec<(usize, usize, f64)> { let (ro, co) = pos; // use variant of Dijkstra's algorithm to produce the shortest-path tree, then // prune that tree let mut unvisited_nodes = HashSet::new(); for r in 0..net.rows { for c in 0..net.cols { unvisited_nodes.insert((r, c)); } } let inf = 0.0; let mut energies: HashMap<(usize, usize), f64> = unvisited_nodes.clone().into_iter() .map(|cur_pos| if cur_pos!= pos { (cur_pos, inf) } else { let (cur_r, cur_c) = cur_pos; (pos, (1.0 - net[ro][co].distance(&net[cur_r][cur_c]).powf(pow_exp))) }) .collect(); let mut current = pos; while unvisited_nodes.len() > 0 { let neighbours = get_neuron_neighbors(net, current); let unvisited_neighbours: Vec<(usize, usize)> = neighbours.iter() .filter(|neighbour| unvisited_nodes.contains(*neighbour)) .map(|pos| *pos) .collect(); let current_dist = *energies.get(&current).unwrap(); { let _res: Vec<(usize, usize)> = unvisited_neighbours.clone().into_iter().map( |(r, c)| { let decay = 1.0 - 1.0 / range as f64; let new_dist = (1.0 - net[ro][co].distance(&net[r][c]).powf(pow_exp)) * current_dist * decay; let old_dist = *energies.get(&(r, c)).unwrap(); if new_dist > old_dist { energies.remove(&(r, c)); energies.insert((r, c), new_dist); }; (r, c) }) .collect(); }; let old_len = unvisited_nodes.len(); unvisited_nodes.remove(&current); assert!(old_len > unvisited_nodes.len()); if unvisited_nodes.len() > 0 { let old_cur = current; current = unvisited_nodes.clone().into_iter().fold( None, |acc, cand| match acc { None => Some(cand), Some(pos) => if energies.get(&cand) > energies.get(&pos) { Some(cand) } else { acc }, }) 
.unwrap(); assert!(old_cur!= current); }; } energies.into_iter() .filter(|(_pos, energy)| range as f64 * energy >= 1.0) .map(|((r, c), energy)| (r, c, /*range as f64 * */energy)) .collect() } let collected = fluid_collect(net, pos, radius, bucket_decay); collected .into_iter() /*.map(|(r, c, local_range)| { (r, c, radius - local_range) })*/ .collect() } pub fn feed_sample<T>( net: &mut Kohonen<T>, sample: &T, rate: f64, radius: i32, associate: sphere_of_influence::AssociationKind) -> () where T: kohonen_neuron::KohonenNeuron { let (r, c, bmu_dist) = kohonen::get_bmu(net, sample); let bmu_pos = (r, c); let items = match associate { sphere_of_influence::AssociationKind::Bucket(bucket_decay) => get_within_radius_fluid(net, (r, c), radius, bucket_decay), sphere_of_influence::AssociationKind::Euclidean => get_within_radius(net, (r, c), radius), }; let mut displaced = 0.0; for i in 0..items.len() { let (r, c, item_dist) = items[i]; let dist = item_dist as f64 / radius as f64; let weight = (1.0 - dist).sqrt() * rate; let old = &net[r][c].clone(); let _ = (&mut net[r][c]).shift(&sample, weight); displaced = displaced + old.distance(&net[r][c]); if (r, c) == bmu_pos { //println!("\tweighting with {} at the BMU.", weight); } else { //println!("\tweighting with {} as {:?}.", weight, (r, c)); } } println!("\tDisplaced total of {} from {} items on a BMU of distance {}.", displaced, items.len(), bmu_dist); std::io::stdout().flush().unwrap(); thread::yield_now(); () } pub fn train<T>( net: Kohonen<T>, samples: &Vec<T>, rate: f64, radius: i32, associate: sphere_of_influence::AssociationKind) -> Kohonen<T> where T: kohonen_neuron::KohonenNeuron + Send + Sync + Clone +'static, Kohonen<T>: Send + Sync { let mut descs = Vec::new(); for i in 0..samples.len() { descs.push((net.clone(), samples[i].clone())); //feed_sample(net, &samples[i], rate, radius); } let nets: Vec<Kohonen<T>> = descs .par_iter() .map(|(my_net, sample)| { let associate = associate.clone(); let mut net = my_net.clone(); feed_sample(&mut net, &sample, rate, radius, associate); net }) .collect(); std::io::stdout().flush().unwrap(); kohonen::combine(net, nets) } pub fn iter_train<T>( net: &Kohonen<T>, samples: &std::vec::Vec<T>, its: u32, associate: sphere_of_influence::AssociationKind) -> Kohonen<T> where T: kohonen_neuron::KohonenNeuron + Send + Sync +'static
rv = train(net2, samples, rate, radius.ceil() as i32, associate.clone()) } rv } pub fn show<T: kohonen_neuron::KohonenNeuron>(net: &Kohonen<T>, path: &str) { let rows = net.rows; let cols = net.cols; let path = Path::new(path); let mut os = match File::create(&path) { Err(why) => panic!("couldn't make file pls halp: {}", why), Ok(file) => file, }; let _ = os.write_all("P6\n".as_bytes()); let _ = os.write_all((cols as u64).to_string().as_bytes()); let _ = os.write_all(" ".as_bytes()); let _ = os.write_all((rows as u64).to_string().as_bytes()); let _ = os.write_all("\n255\n".as_bytes()); for r in 0..rows { for c in 0..cols { let (r, g, b) = net[r][c].get_rgb(); let _ = os.write_all(&[r, g, b]); } } } /*pub fn show_csv<T: kohonen_neuron::KohonenNeuron>(net: &kohonen<T>, path: &str) { }*/ fn main() { use clap::{Arg, App}; let matches = App::new("kohonen") .version("0.1.0") .about("A Kohonen SOFM") .author("Fuck off") .arg(Arg::with_name("iterations") .short("its") .long("iterations") .help("How many training iterations to do") .takes_value(true)) .arg(Arg::with_name("dim") .short("dim") .long("dimension") .help("The size of the network") .takes_value(true)) .arg(Arg::with_name("associate") .short("a") .long("associate") .help("The association method") .default_value("bucket") .possible_values(&["bucket", "euclidean"]) .takes_value(true)) .arg(Arg::with_name("bucket decay") .long("bucket-decay") .help( "Exponentially affects how much energy it takes to overcome a higher \ difference. Lower values will keep spheres of influence small and \ tight, while higher ones (above 1.0) will allow greater spread.") .default_value("0.7") .takes_value(true)) .arg(Arg::with_name("colour model") .long("colour-model") .help("The color model to use.") .default_value("hsl") .possible_values(&["hsl", "rgb"]) .takes_value(true)) .arg(Arg::with_name("centroids") .long("centroids") .help("A list of centroids") .takes_value(true)) .get_matches(); let net_dim = str::parse::<u32>(matches.value_of("dim").unwrap()).unwrap(); let train_its = str::parse::<u32>(matches.value_of("iterations").unwrap()).unwrap(); let associate = sphere_of_influence::from_str(matches.value_of("associate").unwrap()).unwrap(); let bucket_decay = str::parse::<f64>(matches.value_of("bucket decay").unwrap()).unwrap(); let associate = match associate { sphere_of_influence::AssociationKind::Bucket(_) => sphere_of_influence::AssociationKind::Bucket(bucket_decay), _ => associate }; println!( "Building a Kohonen net of {dim}x{dim} and training it for {its} iterations.", dim=net_dim, its=train_its); let colors: Vec<[f64; 3]> = vec![ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.8, 0.8, 0.0], [0.0, 0.8, 0.8], [0.8, 0.0, 0.8], [0.4, 0.4, 0.4], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.66, 0.75], ]; match matches.value_of("colour model").unwrap() { "hsl" => { let mut net = kohonen::new(net_dim as usize); let old_net = net.clone(); let colors = colors .into_iter() .map(|[r, g, b]| palette::Hsl::from(palette::Srgb::new(r as f32, g as f32, b as f32))) .rev() .collect(); net = iter_train(&net, &colors, train_its, associate); println!("Overall displacement: {}", kohonen::disp(&old_net, &net)); let file = format!("./map_{its}its.ppm", its=train_its); show(&net, &file) }, "rgb" => { let mut net = kohonen::new(net_dim as usize); let old_net = net.clone(); net = iter_train(&net, &colors, train_its, associate); println!("Overall displacement: {}", kohonen::disp(&old_net, &net)); let file = format!("./map_{its}its.ppm", its=train_its); show(&net, &file) }, _ => 
() }; }
{ let mut rv = net.clone(); let width = net.cols as f64; // training with a large fixed radius for a bit should help things get into // the right general places /*for _i in 0..(its / 2) { let radius = width / 2.0; let rate = 0.5; rv = train(rv.clone(), samples, rate, radius as i32, associate.clone()); } let its = its / 2 + (its % 2);*/ let time_constant = (its + 1) as f64 / width.ln(); for i in 0..its { let radius = width * (0.0 - (i as f64 + 1.0) / time_constant).exp(); //let radius = width / 2.0; let rate = (0.0 - (i as f64 + 1.0) / time_constant).exp().sqrt(); //let rate = 0.75; println!("Radius: {radius}, rate: {rate}", radius=radius, rate=rate); std::io::stdout().flush().unwrap(); let net2 = rv.clone();
identifier_body
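The loop body extracted above is the classic self-organizing-map annealing schedule: with time constant τ = (its + 1) / ln(width), the radius width · e^{-(i+1)/τ} shrinks from nearly the full grid width down to roughly one cell by the last iteration, while the learning rate follows the square root of the same curve and so decays more slowly. Printed on its own:

```rust
// Standalone view of the annealing schedule used by `iter_train`.
fn main() {
    let width: f64 = 32.0;
    let its = 10u32;
    let time_constant = (its + 1) as f64 / width.ln();
    for i in 0..its {
        let decay = (-(i as f64 + 1.0) / time_constant).exp();
        let radius = width * decay; // ~23.4 at i = 0, ~1.4 at i = 9
        let rate = decay.sqrt();
        println!("it {:2}: radius {:6.2}, rate {:.3}", i, radius, rate);
    }
}
```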
mod.rs
//! A stateless, layered, multithread video system with OpenGL backends. //! //! # Overview and Goals //! //! The management of video effects has become an important topic and key feature of //! rendering engines. With the increasing number of effects it is not sufficient anymore //! to only support them, but also to integrate them into the rendering engine in a clean //! and extensible way. //! //! The goal of this work and simultaneously its main contribution is to design and //! implement an advanced effects framework. Using this framework it should be easy for //! further applications to combine several small effects like texture mapping, shading //! and shadowing in an automated and transparent way and apply them to any 3D model. //! Additionally, it should be possible to integrate new effects and use the provided //! framework for rapid prototyping. //! //! ### Multi Platform //! //! Ideally, crayon should be able to run on macOS, Windows and popular mobile platforms. //! There are still a huge number of performance- and feature-limited devices, so this //! video module will always be limited by lower-end 3D APIs like OpenGL ES2.0. //! //! ### Stateless Pipeline //! //! An ordinary OpenGL application deals with stateful APIs, which is error-prone. This //! means whenever you change any state in the API for subsequent draw calls, this state //! change also affects draw calls submitted at a later point in time. Ideally, submitting //! a draw call with whatever state we want should not affect any of the other draw calls, //! even in multi-thread environments. //! //! Modern 3D APIs like [gfx-rs](https://github.com/gfx-rs/gfx) and [glium](https://github.com/glium/glium) //! bundle render state and data into a few precompiled resource objects which are //! combined into the final render pipeline. We should follow the same philosophy. //! //! ### Multi-thread //! //! In most cases, dividing OpenGL rendering across multiple threads will not result in //! any performance improvement due to the pipeline nature of OpenGL. What we are about //! to do is actually exploiting parallelism in resource preparation, while providing a set of //! multi-thread-friendly APIs. //! //! The most common solution is to use a double-buffer of commands. This consists of //! running the renderer backend in a separate thread, where all draw calls and communication //! with the OpenGL API are performed. The frontend thread that runs the game logic //! communicates with the backend renderer via a command double-buffer. //! //! ### Layered Rendering //! //! It's important to sort video commands (generated by different threads) before submitting //! them to OpenGL, for the sake of both correctness and performance. For example, to draw //! transparent objects via blending, we need to draw opaque objects first, usually from front-to-back, //! and then draw translucent ones from back-to-front. //! //! The idea here is to assign an integer key to a command which is used for sorting. Depending //! on where those bits are stored in the integer, you can apply different sorting criteria //! for the same array of commands, as long as you know how the keys were built. //! //! # Resource Objects //! //! Render state and data, which are combined into the final render pipeline, are bundled into a //! few precompiled resource objects in the video module. //! //! All resource types can be created instantly from data in memory, and meshes and textures //! can also be loaded asynchronously from the filesystem. //! //!
The actual resource objects are usually private and opaque; you will get a `Handle` //! immediately for every resource object you create instead of some kind of reference. //! It's the unique identifier for the resource, and it's type-safe and copyable. //! //! When you are done with the created resource objects, it's your responsibility to delete the //! resource object via its `Handle` to avoid leaks. //! //! Resources loaded from the filesystem can be safely shared by `Location`. We //! keep a use count internally; a resource will not actually be freed until all of its users //! have deleted their `Handle`s. //! //! ### Surface Object //! //! A surface object plays the `Layer` role we mentioned above; all the commands we submit //! in application code are attached to a specific `Surface`. Commands inside a `Surface` are //! sorted before being submitted to the underlying OpenGL. //! //! A surface object also holds references to its render target and wraps rendering operations to //! it, like clearing, offscreen rendering, MSAA resolve, etc. //! //! ```rust //! use crayon::prelude::*; //! application::oneshot().unwrap(); //! //! // Creates a `SurfaceParams` object. //! let mut params = SurfaceParams::default(); //! /// Sets the attachments of the internal frame-buffer. It consists of multiple color attachments //! /// and an optional `Depth/DepthStencil` buffer attachment. //! /// //! /// If no attachment is assigned, the default framebuffer generated by the system will be //! /// used. //! params.set_attachments(&[], None); //! // Sets the clear flags for this surface and its underlying framebuffer. //! params.set_clear(Color::white(), 1.0, None); //! //! // Creates a surface with `SurfaceParams`. //! let surface = video::create_surface(params).unwrap(); //! // Deletes the surface object. //! video::delete_surface(surface); //! ``` //! //! ### Shader Object //! //! The shader object is introduced to encapsulate all the stateful things we need to configure the //! video pipeline. This also enables us to easily change the order of draw calls //! and get rid of redundant state changes. //! //! ```rust //! use crayon::prelude::*; //! application::oneshot().unwrap(); //! //! // Declares the uniform variable layouts. //! let mut uniforms = UniformVariableLayout::build() //! .with("u_ModelViewMatrix", UniformVariableType::Matrix4f) //! .with("u_MVPMatrix", UniformVariableType::Matrix4f) //! .finish(); //! //! // Declares the attributes. //! let attributes = AttributeLayout::build() //! .with(Attribute::Position, 3) //! .with(Attribute::Normal, 3) //! .finish(); //! //! let mut params = ShaderParams::default(); //! params.attributes = attributes; //! params.uniforms = uniforms; //! params.state = RenderState::default(); //! //! let vs = "..".into(); //! let fs = "..".into(); //! //! // Create a shader with initial shaders and render state. It encapsulates all the //! // information we need to configure the graphics pipeline before real drawing. //! let shader = video::create_shader(params, vs, fs).unwrap(); //! //! // Deletes the shader object. //! video::delete_shader(shader); //! ``` //! //! ### Texture Object //! //! A texture object is a container of one or more images. It can be the source of a texture //! access from a Shader. //! //! ```rust //! use crayon::prelude::*; //! application::oneshot().unwrap(); //! //! let mut params = TextureParams::default(); //! //! // Create a texture object with optional data. You can fill it later with `update_texture`. //!
let texture = video::create_texture(params, None).unwrap(); //! //! // Deletes the texture object. //! video::delete_texture(texture); //! ``` //! //! #### Compressed Texture Format //! //! _TODO_: Cube texture. //! _TODO_: 3D texture. //! //! ### Mesh Object //! //! ```rust //! use crayon::prelude::*; //! application::oneshot().unwrap(); //! //! let mut params = MeshParams::default(); //! //! // Create a mesh object with optional data. You can fill it later with `update_mesh`. //! let mesh = video::create_mesh(params, None).unwrap(); //! //! // Deletes the mesh object. //! video::delete_mesh(mesh); //! ``` //! //! # Commands //! //! _TODO_: CommandBuffer //! _TODO_: DrawCommandBuffer /// Maximum number of attributes in vertex layout. pub const MAX_VERTEX_ATTRIBUTES: usize = 12; /// Maximum number of attachments in framebuffer. pub const MAX_FRAMEBUFFER_ATTACHMENTS: usize = 8; /// Maximum number of uniform variables in shader. pub const MAX_UNIFORM_VARIABLES: usize = 32; /// Maximum number of textures in shader. pub const MAX_UNIFORM_TEXTURE_SLOTS: usize = 8; #[macro_use] pub mod assets; pub mod command; pub mod errors; mod system; mod backends; pub mod prelude { pub use super::assets::prelude::*; pub use super::command::{CommandBuffer, Draw, DrawCommandBuffer}; } use std::sync::Arc; use uuid::Uuid; use crate::math::prelude::Aabb2; use crate::prelude::CrResult; use crate::res::utils::prelude::ResourceState; use crate::utils::double_buf::DoubleBuf; use self::assets::prelude::*; use self::backends::frame::Frame; use self::errors::*; use self::ins::{ctx, CTX}; use self::system::VideoSystem; /// Setup the video system. pub(crate) unsafe fn setup() -> CrResult<()> { debug_assert!(CTX.is_null(), "duplicated setup of video system."); let ctx = VideoSystem::new()?; CTX = Box::into_raw(Box::new(ctx)); Ok(()) } /// Setup the video system. pub(crate) unsafe fn headless() { debug_assert!(CTX.is_null(), "duplicated setup of video system."); let ctx = VideoSystem::headless(); CTX = Box::into_raw(Box::new(ctx)); } /// Discard the video system. pub(crate) unsafe fn discard() { if CTX.is_null()
drop(Box::from_raw(CTX as *mut VideoSystem)); CTX = std::ptr::null(); } pub(crate) unsafe fn frames() -> Arc<DoubleBuf<Frame>> { ctx().frames() } /// Creates a surface with `SurfaceParams`. #[inline] pub fn create_surface(params: SurfaceParams) -> Result<SurfaceHandle> { ctx().create_surface(params) } /// Gets the `SurfaceParams` if available. #[inline] pub fn surface(handle: SurfaceHandle) -> Option<SurfaceParams> { ctx().surface(handle) } /// Get the resource state of the specified surface. #[inline] pub fn surface_state(handle: SurfaceHandle) -> ResourceState { ctx().surface_state(handle) } /// Deletes the surface object. #[inline] pub fn delete_surface(handle: SurfaceHandle) { ctx().delete_surface(handle) } /// Create a shader with initial shaders and render state. It encapsulates all the /// information we need to configure the graphics pipeline before real drawing. #[inline] pub fn create_shader(params: ShaderParams, vs: String, fs: String) -> Result<ShaderHandle> { ctx().create_shader(params, vs, fs) } /// Gets the `ShaderParams` if available. #[inline] pub fn shader(handle: ShaderHandle) -> Option<ShaderParams> { ctx().shader(handle) } /// Get the resource state of the specified shader. #[inline] pub fn shader_state(handle: ShaderHandle) -> ResourceState { ctx().shader_state(handle) } /// Deletes the shader object. #[inline] pub fn delete_shader(handle: ShaderHandle) { ctx().delete_shader(handle) } /// Create a new mesh object. #[inline] pub fn create_mesh<T>(params: MeshParams, data: T) -> CrResult<MeshHandle> where T: Into<Option<MeshData>>, { ctx().create_mesh(params, data) } /// Creates a mesh object from file asynchronously. #[inline] pub fn create_mesh_from<T: AsRef<str>>(url: T) -> CrResult<MeshHandle> { ctx().create_mesh_from(url) } /// Creates a mesh object from a UUID asynchronously. #[inline] pub fn create_mesh_from_uuid(uuid: Uuid) -> CrResult<MeshHandle> { ctx().create_mesh_from_uuid(uuid) } /// Gets the `MeshParams` if available. #[inline] pub fn mesh(handle: MeshHandle) -> Option<MeshParams> { ctx().mesh(handle) } /// Get the resource state of the specified mesh. #[inline] pub fn mesh_state(handle: MeshHandle) -> ResourceState { ctx().mesh_state(handle) } /// Update a subset of a dynamic vertex buffer. `offset` specifies the offset /// into the buffer object's data store where data replacement will begin, measured /// in bytes. #[inline] pub fn update_vertex_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> { ctx().update_vertex_buffer(handle, offset, data) } /// Update a subset of a dynamic index buffer. `offset` specifies the offset /// into the buffer object's data store where data replacement will begin, measured /// in bytes. #[inline] pub fn update_index_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> { ctx().update_index_buffer(handle, offset, data) } /// Deletes the mesh object. #[inline] pub fn delete_mesh(handle: MeshHandle) { ctx().delete_mesh(handle); } /// Create a texture object. A texture is an image loaded in video memory, /// which can be sampled in shaders. #[inline] pub fn create_texture<T>(params: TextureParams, data: T) -> CrResult<TextureHandle> where T: Into<Option<TextureData>>, { ctx().create_texture(params, data) } /// Creates a texture object from file asynchronously. #[inline] pub fn create_texture_from<T: AsRef<str>>(url: T) -> CrResult<TextureHandle> { ctx().create_texture_from(url) } /// Creates a texture object from a UUID asynchronously.
#[inline] pub fn create_texture_from_uuid(uuid: Uuid) -> CrResult<TextureHandle> { ctx().create_texture_from_uuid(uuid) } /// Get the resource state of the specified texture. #[inline] pub fn texture_state(handle: TextureHandle) -> ResourceState { ctx().texture_state(handle) } /// Update a contiguous subregion of an existing two-dimensional texture object. #[inline] pub fn update_texture(handle: TextureHandle, area: Aabb2<u32>, data: &[u8]) -> CrResult<()> { ctx().update_texture(handle, area, data) } /// Delete the texture object. #[inline] pub fn delete_texture(handle: TextureHandle) { ctx().delete_texture(handle); } /// Gets the `TextureParams` if available. #[inline] pub fn texture(handle: TextureHandle) -> Option<TextureParams> { ctx().texture(handle) } /// Create a render texture object, which can be attached to a framebuffer. #[inline] pub fn create_render_texture(params: RenderTextureParams) -> Result<RenderTextureHandle> { ctx().create_render_texture(params) } /// Gets the `RenderTextureParams` if available. #[inline] pub fn render_texture(handle: RenderTextureHandle) -> Option<RenderTextureParams> { ctx().render_texture(handle) } /// Get the resource state of the specified render texture. #[inline] pub fn render_texture_state(handle: RenderTextureHandle) -> ResourceState { ctx().render_texture_state(handle) } /// Delete the render texture object. #[inline] pub fn delete_render_texture(handle: RenderTextureHandle) { ctx().delete_render_texture(handle) } mod ins { use super::system::VideoSystem; pub static mut CTX: *const VideoSystem = std::ptr::null(); #[inline] pub fn ctx() -> &'static VideoSystem { unsafe { debug_assert!( !CTX.is_null(), "video system has not been initialized properly." ); &*CTX } } }
{ return; }
conditional_block
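The module notes above motivate key-based command sorting but leave the key layout open. One conventional layout (purely illustrative, not crayon's actual encoding) packs the surface/layer id into the highest bits so commands group by surface first, then by shader, then by a coarse depth bucket:

```rust
// Hypothetical 64-bit draw-call sort key. Field placement decides sort
// priority: layer id dominates, then shader id, then a 24-bit depth bucket.
fn sort_key(layer: u8, shader: u16, depth_bucket: u32) -> u64 {
    ((layer as u64) << 56) | ((shader as u64) << 40) | ((depth_bucket as u64) & 0xFF_FFFF)
}

fn main() {
    let mut keys = vec![
        sort_key(1, 7, 100), // a later layer
        sort_key(0, 9, 50),
        sort_key(0, 2, 200),
    ];
    keys.sort_unstable(); // all layer-0 commands sort before layer 1,
                          // and within a layer they group by shader
    assert_eq!(keys[2], sort_key(1, 7, 100));
}
```

Flipping which field occupies the high bits (e.g. depth above shader for a back-to-front transparent pass) changes the sorting criterion without touching the command array, which is exactly the property the notes describe.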
mod.rs
//! A stateless, layered, multithread video system with OpenGL backends. //! //! # Overview and Goals //! //! The management of video effects has become an important topic and key feature of //! rendering engines. With the increasing number of effects it is not sufficient anymore //! to only support them, but also to integrate them into the rendering engine in a clean //! and extensible way. //! //! The goal of this work and simultaneously its main contribution is to design and //! implement an advanced effects framework. Using this framework it should be easy for //! further applications to combine several small effects like texture mapping, shading //! and shadowing in an automated and transparent way and apply them to any 3D model. //! Additionally, it should be possible to integrate new effects and use the provided //! framework for rapid prototyping. //! //! ### Multi Platform //! //! Ideally, crayon should be able to run on macOS, Windows and popular mobile platforms. //! There are still a huge number of performance- and feature-limited devices, so this //! video module will always be limited by lower-end 3D APIs like OpenGL ES2.0. //! //! ### Stateless Pipeline //! //! An ordinary OpenGL application deals with stateful APIs, which is error-prone. This //! means whenever you change any state in the API for subsequent draw calls, this state //! change also affects draw calls submitted at a later point in time. Ideally, submitting //! a draw call with whatever state we want should not affect any of the other draw calls, //! even in multi-thread environments. //! //! Modern 3D APIs like [gfx-rs](https://github.com/gfx-rs/gfx) and [glium](https://github.com/glium/glium) //! bundle render state and data into a few precompiled resource objects which are //! combined into the final render pipeline. We should follow the same philosophy. //! //! ### Multi-thread //! //! In most cases, dividing OpenGL rendering across multiple threads will not result in //! any performance improvement due to the pipeline nature of OpenGL. What we are about //! to do is actually exploiting parallelism in resource preparation, while providing a set of //! multi-thread-friendly APIs. //! //! The most common solution is to use a double-buffer of commands. This consists of //! running the renderer backend in a separate thread, where all draw calls and communication //! with the OpenGL API are performed. The frontend thread that runs the game logic //! communicates with the backend renderer via a command double-buffer. //! //! ### Layered Rendering //! //! It's important to sort video commands (generated by different threads) before submitting //! them to OpenGL, for the sake of both correctness and performance. For example, to draw //! transparent objects via blending, we need to draw opaque objects first, usually from front-to-back, //! and then draw translucent ones from back-to-front. //! //! The idea here is to assign an integer key to a command which is used for sorting. Depending //! on where those bits are stored in the integer, you can apply different sorting criteria //! for the same array of commands, as long as you know how the keys were built. //! //! # Resource Objects //! //! Render state and data, which are combined into the final render pipeline, are bundled into a //! few precompiled resource objects in the video module. //! //! All resource types can be created instantly from data in memory, and meshes and textures //! can also be loaded asynchronously from the filesystem. //! //!
The actual resource objects are usually private and opaque; you will get a `Handle` //! immediately for every resource object you create instead of some kind of reference. //! It's the unique identifier for the resource, and it's type-safe and copyable. //! //! When you are done with the created resource objects, it's your responsibility to delete the //! resource object via its `Handle` to avoid leaks. //! //! Resources loaded from the filesystem can be safely shared by `Location`. We //! keep a use count internally; a resource will not actually be freed until all of its users //! have deleted their `Handle`s. //! //! ### Surface Object //! //! A surface object plays the `Layer` role we mentioned above; all the commands we submit //! in application code are attached to a specific `Surface`. Commands inside a `Surface` are //! sorted before being submitted to the underlying OpenGL. //! //! A surface object also holds references to its render target and wraps rendering operations to //! it, like clearing, offscreen rendering, MSAA resolve, etc. //! //! ```rust //! use crayon::prelude::*; //! application::oneshot().unwrap(); //! //! // Creates a `SurfaceParams` object. //! let mut params = SurfaceParams::default(); //! /// Sets the attachments of the internal frame-buffer. It consists of multiple color attachments //! /// and an optional `Depth/DepthStencil` buffer attachment. //! /// //! /// If no attachment is assigned, the default framebuffer generated by the system will be //! /// used. //! params.set_attachments(&[], None); //! // Sets the clear flags for this surface and its underlying framebuffer. //! params.set_clear(Color::white(), 1.0, None); //! //! // Creates a surface with `SurfaceParams`. //! let surface = video::create_surface(params).unwrap(); //! // Deletes the surface object. //! video::delete_surface(surface); //! ``` //! //! ### Shader Object //! //! The shader object is introduced to encapsulate all the stateful things we need to configure the //! video pipeline. This also enables us to easily change the order of draw calls //! and get rid of redundant state changes. //! //! ```rust //! use crayon::prelude::*; //! application::oneshot().unwrap(); //! //! // Declares the uniform variable layouts. //! let mut uniforms = UniformVariableLayout::build() //! .with("u_ModelViewMatrix", UniformVariableType::Matrix4f) //! .with("u_MVPMatrix", UniformVariableType::Matrix4f) //! .finish(); //! //! // Declares the attributes. //! let attributes = AttributeLayout::build() //! .with(Attribute::Position, 3) //! .with(Attribute::Normal, 3) //! .finish(); //! //! let mut params = ShaderParams::default(); //! params.attributes = attributes; //! params.uniforms = uniforms; //! params.state = RenderState::default(); //! //! let vs = "..".into(); //! let fs = "..".into(); //! //! // Create a shader with initial shaders and render state. It encapsulates all the //! // information we need to configure the graphics pipeline before real drawing. //! let shader = video::create_shader(params, vs, fs).unwrap(); //! //! // Deletes the shader object. //! video::delete_shader(shader); //! ``` //! //! ### Texture Object //! //! A texture object is a container of one or more images. It can be the source of a texture //! access from a Shader. //! //! ```rust //! use crayon::prelude::*; //! application::oneshot().unwrap(); //! //! let mut params = TextureParams::default(); //! //! // Create a texture object with optional data. You can fill it later with `update_texture`. //!
//! let texture = video::create_texture(params, None).unwrap();
//!
//! // Deletes the texture object.
//! video::delete_texture(texture);
//! ```
//!
//! #### Compressed Texture Format
//!
//! _TODO_: Cube texture.
//! _TODO_: 3D texture.
//!
//! ### Mesh Object
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! let mut params = MeshParams::default();
//!
//! // Create a mesh object with optional data. You can fill it later with `update_mesh`.
//! let mesh = video::create_mesh(params, None).unwrap();
//!
//! // Deletes the mesh object.
//! video::delete_mesh(mesh);
//! ```
//!
//! # Commands
//!
//! _TODO_: CommandBuffer
//! _TODO_: DrawCommandBuffer

/// Maximum number of attributes in vertex layout.
pub const MAX_VERTEX_ATTRIBUTES: usize = 12;
/// Maximum number of attachments in framebuffer.
pub const MAX_FRAMEBUFFER_ATTACHMENTS: usize = 8;
/// Maximum number of uniform variables in shader.
pub const MAX_UNIFORM_VARIABLES: usize = 32;
/// Maximum number of textures in shader.
pub const MAX_UNIFORM_TEXTURE_SLOTS: usize = 8;

#[macro_use]
pub mod assets;
pub mod command;
pub mod errors;

mod system;
mod backends;

pub mod prelude {
    pub use super::assets::prelude::*;
    pub use super::command::{CommandBuffer, Draw, DrawCommandBuffer};
}

use std::sync::Arc;
use uuid::Uuid;

use crate::math::prelude::Aabb2;
use crate::prelude::CrResult;
use crate::res::utils::prelude::ResourceState;
use crate::utils::double_buf::DoubleBuf;

use self::assets::prelude::*;
use self::backends::frame::Frame;
use self::errors::*;
use self::ins::{ctx, CTX};
use self::system::VideoSystem;

/// Setup the video system.
pub(crate) unsafe fn setup() -> CrResult<()> {
    debug_assert!(CTX.is_null(), "duplicated setup of video system.");

    let ctx = VideoSystem::new()?;
    CTX = Box::into_raw(Box::new(ctx));
    Ok(())
}

/// Setup the video system in headless mode.
pub(crate) unsafe fn headless() {
    debug_assert!(CTX.is_null(), "duplicated setup of video system.");

    let ctx = VideoSystem::headless();
    CTX = Box::into_raw(Box::new(ctx));
}

/// Discard the video system.
pub(crate) unsafe fn discard() {
    if CTX.is_null() {
        return;
    }

    drop(Box::from_raw(CTX as *mut VideoSystem));
    CTX = std::ptr::null();
}

pub(crate) unsafe fn frames() -> Arc<DoubleBuf<Frame>> {
    ctx().frames()
}

/// Creates a surface with `SurfaceParams`.
#[inline]
pub fn create_surface(params: SurfaceParams) -> Result<SurfaceHandle> {
    ctx().create_surface(params)
}

/// Gets the `SurfaceParams` if available.
#[inline]
pub fn surface(handle: SurfaceHandle) -> Option<SurfaceParams> {
    ctx().surface(handle)
}

/// Gets the resource state of the specified surface.
#[inline]
pub fn surface_state(handle: SurfaceHandle) -> ResourceState {
    ctx().surface_state(handle)
}

/// Deletes the surface object.
#[inline]
pub fn delete_surface(handle: SurfaceHandle) {
    ctx().delete_surface(handle)
}

/// Creates a shader with initial shaders and render state. It encapsulates all the
/// information we need to configure the graphics pipeline before real drawing.
#[inline]
pub fn create_shader(params: ShaderParams, vs: String, fs: String) -> Result<ShaderHandle> {
    ctx().create_shader(params, vs, fs)
}
/// Gets the `ShaderParams` if available.
#[inline]
pub fn shader(handle: ShaderHandle) -> Option<ShaderParams> {
    ctx().shader(handle)
}

/// Gets the resource state of the specified shader.
#[inline]
pub fn shader_state(handle: ShaderHandle) -> ResourceState {
    ctx().shader_state(handle)
}

/// Deletes the shader object.
#[inline]
pub fn delete_shader(handle: ShaderHandle) {
    ctx().delete_shader(handle)
}

/// Creates a new mesh object.
#[inline]
pub fn create_mesh<T>(params: MeshParams, data: T) -> CrResult<MeshHandle>
where
    T: Into<Option<MeshData>>,
{
    ctx().create_mesh(params, data)
}

/// Creates a mesh object from a file asynchronously.
#[inline]
pub fn create_mesh_from<T: AsRef<str>>(url: T) -> CrResult<MeshHandle> {
    ctx().create_mesh_from(url)
}

/// Creates a mesh object from a file asynchronously.
#[inline]
pub fn create_mesh_from_uuid(uuid: Uuid) -> CrResult<MeshHandle> {
    ctx().create_mesh_from_uuid(uuid)
}

/// Gets the `MeshParams` if available.
#[inline]
pub fn mesh(handle: MeshHandle) -> Option<MeshParams> {
    ctx().mesh(handle)
}

/// Gets the resource state of the specified mesh.
#[inline]
pub fn mesh_state(handle: MeshHandle) -> ResourceState {
    ctx().mesh_state(handle)
}

/// Updates a subset of a dynamic vertex buffer. The `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_vertex_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
    ctx().update_vertex_buffer(handle, offset, data)
}

/// Updates a subset of a dynamic index buffer. The `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_index_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
    ctx().update_index_buffer(handle, offset, data)
}

/// Deletes the mesh object.
#[inline]
pub fn delete_mesh(handle: MeshHandle) {
    ctx().delete_mesh(handle);
}

/// Creates a texture object. A texture is an image loaded in video memory,
/// which can be sampled in shaders.
#[inline]
pub fn create_texture<T>(params: TextureParams, data: T) -> CrResult<TextureHandle>
where
    T: Into<Option<TextureData>>,
{
    ctx().create_texture(params, data)
}

/// Creates a texture object from a file asynchronously.
#[inline]
pub fn create_texture_from<T: AsRef<str>>(url: T) -> CrResult<TextureHandle> {
    ctx().create_texture_from(url)
}

/// Creates a texture object from a file asynchronously.
#[inline]
pub fn create_texture_from_uuid(uuid: Uuid) -> CrResult<TextureHandle> {
    ctx().create_texture_from_uuid(uuid)
}

/// Gets the resource state of the specified texture.
#[inline]
pub fn texture_state(handle: TextureHandle) -> ResourceState {
    ctx().texture_state(handle)
}

/// Updates a contiguous subregion of an existing two-dimensional texture object.
#[inline]
pub fn update_texture(handle: TextureHandle, area: Aabb2<u32>, data: &[u8]) -> CrResult<()> {
    ctx().update_texture(handle, area, data)
}

/// Deletes the texture object.
#[inline]
pub fn delete_texture(handle: TextureHandle) {
    ctx().delete_texture(handle);
}

/// Gets the `TextureParams` if available.
#[inline]
pub fn texture(handle: TextureHandle) -> Option<TextureParams> {
    ctx().texture(handle)
}

/// Creates a render texture object, which can be attached to a framebuffer.
#[inline]
pub fn create_render_texture(params: RenderTextureParams) -> Result<RenderTextureHandle> {
    ctx().create_render_texture(params)
}

/// Gets the `RenderTextureParams` if available.
#[inline]
pub fn render_texture(handle: RenderTextureHandle) -> Option<RenderTextureParams> {
    ctx().render_texture(handle)
}

/// Gets the resource state of the specified render texture.
#[inline]
pub fn render_texture_state(handle: RenderTextureHandle) -> ResourceState {
    ctx().render_texture_state(handle)
}

/// Deletes the render texture object.
#[inline]
pub fn delete_render_texture(handle: RenderTextureHandle) {
    ctx().delete_render_texture(handle)
}

mod ins {
    use super::system::VideoSystem;

    pub static mut CTX: *const VideoSystem = std::ptr::null();

    #[inline]
    pub fn ctx() -> &'static VideoSystem {
        unsafe {
            debug_assert!(
                !CTX.is_null(),
                "video system has not been initialized properly."
            );

            &*CTX
        }
    }
}
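The `DoubleBuf<Frame>` returned by `frames()` above is what the module docs call the
command double buffer: the frontend thread records commands into one buffer while the
backend thread drains the other, and the two sides swap once per frame. Crayon's own
`DoubleBuf` internals are not shown here, so the sketch below is only an assumed,
minimal illustration of the pattern, not the crate's actual implementation:

```rust
use std::mem;
use std::sync::Mutex;

/// A toy command double buffer. The frontend pushes into `front` while the
/// backend drains `back`; `swap_frames` exchanges the two once per frame.
pub struct CommandDoubleBuf<T> {
    front: Mutex<Vec<T>>,
    back: Mutex<Vec<T>>,
}

impl<T> CommandDoubleBuf<T> {
    pub fn new() -> Self {
        Self {
            front: Mutex::new(Vec::new()),
            back: Mutex::new(Vec::new()),
        }
    }

    /// Frontend thread: record a command for the frame being built.
    pub fn submit(&self, cmd: T) {
        self.front.lock().unwrap().push(cmd);
    }

    /// Called once per frame: hand the recorded commands over to the backend.
    pub fn swap_frames(&self) {
        let mut front = self.front.lock().unwrap();
        let mut back = self.back.lock().unwrap();
        mem::swap(&mut *front, &mut *back);
    }

    /// Backend thread: take last frame's commands and replay them against OpenGL.
    pub fn drain(&self) -> Vec<T> {
        mem::take(&mut *self.back.lock().unwrap())
    }
}
```

A production version would also need to signal the backend when a new frame is ready,
and sort the drained commands by their keys before replaying them, but the
swap-per-frame exchange is the core of the idea.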
rt_threaded.rs
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))]

use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::runtime;
use tokio::sync::oneshot;
use tokio_test::{assert_err, assert_ok};

use futures::future::poll_fn;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::{mpsc, Arc, Mutex};
use std::task::{Context, Poll, Waker};

macro_rules! cfg_metrics {
    ($($t:tt)*) => {
        #[cfg(tokio_unstable)]
        {
            $( $t )*
        }
    }
}

#[test]
fn single_thread() {
    // No panic when starting a runtime w/ a single thread
    let _ = runtime::Builder::new_multi_thread()
        .enable_all()
        .worker_threads(1)
        .build();
}

#[test]
fn many_oneshot_futures() {
    // used for notifying the main thread
    const NUM: usize = 1_000;

    for _ in 0..5 {
        let (tx, rx) = mpsc::channel();

        let rt = rt();
        let cnt = Arc::new(AtomicUsize::new(0));

        for _ in 0..NUM {
            let cnt = cnt.clone();
            let tx = tx.clone();

            rt.spawn(async move {
                let num = cnt.fetch_add(1, Relaxed) + 1;

                if num == NUM {
                    tx.send(()).unwrap();
                }
            });
        }

        rx.recv().unwrap();

        // Wait for the pool to shutdown
        drop(rt);
    }
}

#[test]
fn spawn_two() {
    let rt = rt();

    let out = rt.block_on(async {
        let (tx, rx) = oneshot::channel();

        tokio::spawn(async move {
            tokio::spawn(async move {
                tx.send("ZOMG").unwrap();
            });
        });

        assert_ok!(rx.await)
    });

    assert_eq!(out, "ZOMG");

    cfg_metrics! {
        let metrics = rt.metrics();
        drop(rt);
        assert_eq!(1, metrics.remote_schedule_count());

        let mut local = 0;
        for i in 0..metrics.num_workers() {
            local += metrics.worker_local_schedule_count(i);
        }

        assert_eq!(1, local);
    }
}

#[test]
fn many_multishot_futures() {
    const CHAIN: usize = 200;
    const CYCLES: usize = 5;
    const TRACKS: usize = 50;

    for _ in 0..50 {
        let rt = rt();
        let mut start_txs = Vec::with_capacity(TRACKS);
        let mut final_rxs = Vec::with_capacity(TRACKS);

        for _ in 0..TRACKS {
            let (start_tx, mut chain_rx) = tokio::sync::mpsc::channel(10);

            for _ in 0..CHAIN {
                let (next_tx, next_rx) = tokio::sync::mpsc::channel(10);

                // Forward all the messages
                rt.spawn(async move {
                    while let Some(v) = chain_rx.recv().await {
                        next_tx.send(v).await.unwrap();
                    }
                });

                chain_rx = next_rx;
            }

            // This final task cycles if needed
            let (final_tx, final_rx) = tokio::sync::mpsc::channel(10);
            let cycle_tx = start_tx.clone();
            let mut rem = CYCLES;

            rt.spawn(async move {
                for _ in 0..CYCLES {
                    let msg = chain_rx.recv().await.unwrap();

                    rem -= 1;

                    if rem == 0 {
                        final_tx.send(msg).await.unwrap();
                    } else {
                        cycle_tx.send(msg).await.unwrap();
                    }
                }
            });

            start_txs.push(start_tx);
            final_rxs.push(final_rx);
        }

        {
            rt.block_on(async move {
                for start_tx in start_txs {
                    start_tx.send("ping").await.unwrap();
                }

                for mut final_rx in final_rxs {
                    final_rx.recv().await.unwrap();
                }
            });
        }
    }
}

#[test]
fn spawn_shutdown() {
    let rt = rt();
    let (tx, rx) = mpsc::channel();

    rt.block_on(async {
        tokio::spawn(client_server(tx.clone()));
    });

    // Use spawner
    rt.spawn(client_server(tx));

    assert_ok!(rx.recv());
    assert_ok!(rx.recv());

    drop(rt);
    assert_err!(rx.try_recv());
}

async fn client_server(tx: mpsc::Sender<()>) {
    let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);

    // Get the assigned address
    let addr = assert_ok!(server.local_addr());

    // Spawn the server
    tokio::spawn(async move {
        // Accept a socket
        let (mut socket, _) = server.accept().await.unwrap();

        // Write some data
        socket.write_all(b"hello").await.unwrap();
    });

    let mut client = TcpStream::connect(&addr).await.unwrap();

    let mut buf = vec![];
    client.read_to_end(&mut buf).await.unwrap();
    assert_eq!(buf, b"hello");
    tx.send(()).unwrap();
}

#[test]
fn drop_threadpool_drops_futures() {
    for _ in 0..1_000 {
        let num_inc = Arc::new(AtomicUsize::new(0));
        let num_dec = Arc::new(AtomicUsize::new(0));
        let num_drop = Arc::new(AtomicUsize::new(0));

        struct Never(Arc<AtomicUsize>);

        impl Future for Never {
            type Output = ();

            fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
                Poll::Pending
            }
        }

        impl Drop for Never {
            fn drop(&mut self) {
                self.0.fetch_add(1, Relaxed);
            }
        }

        let a = num_inc.clone();
        let b = num_dec.clone();

        let rt = runtime::Builder::new_multi_thread()
            .enable_all()
            .on_thread_start(move || {
                a.fetch_add(1, Relaxed);
            })
            .on_thread_stop(move || {
                b.fetch_add(1, Relaxed);
            })
            .build()
            .unwrap();

        rt.spawn(Never(num_drop.clone()));

        // Wait for the pool to shutdown
        drop(rt);

        // Assert that only a single thread was spawned.
        let a = num_inc.load(Relaxed);
        assert!(a >= 1);

        // Assert that all threads shutdown
        let b = num_dec.load(Relaxed);
        assert_eq!(a, b);

        // Assert that the future was dropped
        let c = num_drop.load(Relaxed);
        assert_eq!(c, 1);
    }
}

#[test]
fn start_stop_callbacks_called() {
    use std::sync::atomic::{AtomicUsize, Ordering};

    let after_start = Arc::new(AtomicUsize::new(0));
    let before_stop = Arc::new(AtomicUsize::new(0));

    let after_inner = after_start.clone();
    let before_inner = before_stop.clone();
    let rt = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .on_thread_start(move || {
            after_inner.clone().fetch_add(1, Ordering::Relaxed);
        })
        .on_thread_stop(move || {
            before_inner.clone().fetch_add(1, Ordering::Relaxed);
        })
        .build()
        .unwrap();

    let (tx, rx) = oneshot::channel();

    rt.spawn(async move {
        assert_ok!(tx.send(()));
    });

    assert_ok!(rt.block_on(rx));

    drop(rt);

    assert!(after_start.load(Ordering::Relaxed) > 0);
    assert!(before_stop.load(Ordering::Relaxed) > 0);
}

#[test]
fn blocking() {
    // used for notifying the main thread
    const NUM: usize = 1_000;

    for _ in 0..10 {
        let (tx, rx) = mpsc::channel();

        let rt = rt();
        let cnt = Arc::new(AtomicUsize::new(0));

        // there are four workers in the pool
        // so, if we run 4 blocking tasks, we know that handoff must have happened
        let block = Arc::new(std::sync::Barrier::new(5));
        for _ in 0..4 {
            let block = block.clone();
            rt.spawn(async move {
                tokio::task::block_in_place(move || {
                    block.wait();
                    block.wait();
                })
            });
        }
        block.wait();

        for _ in 0..NUM {
            let cnt = cnt.clone();
            let tx = tx.clone();

            rt.spawn(async move {
                let num = cnt.fetch_add(1, Relaxed) + 1;

                if num == NUM {
                    tx.send(()).unwrap();
                }
            });
        }

        rx.recv().unwrap();

        // Wait for the pool to shutdown
        block.wait();
    }
}

#[test]
fn multi_threadpool() {
    use tokio::sync::oneshot;

    let rt1 = rt();
    let rt2 = rt();

    let (tx, rx) = oneshot::channel();
    let (done_tx, done_rx) = mpsc::channel();

    rt2.spawn(async move {
        rx.await.unwrap();
        done_tx.send(()).unwrap();
    });

    rt1.spawn(async move {
        tx.send(()).unwrap();
    });

    done_rx.recv().unwrap();
}

// When `block_in_place` returns, it attempts to reclaim the yielded runtime
// worker. In this case, the remainder of the task is on the runtime worker and
// must take part in the cooperative task budgeting system.
//
// The test ensures that, when this happens, attempting to consume from a
// channel yields occasionally even if there are values ready to receive.
#[test]
fn coop_and_block_in_place() {
    let rt = tokio::runtime::Builder::new_multi_thread()
        // Setting max threads to 1 prevents another thread from claiming the
        // runtime worker yielded as part of `block_in_place` and guarantees the
        // same thread will reclaim the worker at the end of the
        // `block_in_place` call.
        .max_blocking_threads(1)
        .build()
        .unwrap();

    rt.block_on(async move {
        let (tx, mut rx) = tokio::sync::mpsc::channel(1024);

        // Fill the channel
        for _ in 0..1024 {
            tx.send(()).await.unwrap();
        }
        drop(tx);

        tokio::spawn(async move {
            // Block in place without doing anything
            tokio::task::block_in_place(|| {});

            // Receive all the values, this should trigger a `Pending` as the
            // coop limit will be reached.
            poll_fn(|cx| {
                while let Poll::Ready(v) = {
                    tokio::pin! {
                        let fut = rx.recv();
                    }

                    Pin::new(&mut fut).poll(cx)
                } {
                    if v.is_none() {
                        panic!("did not yield");
                    }
                }

                Poll::Ready(())
            })
            .await
        })
        .await
        .unwrap();
    });
}

#[test]
fn yield_after_block_in_place() {
    let rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(1)
        .build()
        .unwrap();

    rt.block_on(async {
        tokio::spawn(async move {
            // Block in place then enter a new runtime
            tokio::task::block_in_place(|| {
                let rt = tokio::runtime::Builder::new_current_thread()
                    .build()
                    .unwrap();

                rt.block_on(async {});
            });

            // Yield, then complete
            tokio::task::yield_now().await;
        })
        .await
        .unwrap()
    });
}

// Testing this does not panic
#[test]
fn max_blocking_threads() {
    let _rt = tokio::runtime::Builder::new_multi_thread()
        .max_blocking_threads(1)
        .build()
        .unwrap();
}

#[test]
#[should_panic]
fn max_blocking_threads_set_to_zero() {
    let _rt = tokio::runtime::Builder::new_multi_thread()
        .max_blocking_threads(0)
        .build()
        .unwrap();
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn hang_on_shutdown() {
    let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>();
    tokio::spawn(async move {
        tokio::task::block_in_place(|| sync_rx.recv().ok());
    });

    tokio::spawn(async {
        tokio::time::sleep(std::time::Duration::from_secs(2)).await;
        drop(sync_tx);
    });
    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}

/// Demonstrates tokio-rs/tokio#3869
#[test]
fn wake_during_shutdown() {
    struct Shared {
        waker: Option<Waker>,
    }

    struct MyFuture {
        shared: Arc<Mutex<Shared>>,
        put_waker: bool,
    }

    impl MyFuture {
        fn new() -> (Self, Self) {
            let shared = Arc::new(Mutex::new(Shared { waker: None }));
            let f1 = MyFuture {
                shared: shared.clone(),
                put_waker: true,
            };
            let f2 = MyFuture {
                shared,
                put_waker: false,
            };
            (f1, f2)
        }
    }

    impl Future for MyFuture {
        type Output = ();

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
            let me = Pin::into_inner(self);
            let mut lock = me.shared.lock().unwrap();
            if me.put_waker {
                lock.waker = Some(cx.waker().clone());
            }
            Poll::Pending
        }
    }

    impl Drop for MyFuture {
        fn drop(&mut self) {
            let mut lock = self.shared.lock().unwrap();
            if !self.put_waker {
                lock.waker.take().unwrap().wake();
            }
            drop(lock);
        }
    }

    let rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(1)
        .enable_all()
        .build()
        .unwrap();

    let (f1, f2) = MyFuture::new();

    rt.spawn(f1);
    rt.spawn(f2);

    rt.block_on(async { tokio::time::sleep(tokio::time::Duration::from_millis(20)).await });
}

#[should_panic]
#[tokio::test]
async fn test_block_in_place1() {
    tokio::task::block_in_place(|| {});
}

#[tokio::test(flavor = "multi_thread")]
async fn test_block_in_place2() {
    tokio::task::block_in_place(|| {});
}

#[should_panic]
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_block_in_place3() {
    tokio::task::block_in_place(|| {});
}

#[tokio::main]
#[test]
async fn test_block_in_place4() {
    tokio::task::block_in_place(|| {});
}

fn rt() -> runtime::Runtime {
    runtime::Runtime::new().unwrap()
}

#[cfg(tokio_unstable)]
mod unstable {
    use super::*;

    #[test]
    fn test_disable_lifo_slot() {
        let rt = runtime::Builder::new_multi_thread()
            .disable_lifo_slot()
            .worker_threads(2)
            .build()
            .unwrap();

        rt.block_on(async {
            tokio::spawn(async {
                // Spawn another task and block the thread until completion. If the LIFO slot
                // is used then the test doesn't complete.
                futures::executor::block_on(tokio::spawn(async {})).unwrap();
            })
            .await
            .unwrap();
        })
    }
}
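The `test_block_in_place*` tests above pin down the rule these tests exercise:
`block_in_place` is only allowed on a multi-threaded runtime and panics on the
current-thread flavor. A minimal usage sketch (not part of the test file) of the
pattern the suite is verifying:

```rust
// Run blocking, CPU-bound work from async code on a multi-threaded runtime
// without stalling the worker; other tasks migrate to the remaining workers.
// On the current_thread flavor this call panics, as the should_panic tests show.
#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let sum: u64 = tokio::task::block_in_place(|| (0..1_000_000u64).sum());
    println!("{sum}");
}
```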
{ tx.send(()).unwrap(); }
conditional_block
rt_threaded.rs
#![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(tokio_wasi)))] use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::{TcpListener, TcpStream}; use tokio::runtime; use tokio::sync::oneshot; use tokio_test::{assert_err, assert_ok}; use futures::future::poll_fn; use std::future::Future; use std::pin::Pin; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::Relaxed; use std::sync::{mpsc, Arc, Mutex}; use std::task::{Context, Poll, Waker}; macro_rules! cfg_metrics { ($($t:tt)*) => { #[cfg(tokio_unstable)] { $( $t )* } } } #[test] fn single_thread() { // No panic when starting a runtime w/ a single thread let _ = runtime::Builder::new_multi_thread() .enable_all() .worker_threads(1) .build(); } #[test] fn many_oneshot_futures() { // used for notifying the main thread const NUM: usize = 1_000; for _ in 0..5 { let (tx, rx) = mpsc::channel(); let rt = rt(); let cnt = Arc::new(AtomicUsize::new(0)); for _ in 0..NUM { let cnt = cnt.clone(); let tx = tx.clone(); rt.spawn(async move { let num = cnt.fetch_add(1, Relaxed) + 1; if num == NUM { tx.send(()).unwrap(); } }); } rx.recv().unwrap(); // Wait for the pool to shutdown drop(rt); } } #[test] fn spawn_two() { let rt = rt(); let out = rt.block_on(async { let (tx, rx) = oneshot::channel(); tokio::spawn(async move { tokio::spawn(async move { tx.send("ZOMG").unwrap(); }); }); assert_ok!(rx.await) }); assert_eq!(out, "ZOMG"); cfg_metrics! { let metrics = rt.metrics(); drop(rt); assert_eq!(1, metrics.remote_schedule_count()); let mut local = 0; for i in 0..metrics.num_workers() { local += metrics.worker_local_schedule_count(i); } assert_eq!(1, local); } } #[test] fn many_multishot_futures() {
const TRACKS: usize = 50; for _ in 0..50 { let rt = rt(); let mut start_txs = Vec::with_capacity(TRACKS); let mut final_rxs = Vec::with_capacity(TRACKS); for _ in 0..TRACKS { let (start_tx, mut chain_rx) = tokio::sync::mpsc::channel(10); for _ in 0..CHAIN { let (next_tx, next_rx) = tokio::sync::mpsc::channel(10); // Forward all the messages rt.spawn(async move { while let Some(v) = chain_rx.recv().await { next_tx.send(v).await.unwrap(); } }); chain_rx = next_rx; } // This final task cycles if needed let (final_tx, final_rx) = tokio::sync::mpsc::channel(10); let cycle_tx = start_tx.clone(); let mut rem = CYCLES; rt.spawn(async move { for _ in 0..CYCLES { let msg = chain_rx.recv().await.unwrap(); rem -= 1; if rem == 0 { final_tx.send(msg).await.unwrap(); } else { cycle_tx.send(msg).await.unwrap(); } } }); start_txs.push(start_tx); final_rxs.push(final_rx); } { rt.block_on(async move { for start_tx in start_txs { start_tx.send("ping").await.unwrap(); } for mut final_rx in final_rxs { final_rx.recv().await.unwrap(); } }); } } } #[test] fn spawn_shutdown() { let rt = rt(); let (tx, rx) = mpsc::channel(); rt.block_on(async { tokio::spawn(client_server(tx.clone())); }); // Use spawner rt.spawn(client_server(tx)); assert_ok!(rx.recv()); assert_ok!(rx.recv()); drop(rt); assert_err!(rx.try_recv()); } async fn client_server(tx: mpsc::Sender<()>) { let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await); // Get the assigned address let addr = assert_ok!(server.local_addr()); // Spawn the server tokio::spawn(async move { // Accept a socket let (mut socket, _) = server.accept().await.unwrap(); // Write some data socket.write_all(b"hello").await.unwrap(); }); let mut client = TcpStream::connect(&addr).await.unwrap(); let mut buf = vec![]; client.read_to_end(&mut buf).await.unwrap(); assert_eq!(buf, b"hello"); tx.send(()).unwrap(); } #[test] fn drop_threadpool_drops_futures() { for _ in 0..1_000 { let num_inc = Arc::new(AtomicUsize::new(0)); let num_dec = Arc::new(AtomicUsize::new(0)); let num_drop = Arc::new(AtomicUsize::new(0)); struct Never(Arc<AtomicUsize>); impl Future for Never { type Output = (); fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> { Poll::Pending } } impl Drop for Never { fn drop(&mut self) { self.0.fetch_add(1, Relaxed); } } let a = num_inc.clone(); let b = num_dec.clone(); let rt = runtime::Builder::new_multi_thread() .enable_all() .on_thread_start(move || { a.fetch_add(1, Relaxed); }) .on_thread_stop(move || { b.fetch_add(1, Relaxed); }) .build() .unwrap(); rt.spawn(Never(num_drop.clone())); // Wait for the pool to shutdown drop(rt); // Assert that only a single thread was spawned. 
let a = num_inc.load(Relaxed); assert!(a >= 1); // Assert that all threads shutdown let b = num_dec.load(Relaxed); assert_eq!(a, b); // Assert that the future was dropped let c = num_drop.load(Relaxed); assert_eq!(c, 1); } } #[test] fn start_stop_callbacks_called() { use std::sync::atomic::{AtomicUsize, Ordering}; let after_start = Arc::new(AtomicUsize::new(0)); let before_stop = Arc::new(AtomicUsize::new(0)); let after_inner = after_start.clone(); let before_inner = before_stop.clone(); let rt = tokio::runtime::Builder::new_multi_thread() .enable_all() .on_thread_start(move || { after_inner.clone().fetch_add(1, Ordering::Relaxed); }) .on_thread_stop(move || { before_inner.clone().fetch_add(1, Ordering::Relaxed); }) .build() .unwrap(); let (tx, rx) = oneshot::channel(); rt.spawn(async move { assert_ok!(tx.send(())); }); assert_ok!(rt.block_on(rx)); drop(rt); assert!(after_start.load(Ordering::Relaxed) > 0); assert!(before_stop.load(Ordering::Relaxed) > 0); } #[test] fn blocking() { // used for notifying the main thread const NUM: usize = 1_000; for _ in 0..10 { let (tx, rx) = mpsc::channel(); let rt = rt(); let cnt = Arc::new(AtomicUsize::new(0)); // there are four workers in the pool // so, if we run 4 blocking tasks, we know that handoff must have happened let block = Arc::new(std::sync::Barrier::new(5)); for _ in 0..4 { let block = block.clone(); rt.spawn(async move { tokio::task::block_in_place(move || { block.wait(); block.wait(); }) }); } block.wait(); for _ in 0..NUM { let cnt = cnt.clone(); let tx = tx.clone(); rt.spawn(async move { let num = cnt.fetch_add(1, Relaxed) + 1; if num == NUM { tx.send(()).unwrap(); } }); } rx.recv().unwrap(); // Wait for the pool to shutdown block.wait(); } } #[test] fn multi_threadpool() { use tokio::sync::oneshot; let rt1 = rt(); let rt2 = rt(); let (tx, rx) = oneshot::channel(); let (done_tx, done_rx) = mpsc::channel(); rt2.spawn(async move { rx.await.unwrap(); done_tx.send(()).unwrap(); }); rt1.spawn(async move { tx.send(()).unwrap(); }); done_rx.recv().unwrap(); } // When `block_in_place` returns, it attempts to reclaim the yielded runtime // worker. In this case, the remainder of the task is on the runtime worker and // must take part in the cooperative task budgeting system. // // The test ensures that, when this happens, attempting to consume from a // channel yields occasionally even if there are values ready to receive. #[test] fn coop_and_block_in_place() { let rt = tokio::runtime::Builder::new_multi_thread() // Setting max threads to 1 prevents another thread from claiming the // runtime worker yielded as part of `block_in_place` and guarantees the // same thread will reclaim the worker at the end of the // `block_in_place` call. .max_blocking_threads(1) .build() .unwrap(); rt.block_on(async move { let (tx, mut rx) = tokio::sync::mpsc::channel(1024); // Fill the channel for _ in 0..1024 { tx.send(()).await.unwrap(); } drop(tx); tokio::spawn(async move { // Block in place without doing anything tokio::task::block_in_place(|| {}); // Receive all the values, this should trigger a `Pending` as the // coop limit will be reached. poll_fn(|cx| { while let Poll::Ready(v) = { tokio::pin! 
{ let fut = rx.recv(); } Pin::new(&mut fut).poll(cx) } { if v.is_none() { panic!("did not yield"); } } Poll::Ready(()) }) .await }) .await .unwrap(); }); } #[test] fn yield_after_block_in_place() { let rt = tokio::runtime::Builder::new_multi_thread() .worker_threads(1) .build() .unwrap(); rt.block_on(async { tokio::spawn(async move { // Block in place then enter a new runtime tokio::task::block_in_place(|| { let rt = tokio::runtime::Builder::new_current_thread() .build() .unwrap(); rt.block_on(async {}); }); // Yield, then complete tokio::task::yield_now().await; }) .await .unwrap() }); } // Testing this does not panic #[test] fn max_blocking_threads() { let _rt = tokio::runtime::Builder::new_multi_thread() .max_blocking_threads(1) .build() .unwrap(); } #[test] #[should_panic] fn max_blocking_threads_set_to_zero() { let _rt = tokio::runtime::Builder::new_multi_thread() .max_blocking_threads(0) .build() .unwrap(); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn hang_on_shutdown() { let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>(); tokio::spawn(async move { tokio::task::block_in_place(|| sync_rx.recv().ok()); }); tokio::spawn(async { tokio::time::sleep(std::time::Duration::from_secs(2)).await; drop(sync_tx); }); tokio::time::sleep(std::time::Duration::from_secs(1)).await; } /// Demonstrates tokio-rs/tokio#3869 #[test] fn wake_during_shutdown() { struct Shared { waker: Option<Waker>, } struct MyFuture { shared: Arc<Mutex<Shared>>, put_waker: bool, } impl MyFuture { fn new() -> (Self, Self) { let shared = Arc::new(Mutex::new(Shared { waker: None })); let f1 = MyFuture { shared: shared.clone(), put_waker: true, }; let f2 = MyFuture { shared, put_waker: false, }; (f1, f2) } } impl Future for MyFuture { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { let me = Pin::into_inner(self); let mut lock = me.shared.lock().unwrap(); if me.put_waker { lock.waker = Some(cx.waker().clone()); } Poll::Pending } } impl Drop for MyFuture { fn drop(&mut self) { let mut lock = self.shared.lock().unwrap(); if!self.put_waker { lock.waker.take().unwrap().wake(); } drop(lock); } } let rt = tokio::runtime::Builder::new_multi_thread() .worker_threads(1) .enable_all() .build() .unwrap(); let (f1, f2) = MyFuture::new(); rt.spawn(f1); rt.spawn(f2); rt.block_on(async { tokio::time::sleep(tokio::time::Duration::from_millis(20)).await }); } #[should_panic] #[tokio::test] async fn test_block_in_place1() { tokio::task::block_in_place(|| {}); } #[tokio::test(flavor = "multi_thread")] async fn test_block_in_place2() { tokio::task::block_in_place(|| {}); } #[should_panic] #[tokio::main(flavor = "current_thread")] #[test] async fn test_block_in_place3() { tokio::task::block_in_place(|| {}); } #[tokio::main] #[test] async fn test_block_in_place4() { tokio::task::block_in_place(|| {}); } fn rt() -> runtime::Runtime { runtime::Runtime::new().unwrap() } #[cfg(tokio_unstable)] mod unstable { use super::*; #[test] fn test_disable_lifo_slot() { let rt = runtime::Builder::new_multi_thread() .disable_lifo_slot() .worker_threads(2) .build() .unwrap(); rt.block_on(async { tokio::spawn(async { // Spawn another task and block the thread until completion. If the LIFO slot // is used then the test doesn't complete. futures::executor::block_on(tokio::spawn(async {})).unwrap(); }) .await .unwrap(); }) } }
const CHAIN: usize = 200; const CYCLES: usize = 5;
random_line_split
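The suffix in this row exercises tokio's thread-lifecycle callbacks and `block_in_place`. As a standalone reference, here is a minimal sketch of the same pattern, assuming a tokio dependency with the rt-multi-thread feature enabled; the counter names are illustrative:

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

fn main() {
    // Count worker threads as they start and stop, as the test does.
    let started = Arc::new(AtomicUsize::new(0));
    let stopped = Arc::new(AtomicUsize::new(0));
    let (s, t) = (started.clone(), stopped.clone());

    let rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(2)
        .on_thread_start(move || { s.fetch_add(1, Ordering::Relaxed); })
        .on_thread_stop(move || { t.fetch_add(1, Ordering::Relaxed); })
        .build()
        .unwrap();

    rt.block_on(async {
        tokio::task::spawn(async {
            // Legal only on the multi-thread flavor; the current thread is
            // handed over to blocking work while another worker takes over.
            tokio::task::block_in_place(|| { /* blocking work */ });
        })
        .await
        .unwrap();
    });

    drop(rt); // shutdown joins the workers, firing on_thread_stop
    assert_eq!(started.load(Ordering::Relaxed), stopped.load(Ordering::Relaxed));
}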
rt_threaded.rs
#![warn(rust_2018_idioms)] #![cfg(all(feature = "full", not(tokio_wasi)))] use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::{TcpListener, TcpStream}; use tokio::runtime; use tokio::sync::oneshot; use tokio_test::{assert_err, assert_ok}; use futures::future::poll_fn; use std::future::Future; use std::pin::Pin; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::Relaxed; use std::sync::{mpsc, Arc, Mutex}; use std::task::{Context, Poll, Waker}; macro_rules! cfg_metrics { ($($t:tt)*) => { #[cfg(tokio_unstable)] { $( $t )* } } } #[test] fn single_thread() { // No panic when starting a runtime w/ a single thread let _ = runtime::Builder::new_multi_thread() .enable_all() .worker_threads(1) .build(); } #[test] fn many_oneshot_futures() { // used for notifying the main thread const NUM: usize = 1_000; for _ in 0..5 { let (tx, rx) = mpsc::channel(); let rt = rt(); let cnt = Arc::new(AtomicUsize::new(0)); for _ in 0..NUM { let cnt = cnt.clone(); let tx = tx.clone(); rt.spawn(async move { let num = cnt.fetch_add(1, Relaxed) + 1; if num == NUM { tx.send(()).unwrap(); } }); } rx.recv().unwrap(); // Wait for the pool to shutdown drop(rt); } } #[test] fn spawn_two() { let rt = rt(); let out = rt.block_on(async { let (tx, rx) = oneshot::channel(); tokio::spawn(async move { tokio::spawn(async move { tx.send("ZOMG").unwrap(); }); }); assert_ok!(rx.await) }); assert_eq!(out, "ZOMG"); cfg_metrics! { let metrics = rt.metrics(); drop(rt); assert_eq!(1, metrics.remote_schedule_count()); let mut local = 0; for i in 0..metrics.num_workers() { local += metrics.worker_local_schedule_count(i); } assert_eq!(1, local); } } #[test] fn many_multishot_futures() { const CHAIN: usize = 200; const CYCLES: usize = 5; const TRACKS: usize = 50; for _ in 0..50 { let rt = rt(); let mut start_txs = Vec::with_capacity(TRACKS); let mut final_rxs = Vec::with_capacity(TRACKS); for _ in 0..TRACKS { let (start_tx, mut chain_rx) = tokio::sync::mpsc::channel(10); for _ in 0..CHAIN { let (next_tx, next_rx) = tokio::sync::mpsc::channel(10); // Forward all the messages rt.spawn(async move { while let Some(v) = chain_rx.recv().await { next_tx.send(v).await.unwrap(); } }); chain_rx = next_rx; } // This final task cycles if needed let (final_tx, final_rx) = tokio::sync::mpsc::channel(10); let cycle_tx = start_tx.clone(); let mut rem = CYCLES; rt.spawn(async move { for _ in 0..CYCLES { let msg = chain_rx.recv().await.unwrap(); rem -= 1; if rem == 0 { final_tx.send(msg).await.unwrap(); } else { cycle_tx.send(msg).await.unwrap(); } } }); start_txs.push(start_tx); final_rxs.push(final_rx); } { rt.block_on(async move { for start_tx in start_txs { start_tx.send("ping").await.unwrap(); } for mut final_rx in final_rxs { final_rx.recv().await.unwrap(); } }); } } } #[test] fn spawn_shutdown() { let rt = rt(); let (tx, rx) = mpsc::channel(); rt.block_on(async { tokio::spawn(client_server(tx.clone())); }); // Use spawner rt.spawn(client_server(tx)); assert_ok!(rx.recv()); assert_ok!(rx.recv()); drop(rt); assert_err!(rx.try_recv()); } async fn client_server(tx: mpsc::Sender<()>) { let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await); // Get the assigned address let addr = assert_ok!(server.local_addr()); // Spawn the server tokio::spawn(async move { // Accept a socket let (mut socket, _) = server.accept().await.unwrap(); // Write some data socket.write_all(b"hello").await.unwrap(); }); let mut client = TcpStream::connect(&addr).await.unwrap(); let mut buf = vec![]; client.read_to_end(&mut buf).await.unwrap(); 
assert_eq!(buf, b"hello"); tx.send(()).unwrap(); } #[test] fn drop_threadpool_drops_futures() { for _ in 0..1_000 { let num_inc = Arc::new(AtomicUsize::new(0)); let num_dec = Arc::new(AtomicUsize::new(0)); let num_drop = Arc::new(AtomicUsize::new(0)); struct Never(Arc<AtomicUsize>); impl Future for Never { type Output = (); fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> { Poll::Pending } } impl Drop for Never { fn drop(&mut self) { self.0.fetch_add(1, Relaxed); } } let a = num_inc.clone(); let b = num_dec.clone(); let rt = runtime::Builder::new_multi_thread() .enable_all() .on_thread_start(move || { a.fetch_add(1, Relaxed); }) .on_thread_stop(move || { b.fetch_add(1, Relaxed); }) .build() .unwrap(); rt.spawn(Never(num_drop.clone())); // Wait for the pool to shutdown drop(rt); // Assert that only a single thread was spawned. let a = num_inc.load(Relaxed); assert!(a >= 1); // Assert that all threads shutdown let b = num_dec.load(Relaxed); assert_eq!(a, b); // Assert that the future was dropped let c = num_drop.load(Relaxed); assert_eq!(c, 1); } } #[test] fn start_stop_callbacks_called() { use std::sync::atomic::{AtomicUsize, Ordering}; let after_start = Arc::new(AtomicUsize::new(0)); let before_stop = Arc::new(AtomicUsize::new(0)); let after_inner = after_start.clone(); let before_inner = before_stop.clone(); let rt = tokio::runtime::Builder::new_multi_thread() .enable_all() .on_thread_start(move || { after_inner.clone().fetch_add(1, Ordering::Relaxed); }) .on_thread_stop(move || { before_inner.clone().fetch_add(1, Ordering::Relaxed); }) .build() .unwrap(); let (tx, rx) = oneshot::channel(); rt.spawn(async move { assert_ok!(tx.send(())); }); assert_ok!(rt.block_on(rx)); drop(rt); assert!(after_start.load(Ordering::Relaxed) > 0); assert!(before_stop.load(Ordering::Relaxed) > 0); } #[test] fn blocking() { // used for notifying the main thread const NUM: usize = 1_000; for _ in 0..10 { let (tx, rx) = mpsc::channel(); let rt = rt(); let cnt = Arc::new(AtomicUsize::new(0)); // there are four workers in the pool // so, if we run 4 blocking tasks, we know that handoff must have happened let block = Arc::new(std::sync::Barrier::new(5)); for _ in 0..4 { let block = block.clone(); rt.spawn(async move { tokio::task::block_in_place(move || { block.wait(); block.wait(); }) }); } block.wait(); for _ in 0..NUM { let cnt = cnt.clone(); let tx = tx.clone(); rt.spawn(async move { let num = cnt.fetch_add(1, Relaxed) + 1; if num == NUM { tx.send(()).unwrap(); } }); } rx.recv().unwrap(); // Wait for the pool to shutdown block.wait(); } } #[test] fn multi_threadpool() { use tokio::sync::oneshot; let rt1 = rt(); let rt2 = rt(); let (tx, rx) = oneshot::channel(); let (done_tx, done_rx) = mpsc::channel(); rt2.spawn(async move { rx.await.unwrap(); done_tx.send(()).unwrap(); }); rt1.spawn(async move { tx.send(()).unwrap(); }); done_rx.recv().unwrap(); } // When `block_in_place` returns, it attempts to reclaim the yielded runtime // worker. In this case, the remainder of the task is on the runtime worker and // must take part in the cooperative task budgeting system. // // The test ensures that, when this happens, attempting to consume from a // channel yields occasionally even if there are values ready to receive. 
#[test] fn coop_and_block_in_place() { let rt = tokio::runtime::Builder::new_multi_thread() // Setting max threads to 1 prevents another thread from claiming the // runtime worker yielded as part of `block_in_place` and guarantees the // same thread will reclaim the worker at the end of the // `block_in_place` call. .max_blocking_threads(1) .build() .unwrap(); rt.block_on(async move { let (tx, mut rx) = tokio::sync::mpsc::channel(1024); // Fill the channel for _ in 0..1024 { tx.send(()).await.unwrap(); } drop(tx); tokio::spawn(async move { // Block in place without doing anything tokio::task::block_in_place(|| {}); // Receive all the values, this should trigger a `Pending` as the // coop limit will be reached. poll_fn(|cx| { while let Poll::Ready(v) = { tokio::pin! { let fut = rx.recv(); } Pin::new(&mut fut).poll(cx) } { if v.is_none() { panic!("did not yield"); } } Poll::Ready(()) }) .await }) .await .unwrap(); }); } #[test] fn yield_after_block_in_place() { let rt = tokio::runtime::Builder::new_multi_thread() .worker_threads(1) .build() .unwrap(); rt.block_on(async { tokio::spawn(async move { // Block in place then enter a new runtime tokio::task::block_in_place(|| { let rt = tokio::runtime::Builder::new_current_thread() .build() .unwrap(); rt.block_on(async {}); }); // Yield, then complete tokio::task::yield_now().await; }) .await .unwrap() }); } // Testing this does not panic #[test] fn max_blocking_threads() { let _rt = tokio::runtime::Builder::new_multi_thread() .max_blocking_threads(1) .build() .unwrap(); } #[test] #[should_panic] fn max_blocking_threads_set_to_zero() { let _rt = tokio::runtime::Builder::new_multi_thread() .max_blocking_threads(0) .build() .unwrap(); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn hang_on_shutdown() { let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>(); tokio::spawn(async move { tokio::task::block_in_place(|| sync_rx.recv().ok()); }); tokio::spawn(async { tokio::time::sleep(std::time::Duration::from_secs(2)).await; drop(sync_tx); }); tokio::time::sleep(std::time::Duration::from_secs(1)).await; } /// Demonstrates tokio-rs/tokio#3869 #[test] fn
() { struct Shared { waker: Option<Waker>, } struct MyFuture { shared: Arc<Mutex<Shared>>, put_waker: bool, } impl MyFuture { fn new() -> (Self, Self) { let shared = Arc::new(Mutex::new(Shared { waker: None })); let f1 = MyFuture { shared: shared.clone(), put_waker: true, }; let f2 = MyFuture { shared, put_waker: false, }; (f1, f2) } } impl Future for MyFuture { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { let me = Pin::into_inner(self); let mut lock = me.shared.lock().unwrap(); if me.put_waker { lock.waker = Some(cx.waker().clone()); } Poll::Pending } } impl Drop for MyFuture { fn drop(&mut self) { let mut lock = self.shared.lock().unwrap(); if!self.put_waker { lock.waker.take().unwrap().wake(); } drop(lock); } } let rt = tokio::runtime::Builder::new_multi_thread() .worker_threads(1) .enable_all() .build() .unwrap(); let (f1, f2) = MyFuture::new(); rt.spawn(f1); rt.spawn(f2); rt.block_on(async { tokio::time::sleep(tokio::time::Duration::from_millis(20)).await }); } #[should_panic] #[tokio::test] async fn test_block_in_place1() { tokio::task::block_in_place(|| {}); } #[tokio::test(flavor = "multi_thread")] async fn test_block_in_place2() { tokio::task::block_in_place(|| {}); } #[should_panic] #[tokio::main(flavor = "current_thread")] #[test] async fn test_block_in_place3() { tokio::task::block_in_place(|| {}); } #[tokio::main] #[test] async fn test_block_in_place4() { tokio::task::block_in_place(|| {}); } fn rt() -> runtime::Runtime { runtime::Runtime::new().unwrap() } #[cfg(tokio_unstable)] mod unstable { use super::*; #[test] fn test_disable_lifo_slot() { let rt = runtime::Builder::new_multi_thread() .disable_lifo_slot() .worker_threads(2) .build() .unwrap(); rt.block_on(async { tokio::spawn(async { // Spawn another task and block the thread until completion. If the LIFO slot // is used then the test doesn't complete. futures::executor::block_on(tokio::spawn(async {})).unwrap(); }) .await .unwrap(); }) } }
wake_during_shutdown
identifier_name
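This row's fim_type is identifier_name: given a prefix ending in `#[test] fn` and the suffix holding the function body, the target middle is the bare identifier `wake_during_shutdown`. A sketch of how such a row could be assembled into one training string; the sentinel tokens are an assumption borrowed from common FIM setups, not something this dump specifies:

/// Concatenate a row's fields in prefix-suffix-middle (PSM) order.
fn fim_sample(prefix: &str, suffix: &str, middle: &str) -> String {
    format!("<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>{middle}")
}

fn main() {
    // For the record above, the model must emit only the identifier.
    let s = fim_sample("#[test] fn ", "() { /* body */ }", "wake_during_shutdown");
    assert!(s.ends_with("wake_during_shutdown"));
}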
d3cap.rs
use std::thread::{self, JoinHandle}; use std::hash::{Hash}; use std::collections::hash_map::{Entry, HashMap}; use std::fs::File; use std::io::{self, Read}; use std::sync::{Arc,RwLock}; use std::sync::mpsc::{channel, Sender, SendError}; use toml; use multicast::Multicast; use json_serve::uiserver::UIServer; use util::{ntohs, skip_bytes_cast, skip_cast}; use ip::{IP4Addr, IP6Addr, IP4Header, IP6Header}; use ether::{EthernetHeader, MacAddr, ETHERTYPE_ARP, ETHERTYPE_IP4, ETHERTYPE_IP6, ETHERTYPE_802_1X}; use dot11::{self, FrameType}; use tap; use pkt_graph::{PktMeta, ProtocolGraph, RouteStats}; use fixed_ring::FixedRingBuffer; use pcap::pcap as cap; #[derive(RustcEncodable, Clone)] struct RouteStatsMsg<T> { typ: &'static str, route: RouteStats<T>, } #[derive(Debug)] pub enum Pkt { Mac(PktMeta<MacAddr>), IP4(PktMeta<IP4Addr>), IP6(PktMeta<IP6Addr>), } #[derive(Clone)] pub struct ProtocolHandler<T:Eq+Hash+Send+Sync+'static> { pub typ: &'static str, pub graph: Arc<RwLock<ProtocolGraph<T>>>, stats_mcast: Multicast<RouteStatsMsg<T>>, } impl <T:Send+Sync+Copy+Clone+Eq+Hash> ProtocolHandler<T> { fn new(typ: &'static str) -> io::Result<ProtocolHandler<T>> { Ok(ProtocolHandler { typ: typ, graph: Arc::new(RwLock::new(ProtocolGraph::new())), stats_mcast: Multicast::spawn()? }) } fn update(&mut self, pkt: &PktMeta<T>) { let route_stats = { self.graph.write().unwrap().update(pkt) }; let route_stats_msg = Arc::new(RouteStatsMsg { typ: self.typ, route: route_stats }); self.stats_mcast.send(route_stats_msg).unwrap(); } } #[derive(Clone)] pub struct ProtoGraphController { pub cap_tx: Sender<Pkt>, pub mac: ProtocolHandler<MacAddr>, pub ip4: ProtocolHandler<IP4Addr>, pub ip6: ProtocolHandler<IP6Addr>, } impl ProtoGraphController { fn spawn() -> io::Result<ProtoGraphController> { let (cap_tx, cap_rx) = channel(); let ctl = ProtoGraphController { cap_tx: cap_tx, mac: ProtocolHandler::new("mac")?, ip4: ProtocolHandler::new("ip4")?, ip6: ProtocolHandler::new("ip6")?, }; let mut phctl = ctl.clone(); thread::Builder::new().name("protocol_handler".to_owned()).spawn(move || { loop { let pkt = cap_rx.recv(); if pkt.is_err() { break } match pkt.unwrap() { Pkt::Mac(ref p) => phctl.mac.update(p), Pkt::IP4(ref p) => phctl.ip4.update(p), Pkt::IP6(ref p) => phctl.ip6.update(p), } } })?; Ok(ctl) } fn sender(&self) -> Sender<Pkt> { self.cap_tx.clone() } fn register_mac_listener(&self, s: Sender<Arc<RouteStatsMsg<MacAddr>>>) { self.mac.stats_mcast.register(s).unwrap(); } fn register_ip4_listener(&self, s: Sender<Arc<RouteStatsMsg<IP4Addr>>>) { self.ip4.stats_mcast.register(s).unwrap(); } fn register_ip6_listener(&self, s: Sender<Arc<RouteStatsMsg<IP6Addr>>>) { self.ip6.stats_mcast.register(s).unwrap(); } } enum ParseErr { Send, UnknownPacket } impl<T> From<SendError<T>> for ParseErr { fn from(_: SendError<T>) -> ParseErr { ParseErr::Send } } trait PktParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr>; } pub struct CaptureCtx { sess: cap::PcapSession, parser: Box<PktParser+'static> } impl CaptureCtx { fn parse_next(&mut self) { let p = &mut self.parser; self.sess.next(|cap| { match p.parse(cap) { _ => () //just ignore } }); } } struct EthernetParser { pkts: Sender<Pkt>, } impl PktParser for EthernetParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> { let ether_hdr = unsafe { &*(pkt.pkt_ptr() as *const EthernetHeader) }; self.pkts.send(Pkt::Mac(PktMeta::new(ether_hdr.src, ether_hdr.dst, pkt.len())))?; match ether_hdr.typ { ETHERTYPE_ARP => { //io::println("ARP!"); }, ETHERTYPE_IP4 => { 
let ipp: &IP4Header = unsafe { skip_cast(ether_hdr) }; self.pkts.send(Pkt::IP4(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?; }, ETHERTYPE_IP6 => { let ipp: &IP6Header = unsafe { skip_cast(ether_hdr) }; self.pkts.send(Pkt::IP6(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?; }, ETHERTYPE_802_1X => { //io::println("802.1X!"); }, _ => { //println!("Unknown type: {:x}", x); } } Ok(()) } } #[derive(Debug)] pub struct PhysData { // TODO: this name sucks frame_ty: FrameType, addrs: [MacAddr; 3], rate: Option<tap::Rate>, channel: tap::Channel, antenna_signal: tap::AntennaSignal, antenna_noise: tap::AntennaNoise, antenna: tap::Antenna, } impl PhysData { fn new(frame_ty: FrameType, addrs: [MacAddr; 3], rate: Option<tap::Rate>, channel: tap::Channel, antenna_signal: tap::AntennaSignal, antenna_noise: tap::AntennaNoise, antenna: tap::Antenna, ) -> PhysData { PhysData { frame_ty: frame_ty, addrs: addrs, rate: rate, channel: channel, antenna_signal: antenna_signal, antenna_noise: antenna_noise, antenna: antenna } } fn dist(&self) -> f32 { let freq = f32::from(self.channel.mhz); let signal = f32::from(self.antenna_signal.dbm); let exp = (27.55 - (20.0 * freq.log10()) + signal.abs()) / 20.0; (10.0f32).powf(exp) } } #[derive(PartialEq, Eq, Hash)] pub struct PhysDataKey(pub FrameType, pub [MacAddr;3]); pub struct PhysDataVal { pub dat: FixedRingBuffer<PhysData>, pub count: u32, } impl PhysDataVal { pub fn new() -> PhysDataVal { PhysDataVal { dat: FixedRingBuffer::new(10), count: 0 } } pub fn avg_dist(&self) -> f32 { let mut s = 0.0; for pd in self.dat.iter() { s += pd.dist(); } s / (self.dat.len() as f32) } } #[derive(Clone)] pub struct PhysDataController { pub map: Arc<RwLock<HashMap<PhysDataKey, PhysDataVal>>>, pd_tx: Sender<PhysData> } impl PhysDataController { fn spawn() -> io::Result<PhysDataController> { let (pd_tx, pd_rx) = channel(); let out = PhysDataController { pd_tx: pd_tx, map: Arc::new(RwLock::new(HashMap::new())) }; let ctl = out.clone(); thread::Builder::new().name("physdata_handler".to_owned()).spawn(move || { loop { let res = pd_rx.recv(); if res.is_err() { break } let pd = res.unwrap(); match ctl.map.write().unwrap().entry(PhysDataKey(pd.frame_ty, pd.addrs)) { Entry::Occupied(mut e) => { let mut pdc = e.get_mut(); pdc.dat.push(pd); pdc.count += 1; } Entry::Vacant(e) => { let mut pdc = PhysDataVal::new(); pdc.dat.push(pd); pdc.count += 1; e.insert(pdc); } }; } })?; Ok(out) } fn sender(&self) -> Sender<PhysData> { self.pd_tx.clone() } } struct RadiotapParser { pkts: Sender<Pkt>, phys: Sender<PhysData> } impl RadiotapParser { fn parse_known_headers(&self, frame_ty: FrameType, addrs: [MacAddr; 3], tap_hdr: &tap::RadiotapHeader) { match tap_hdr.it_present { tap::ItPresent::COMMON_A => { if let Some(vals) = tap::CommonA::parse(tap_hdr) { self.phys.send(PhysData::new( frame_ty, addrs, Some(vals.rate), vals.channel, vals.antenna_signal, vals.antenna_noise, vals.antenna )).unwrap(); } }, tap::ItPresent::COMMON_B => { if let Some(vals) = tap::CommonB::parse(tap_hdr) { self.phys.send(PhysData::new( frame_ty, addrs, None, vals.channel, vals.antenna_signal, vals.antenna_noise, vals.antenna )).unwrap(); } }, _ => {} //Unknown header } } } impl PktParser for RadiotapParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> { fn magic<U>(pkt: &tap::RadiotapHeader) -> &U { unsafe { skip_bytes_cast(pkt, pkt.it_len as isize) } } let tap_hdr = unsafe { &*(pkt.pkt_ptr() as *const tap::RadiotapHeader) }; let base: &dot11::Dot11BaseHeader = magic(tap_hdr); let fc = 
&base.fr_ctrl; if fc.protocol_version()!= 0 { // bogus packet, bail return Err(ParseErr::UnknownPacket); } match fc.frame_type() { ft @ FrameType::Management => { let mgt: &dot11::ManagementFrameHeader = magic(tap_hdr); self.parse_known_headers(ft, [mgt.addr1, mgt.addr2, mgt.addr3], tap_hdr); } ft @ FrameType::Data => { let data: &dot11::DataFrameHeader = magic(tap_hdr); //TODO: get length self.pkts.send(Pkt::Mac(PktMeta::new(data.addr1, data.addr2, 1)))?; self.parse_known_headers(ft, [data.addr1, data.addr2, data.addr3], tap_hdr); } FrameType::Control | FrameType::Unknown => { //println!("Unknown frame type"); } } Ok(()) } } pub fn init_capture(conf: &D3capConf, pkt_sender: Sender<Pkt>, pd_sender: Sender<PhysData>) -> CaptureCtx { let sess = match conf.file { Some(ref f) => cap::PcapSession::from_file(f), None => { println!("No session file"); let sess_builder = match conf.interface { Some(ref dev) => cap::PcapSessionBuilder::new_dev(dev), None => cap::PcapSessionBuilder::new() }; sess_builder.unwrap() .buffer_size(0xFFFF) .timeout(1000) .promisc(conf.promisc) .rfmon(conf.monitor) .activate() } }; let parser = match sess.datalink() { cap::DLT_ETHERNET => { Box::new(EthernetParser { pkts: pkt_sender }) as Box<PktParser> } cap::DLT_IEEE802_11_RADIO => { Box::new(RadiotapParser { pkts: pkt_sender, phys: pd_sender }) as Box<PktParser> } x => panic!("unsupported datalink type: {}", x) }; CaptureCtx { sess: sess, parser: parser } } pub fn start_capture(conf: D3capConf, pkt_sender: Sender<Pkt>, pd_sender: Sender<PhysData>) -> io::Result<JoinHandle<()>> { thread::Builder::new().name("packet_capture".to_owned()).spawn(move || { let mut cap = init_capture(&conf, pkt_sender, pd_sender); loop { cap.parse_next(); } }) } enum LoadMacError { IOError(io::Error), TomlError(Option<toml::de::Error>) } impl From<io::Error> for LoadMacError { fn from(err: io::Error) -> LoadMacError { LoadMacError::IOError(err) } } impl From<toml::de::Error> for LoadMacError { fn from(err: toml::de::Error) -> LoadMacError { LoadMacError::TomlError(Some(err)) } } fn load_mac_addrs(file: &str) -> Result<HashMap<MacAddr, String>, LoadMacError> { let mut s = String::new(); let mut f = File::open(&file)?; f.read_to_string(&mut s)?; let t = s.parse::<toml::Value>()?; if let Some(k) = t.get(&"known-macs".to_owned()) { if let Some(tbl) = k.as_table() { return Ok(tbl.iter() .map(|(k, v)| (MacAddr::from_string(k), v.as_str())) .filter_map(|x| match x { (Some(addr), Some(alias)) => Some((addr, alias.to_owned())), _ => None })
} } Err(LoadMacError::TomlError(None)) } fn start_websocket(port: u16, mac_map: &MacMap, pg_ctl: &ProtoGraphController) -> io::Result<()> { let ui = UIServer::spawn(port, mac_map)?; pg_ctl.register_mac_listener(ui.create_sender()?); pg_ctl.register_ip4_listener(ui.create_sender()?); pg_ctl.register_ip6_listener(ui.create_sender()?); Ok(()) } pub type MacMap = HashMap<MacAddr, String>; pub type IP4Map = HashMap<IP4Addr, String>; pub type IP6Map = HashMap<IP6Addr, String>; #[derive(Clone)] pub struct D3capController { pub pg_ctrl: ProtoGraphController, pub pd_ctrl: PhysDataController, pub mac_names: MacMap, pub ip4_names: IP4Map, pub ip6_names: IP6Map, pub server_started: bool } impl D3capController { pub fn spawn(conf: D3capConf) -> io::Result<D3capController> { let mac_names = conf.conf.as_ref() .map_or_else(HashMap::new, |x| { load_mac_addrs(x).unwrap_or_else(|_| HashMap::new()) }); let ip4_names = HashMap::new(); let ip6_names = HashMap::new(); let pg_ctrl = ProtoGraphController::spawn()?; let pd_ctrl = PhysDataController::spawn()?; start_capture(conf, pg_ctrl.sender(), pd_ctrl.sender()).unwrap(); Ok(D3capController { pg_ctrl: pg_ctrl, pd_ctrl: pd_ctrl, mac_names: mac_names, ip4_names: ip4_names, ip6_names: ip6_names, server_started: false }) } pub fn start_websocket(&mut self, port: u16) -> io::Result<()> { if self.server_started { println!("server already started"); } else { start_websocket(port, &self.mac_names, &self.pg_ctrl)?; self.server_started = true; } Ok(()) } } #[derive(Clone, Debug)] pub struct D3capConf { pub websocket: Option<u16>, pub interface: Option<String>, pub file: Option<String>, pub conf: Option<String>, pub promisc: bool, pub monitor: bool }
.collect())
random_line_split
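The middle `.collect())` completes `load_mac_addrs`, which reads a `[known-macs]` TOML table into a MacAddr-to-alias map. A reduced sketch of the same lookup, assuming the `toml` crate; the table contents here are invented:

fn main() {
    let doc = r#"
        [known-macs]
        "aa:bb:cc:dd:ee:ff" = "laptop"
    "#;
    let value = doc.parse::<toml::Value>().expect("valid TOML");
    if let Some(tbl) = value.get("known-macs").and_then(|v| v.as_table()) {
        for (mac, alias) in tbl {
            // as_str() drops non-string values, like the filter_map above.
            if let Some(alias) = alias.as_str() {
                println!("{mac} -> {alias}");
            }
        }
    }
}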
d3cap.rs
use std::thread::{self, JoinHandle}; use std::hash::{Hash}; use std::collections::hash_map::{Entry, HashMap}; use std::fs::File; use std::io::{self, Read}; use std::sync::{Arc,RwLock}; use std::sync::mpsc::{channel, Sender, SendError}; use toml; use multicast::Multicast; use json_serve::uiserver::UIServer; use util::{ntohs, skip_bytes_cast, skip_cast}; use ip::{IP4Addr, IP6Addr, IP4Header, IP6Header}; use ether::{EthernetHeader, MacAddr, ETHERTYPE_ARP, ETHERTYPE_IP4, ETHERTYPE_IP6, ETHERTYPE_802_1X}; use dot11::{self, FrameType}; use tap; use pkt_graph::{PktMeta, ProtocolGraph, RouteStats}; use fixed_ring::FixedRingBuffer; use pcap::pcap as cap; #[derive(RustcEncodable, Clone)] struct RouteStatsMsg<T> { typ: &'static str, route: RouteStats<T>, } #[derive(Debug)] pub enum Pkt { Mac(PktMeta<MacAddr>), IP4(PktMeta<IP4Addr>), IP6(PktMeta<IP6Addr>), } #[derive(Clone)] pub struct ProtocolHandler<T:Eq+Hash+Send+Sync+'static> { pub typ: &'static str, pub graph: Arc<RwLock<ProtocolGraph<T>>>, stats_mcast: Multicast<RouteStatsMsg<T>>, } impl <T:Send+Sync+Copy+Clone+Eq+Hash> ProtocolHandler<T> { fn new(typ: &'static str) -> io::Result<ProtocolHandler<T>> { Ok(ProtocolHandler { typ: typ, graph: Arc::new(RwLock::new(ProtocolGraph::new())), stats_mcast: Multicast::spawn()? }) } fn update(&mut self, pkt: &PktMeta<T>) { let route_stats = { self.graph.write().unwrap().update(pkt) }; let route_stats_msg = Arc::new(RouteStatsMsg { typ: self.typ, route: route_stats }); self.stats_mcast.send(route_stats_msg).unwrap(); } } #[derive(Clone)] pub struct ProtoGraphController { pub cap_tx: Sender<Pkt>, pub mac: ProtocolHandler<MacAddr>, pub ip4: ProtocolHandler<IP4Addr>, pub ip6: ProtocolHandler<IP6Addr>, } impl ProtoGraphController { fn spawn() -> io::Result<ProtoGraphController> { let (cap_tx, cap_rx) = channel(); let ctl = ProtoGraphController { cap_tx: cap_tx, mac: ProtocolHandler::new("mac")?, ip4: ProtocolHandler::new("ip4")?, ip6: ProtocolHandler::new("ip6")?, }; let mut phctl = ctl.clone(); thread::Builder::new().name("protocol_handler".to_owned()).spawn(move || { loop { let pkt = cap_rx.recv(); if pkt.is_err() { break } match pkt.unwrap() { Pkt::Mac(ref p) => phctl.mac.update(p), Pkt::IP4(ref p) => phctl.ip4.update(p), Pkt::IP6(ref p) => phctl.ip6.update(p), } } })?; Ok(ctl) } fn sender(&self) -> Sender<Pkt> { self.cap_tx.clone() } fn register_mac_listener(&self, s: Sender<Arc<RouteStatsMsg<MacAddr>>>) { self.mac.stats_mcast.register(s).unwrap(); } fn register_ip4_listener(&self, s: Sender<Arc<RouteStatsMsg<IP4Addr>>>) { self.ip4.stats_mcast.register(s).unwrap(); } fn register_ip6_listener(&self, s: Sender<Arc<RouteStatsMsg<IP6Addr>>>) { self.ip6.stats_mcast.register(s).unwrap(); } } enum ParseErr { Send, UnknownPacket } impl<T> From<SendError<T>> for ParseErr { fn from(_: SendError<T>) -> ParseErr { ParseErr::Send } } trait PktParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr>; } pub struct CaptureCtx { sess: cap::PcapSession, parser: Box<PktParser+'static> } impl CaptureCtx { fn parse_next(&mut self) { let p = &mut self.parser; self.sess.next(|cap| { match p.parse(cap) { _ => () //just ignore } }); } } struct EthernetParser { pkts: Sender<Pkt>, } impl PktParser for EthernetParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> { let ether_hdr = unsafe { &*(pkt.pkt_ptr() as *const EthernetHeader) }; self.pkts.send(Pkt::Mac(PktMeta::new(ether_hdr.src, ether_hdr.dst, pkt.len())))?; match ether_hdr.typ { ETHERTYPE_ARP => { //io::println("ARP!"); }, ETHERTYPE_IP4 => { 
let ipp: &IP4Header = unsafe { skip_cast(ether_hdr) }; self.pkts.send(Pkt::IP4(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?; }, ETHERTYPE_IP6 => { let ipp: &IP6Header = unsafe { skip_cast(ether_hdr) }; self.pkts.send(Pkt::IP6(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?; }, ETHERTYPE_802_1X => { //io::println("802.1X!"); }, _ => { //println!("Unknown type: {:x}", x); } } Ok(()) } } #[derive(Debug)] pub struct PhysData { // TODO: this name sucks frame_ty: FrameType, addrs: [MacAddr; 3], rate: Option<tap::Rate>, channel: tap::Channel, antenna_signal: tap::AntennaSignal, antenna_noise: tap::AntennaNoise, antenna: tap::Antenna, } impl PhysData { fn new(frame_ty: FrameType, addrs: [MacAddr; 3], rate: Option<tap::Rate>, channel: tap::Channel, antenna_signal: tap::AntennaSignal, antenna_noise: tap::AntennaNoise, antenna: tap::Antenna, ) -> PhysData { PhysData { frame_ty: frame_ty, addrs: addrs, rate: rate, channel: channel, antenna_signal: antenna_signal, antenna_noise: antenna_noise, antenna: antenna } } fn dist(&self) -> f32 { let freq = f32::from(self.channel.mhz); let signal = f32::from(self.antenna_signal.dbm); let exp = (27.55 - (20.0 * freq.log10()) + signal.abs()) / 20.0; (10.0f32).powf(exp) } } #[derive(PartialEq, Eq, Hash)] pub struct PhysDataKey(pub FrameType, pub [MacAddr;3]); pub struct PhysDataVal { pub dat: FixedRingBuffer<PhysData>, pub count: u32, } impl PhysDataVal { pub fn new() -> PhysDataVal { PhysDataVal { dat: FixedRingBuffer::new(10), count: 0 } } pub fn avg_dist(&self) -> f32 { let mut s = 0.0; for pd in self.dat.iter() { s += pd.dist(); } s / (self.dat.len() as f32) } } #[derive(Clone)] pub struct PhysDataController { pub map: Arc<RwLock<HashMap<PhysDataKey, PhysDataVal>>>, pd_tx: Sender<PhysData> } impl PhysDataController { fn spawn() -> io::Result<PhysDataController> { let (pd_tx, pd_rx) = channel(); let out = PhysDataController { pd_tx: pd_tx, map: Arc::new(RwLock::new(HashMap::new())) }; let ctl = out.clone(); thread::Builder::new().name("physdata_handler".to_owned()).spawn(move || { loop { let res = pd_rx.recv(); if res.is_err() { break } let pd = res.unwrap(); match ctl.map.write().unwrap().entry(PhysDataKey(pd.frame_ty, pd.addrs)) { Entry::Occupied(mut e) => { let mut pdc = e.get_mut(); pdc.dat.push(pd); pdc.count += 1; } Entry::Vacant(e) => { let mut pdc = PhysDataVal::new(); pdc.dat.push(pd); pdc.count += 1; e.insert(pdc); } }; } })?; Ok(out) } fn sender(&self) -> Sender<PhysData> { self.pd_tx.clone() } } struct RadiotapParser { pkts: Sender<Pkt>, phys: Sender<PhysData> } impl RadiotapParser { fn parse_known_headers(&self, frame_ty: FrameType, addrs: [MacAddr; 3], tap_hdr: &tap::RadiotapHeader) { match tap_hdr.it_present { tap::ItPresent::COMMON_A => { if let Some(vals) = tap::CommonA::parse(tap_hdr) { self.phys.send(PhysData::new( frame_ty, addrs, Some(vals.rate), vals.channel, vals.antenna_signal, vals.antenna_noise, vals.antenna )).unwrap(); } }, tap::ItPresent::COMMON_B => { if let Some(vals) = tap::CommonB::parse(tap_hdr) { self.phys.send(PhysData::new( frame_ty, addrs, None, vals.channel, vals.antenna_signal, vals.antenna_noise, vals.antenna )).unwrap(); } }, _ => {} //Unknown header } } } impl PktParser for RadiotapParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> { fn magic<U>(pkt: &tap::RadiotapHeader) -> &U
let tap_hdr = unsafe { &*(pkt.pkt_ptr() as *const tap::RadiotapHeader) }; let base: &dot11::Dot11BaseHeader = magic(tap_hdr); let fc = &base.fr_ctrl; if fc.protocol_version()!= 0 { // bogus packet, bail return Err(ParseErr::UnknownPacket); } match fc.frame_type() { ft @ FrameType::Management => { let mgt: &dot11::ManagementFrameHeader = magic(tap_hdr); self.parse_known_headers(ft, [mgt.addr1, mgt.addr2, mgt.addr3], tap_hdr); } ft @ FrameType::Data => { let data: &dot11::DataFrameHeader = magic(tap_hdr); //TODO: get length self.pkts.send(Pkt::Mac(PktMeta::new(data.addr1, data.addr2, 1)))?; self.parse_known_headers(ft, [data.addr1, data.addr2, data.addr3], tap_hdr); } FrameType::Control | FrameType::Unknown => { //println!("Unknown frame type"); } } Ok(()) } } pub fn init_capture(conf: &D3capConf, pkt_sender: Sender<Pkt>, pd_sender: Sender<PhysData>) -> CaptureCtx { let sess = match conf.file { Some(ref f) => cap::PcapSession::from_file(f), None => { println!("No session file"); let sess_builder = match conf.interface { Some(ref dev) => cap::PcapSessionBuilder::new_dev(dev), None => cap::PcapSessionBuilder::new() }; sess_builder.unwrap() .buffer_size(0xFFFF) .timeout(1000) .promisc(conf.promisc) .rfmon(conf.monitor) .activate() } }; let parser = match sess.datalink() { cap::DLT_ETHERNET => { Box::new(EthernetParser { pkts: pkt_sender }) as Box<PktParser> } cap::DLT_IEEE802_11_RADIO => { Box::new(RadiotapParser { pkts: pkt_sender, phys: pd_sender }) as Box<PktParser> } x => panic!("unsupported datalink type: {}", x) }; CaptureCtx { sess: sess, parser: parser } } pub fn start_capture(conf: D3capConf, pkt_sender: Sender<Pkt>, pd_sender: Sender<PhysData>) -> io::Result<JoinHandle<()>> { thread::Builder::new().name("packet_capture".to_owned()).spawn(move || { let mut cap = init_capture(&conf, pkt_sender, pd_sender); loop { cap.parse_next(); } }) } enum LoadMacError { IOError(io::Error), TomlError(Option<toml::de::Error>) } impl From<io::Error> for LoadMacError { fn from(err: io::Error) -> LoadMacError { LoadMacError::IOError(err) } } impl From<toml::de::Error> for LoadMacError { fn from(err: toml::de::Error) -> LoadMacError { LoadMacError::TomlError(Some(err)) } } fn load_mac_addrs(file: &str) -> Result<HashMap<MacAddr, String>, LoadMacError> { let mut s = String::new(); let mut f = File::open(&file)?; f.read_to_string(&mut s)?; let t = s.parse::<toml::Value>()?; if let Some(k) = t.get(&"known-macs".to_owned()) { if let Some(tbl) = k.as_table() { return Ok(tbl.iter() .map(|(k, v)| (MacAddr::from_string(k), v.as_str())) .filter_map(|x| match x { (Some(addr), Some(alias)) => Some((addr, alias.to_owned())), _ => None }) .collect()) } } Err(LoadMacError::TomlError(None)) } fn start_websocket(port: u16, mac_map: &MacMap, pg_ctl: &ProtoGraphController) -> io::Result<()> { let ui = UIServer::spawn(port, mac_map)?; pg_ctl.register_mac_listener(ui.create_sender()?); pg_ctl.register_ip4_listener(ui.create_sender()?); pg_ctl.register_ip6_listener(ui.create_sender()?); Ok(()) } pub type MacMap = HashMap<MacAddr, String>; pub type IP4Map = HashMap<IP4Addr, String>; pub type IP6Map = HashMap<IP6Addr, String>; #[derive(Clone)] pub struct D3capController { pub pg_ctrl: ProtoGraphController, pub pd_ctrl: PhysDataController, pub mac_names: MacMap, pub ip4_names: IP4Map, pub ip6_names: IP6Map, pub server_started: bool } impl D3capController { pub fn spawn(conf: D3capConf) -> io::Result<D3capController> { let mac_names = conf.conf.as_ref() .map_or_else(HashMap::new, |x| { load_mac_addrs(x).unwrap_or_else(|_| 
HashMap::new()) }); let ip4_names = HashMap::new(); let ip6_names = HashMap::new(); let pg_ctrl = ProtoGraphController::spawn()?; let pd_ctrl = PhysDataController::spawn()?; start_capture(conf, pg_ctrl.sender(), pd_ctrl.sender()).unwrap(); Ok(D3capController { pg_ctrl: pg_ctrl, pd_ctrl: pd_ctrl, mac_names: mac_names, ip4_names: ip4_names, ip6_names: ip6_names, server_started: false }) } pub fn start_websocket(&mut self, port: u16) -> io::Result<()> { if self.server_started { println!("server already started"); } else { start_websocket(port, &self.mac_names, &self.pg_ctrl)?; self.server_started = true; } Ok(()) } } #[derive(Clone, Debug)] pub struct D3capConf { pub websocket: Option<u16>, pub interface: Option<String>, pub file: Option<String>, pub conf: Option<String>, pub promisc: bool, pub monitor: bool }
{ unsafe { skip_bytes_cast(pkt, pkt.it_len as isize) } }
identifier_body
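The identifier_body middle above is the `magic` helper, but the notable arithmetic in this record is `PhysData::dist`, which inverts the free-space path-loss model to turn frequency and signal strength into a distance estimate. Standalone, with the constant 27.55 folding in the MHz/meter unit conversions:

// Estimate distance in meters from frequency (MHz) and RSSI (dBm),
// mirroring PhysData::dist above: d = 10^((27.55 - 20*log10(f) + |s|) / 20).
fn fspl_distance_m(freq_mhz: f32, signal_dbm: f32) -> f32 {
    let exp = (27.55 - 20.0 * freq_mhz.log10() + signal_dbm.abs()) / 20.0;
    10.0f32.powf(exp)
}

fn main() {
    // e.g. a 2437 MHz (Wi-Fi channel 6) frame heard at -40 dBm
    let d = fspl_distance_m(2437.0, -40.0);
    println!("estimated distance: {d:.2} m");
}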
d3cap.rs
use std::thread::{self, JoinHandle}; use std::hash::{Hash}; use std::collections::hash_map::{Entry, HashMap}; use std::fs::File; use std::io::{self, Read}; use std::sync::{Arc,RwLock}; use std::sync::mpsc::{channel, Sender, SendError}; use toml; use multicast::Multicast; use json_serve::uiserver::UIServer; use util::{ntohs, skip_bytes_cast, skip_cast}; use ip::{IP4Addr, IP6Addr, IP4Header, IP6Header}; use ether::{EthernetHeader, MacAddr, ETHERTYPE_ARP, ETHERTYPE_IP4, ETHERTYPE_IP6, ETHERTYPE_802_1X}; use dot11::{self, FrameType}; use tap; use pkt_graph::{PktMeta, ProtocolGraph, RouteStats}; use fixed_ring::FixedRingBuffer; use pcap::pcap as cap; #[derive(RustcEncodable, Clone)] struct RouteStatsMsg<T> { typ: &'static str, route: RouteStats<T>, } #[derive(Debug)] pub enum Pkt { Mac(PktMeta<MacAddr>), IP4(PktMeta<IP4Addr>), IP6(PktMeta<IP6Addr>), } #[derive(Clone)] pub struct ProtocolHandler<T:Eq+Hash+Send+Sync+'static> { pub typ: &'static str, pub graph: Arc<RwLock<ProtocolGraph<T>>>, stats_mcast: Multicast<RouteStatsMsg<T>>, } impl <T:Send+Sync+Copy+Clone+Eq+Hash> ProtocolHandler<T> { fn new(typ: &'static str) -> io::Result<ProtocolHandler<T>> { Ok(ProtocolHandler { typ: typ, graph: Arc::new(RwLock::new(ProtocolGraph::new())), stats_mcast: Multicast::spawn()? }) } fn update(&mut self, pkt: &PktMeta<T>) { let route_stats = { self.graph.write().unwrap().update(pkt) }; let route_stats_msg = Arc::new(RouteStatsMsg { typ: self.typ, route: route_stats }); self.stats_mcast.send(route_stats_msg).unwrap(); } } #[derive(Clone)] pub struct ProtoGraphController { pub cap_tx: Sender<Pkt>, pub mac: ProtocolHandler<MacAddr>, pub ip4: ProtocolHandler<IP4Addr>, pub ip6: ProtocolHandler<IP6Addr>, } impl ProtoGraphController { fn spawn() -> io::Result<ProtoGraphController> { let (cap_tx, cap_rx) = channel(); let ctl = ProtoGraphController { cap_tx: cap_tx, mac: ProtocolHandler::new("mac")?, ip4: ProtocolHandler::new("ip4")?, ip6: ProtocolHandler::new("ip6")?, }; let mut phctl = ctl.clone(); thread::Builder::new().name("protocol_handler".to_owned()).spawn(move || { loop { let pkt = cap_rx.recv(); if pkt.is_err() { break } match pkt.unwrap() { Pkt::Mac(ref p) => phctl.mac.update(p), Pkt::IP4(ref p) => phctl.ip4.update(p), Pkt::IP6(ref p) => phctl.ip6.update(p), } } })?; Ok(ctl) } fn sender(&self) -> Sender<Pkt> { self.cap_tx.clone() } fn register_mac_listener(&self, s: Sender<Arc<RouteStatsMsg<MacAddr>>>) { self.mac.stats_mcast.register(s).unwrap(); } fn register_ip4_listener(&self, s: Sender<Arc<RouteStatsMsg<IP4Addr>>>) { self.ip4.stats_mcast.register(s).unwrap(); } fn register_ip6_listener(&self, s: Sender<Arc<RouteStatsMsg<IP6Addr>>>) { self.ip6.stats_mcast.register(s).unwrap(); } } enum ParseErr { Send, UnknownPacket } impl<T> From<SendError<T>> for ParseErr { fn from(_: SendError<T>) -> ParseErr { ParseErr::Send } } trait PktParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr>; } pub struct CaptureCtx { sess: cap::PcapSession, parser: Box<PktParser+'static> } impl CaptureCtx { fn parse_next(&mut self) { let p = &mut self.parser; self.sess.next(|cap| { match p.parse(cap) { _ => () //just ignore } }); } } struct EthernetParser { pkts: Sender<Pkt>, } impl PktParser for EthernetParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> { let ether_hdr = unsafe { &*(pkt.pkt_ptr() as *const EthernetHeader) }; self.pkts.send(Pkt::Mac(PktMeta::new(ether_hdr.src, ether_hdr.dst, pkt.len())))?; match ether_hdr.typ { ETHERTYPE_ARP => { //io::println("ARP!"); }, ETHERTYPE_IP4 => { 
let ipp: &IP4Header = unsafe { skip_cast(ether_hdr) }; self.pkts.send(Pkt::IP4(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?; }, ETHERTYPE_IP6 => { let ipp: &IP6Header = unsafe { skip_cast(ether_hdr) }; self.pkts.send(Pkt::IP6(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?; }, ETHERTYPE_802_1X => { //io::println("802.1X!"); }, _ => { //println!("Unknown type: {:x}", x); } } Ok(()) } } #[derive(Debug)] pub struct PhysData { // TODO: this name sucks frame_ty: FrameType, addrs: [MacAddr; 3], rate: Option<tap::Rate>, channel: tap::Channel, antenna_signal: tap::AntennaSignal, antenna_noise: tap::AntennaNoise, antenna: tap::Antenna, } impl PhysData { fn new(frame_ty: FrameType, addrs: [MacAddr; 3], rate: Option<tap::Rate>, channel: tap::Channel, antenna_signal: tap::AntennaSignal, antenna_noise: tap::AntennaNoise, antenna: tap::Antenna, ) -> PhysData { PhysData { frame_ty: frame_ty, addrs: addrs, rate: rate, channel: channel, antenna_signal: antenna_signal, antenna_noise: antenna_noise, antenna: antenna } } fn dist(&self) -> f32 { let freq = f32::from(self.channel.mhz); let signal = f32::from(self.antenna_signal.dbm); let exp = (27.55 - (20.0 * freq.log10()) + signal.abs()) / 20.0; (10.0f32).powf(exp) } } #[derive(PartialEq, Eq, Hash)] pub struct PhysDataKey(pub FrameType, pub [MacAddr;3]); pub struct PhysDataVal { pub dat: FixedRingBuffer<PhysData>, pub count: u32, } impl PhysDataVal { pub fn new() -> PhysDataVal { PhysDataVal { dat: FixedRingBuffer::new(10), count: 0 } } pub fn avg_dist(&self) -> f32 { let mut s = 0.0; for pd in self.dat.iter() { s += pd.dist(); } s / (self.dat.len() as f32) } } #[derive(Clone)] pub struct PhysDataController { pub map: Arc<RwLock<HashMap<PhysDataKey, PhysDataVal>>>, pd_tx: Sender<PhysData> } impl PhysDataController { fn spawn() -> io::Result<PhysDataController> { let (pd_tx, pd_rx) = channel(); let out = PhysDataController { pd_tx: pd_tx, map: Arc::new(RwLock::new(HashMap::new())) }; let ctl = out.clone(); thread::Builder::new().name("physdata_handler".to_owned()).spawn(move || { loop { let res = pd_rx.recv(); if res.is_err() { break } let pd = res.unwrap(); match ctl.map.write().unwrap().entry(PhysDataKey(pd.frame_ty, pd.addrs)) { Entry::Occupied(mut e) => { let mut pdc = e.get_mut(); pdc.dat.push(pd); pdc.count += 1; } Entry::Vacant(e) => { let mut pdc = PhysDataVal::new(); pdc.dat.push(pd); pdc.count += 1; e.insert(pdc); } }; } })?; Ok(out) } fn sender(&self) -> Sender<PhysData> { self.pd_tx.clone() } } struct RadiotapParser { pkts: Sender<Pkt>, phys: Sender<PhysData> } impl RadiotapParser { fn parse_known_headers(&self, frame_ty: FrameType, addrs: [MacAddr; 3], tap_hdr: &tap::RadiotapHeader) { match tap_hdr.it_present { tap::ItPresent::COMMON_A => { if let Some(vals) = tap::CommonA::parse(tap_hdr) { self.phys.send(PhysData::new( frame_ty, addrs, Some(vals.rate), vals.channel, vals.antenna_signal, vals.antenna_noise, vals.antenna )).unwrap(); } }, tap::ItPresent::COMMON_B => { if let Some(vals) = tap::CommonB::parse(tap_hdr) { self.phys.send(PhysData::new( frame_ty, addrs, None, vals.channel, vals.antenna_signal, vals.antenna_noise, vals.antenna )).unwrap(); } }, _ => {} //Unknown header } } } impl PktParser for RadiotapParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> { fn magic<U>(pkt: &tap::RadiotapHeader) -> &U { unsafe { skip_bytes_cast(pkt, pkt.it_len as isize) } } let tap_hdr = unsafe { &*(pkt.pkt_ptr() as *const tap::RadiotapHeader) }; let base: &dot11::Dot11BaseHeader = magic(tap_hdr); let fc = 
&base.fr_ctrl; if fc.protocol_version()!= 0 { // bogus packet, bail return Err(ParseErr::UnknownPacket); } match fc.frame_type() { ft @ FrameType::Management => { let mgt: &dot11::ManagementFrameHeader = magic(tap_hdr); self.parse_known_headers(ft, [mgt.addr1, mgt.addr2, mgt.addr3], tap_hdr); } ft @ FrameType::Data => { let data: &dot11::DataFrameHeader = magic(tap_hdr); //TODO: get length self.pkts.send(Pkt::Mac(PktMeta::new(data.addr1, data.addr2, 1)))?; self.parse_known_headers(ft, [data.addr1, data.addr2, data.addr3], tap_hdr); } FrameType::Control | FrameType::Unknown => { //println!("Unknown frame type"); } } Ok(()) } } pub fn init_capture(conf: &D3capConf, pkt_sender: Sender<Pkt>, pd_sender: Sender<PhysData>) -> CaptureCtx { let sess = match conf.file { Some(ref f) => cap::PcapSession::from_file(f), None => { println!("No session file"); let sess_builder = match conf.interface { Some(ref dev) => cap::PcapSessionBuilder::new_dev(dev), None => cap::PcapSessionBuilder::new() }; sess_builder.unwrap() .buffer_size(0xFFFF) .timeout(1000) .promisc(conf.promisc) .rfmon(conf.monitor) .activate() } }; let parser = match sess.datalink() { cap::DLT_ETHERNET => { Box::new(EthernetParser { pkts: pkt_sender }) as Box<PktParser> } cap::DLT_IEEE802_11_RADIO => { Box::new(RadiotapParser { pkts: pkt_sender, phys: pd_sender }) as Box<PktParser> } x => panic!("unsupported datalink type: {}", x) }; CaptureCtx { sess: sess, parser: parser } } pub fn
(conf: D3capConf, pkt_sender: Sender<Pkt>, pd_sender: Sender<PhysData>) -> io::Result<JoinHandle<()>> { thread::Builder::new().name("packet_capture".to_owned()).spawn(move || { let mut cap = init_capture(&conf, pkt_sender, pd_sender); loop { cap.parse_next(); } }) } enum LoadMacError { IOError(io::Error), TomlError(Option<toml::de::Error>) } impl From<io::Error> for LoadMacError { fn from(err: io::Error) -> LoadMacError { LoadMacError::IOError(err) } } impl From<toml::de::Error> for LoadMacError { fn from(err: toml::de::Error) -> LoadMacError { LoadMacError::TomlError(Some(err)) } } fn load_mac_addrs(file: &str) -> Result<HashMap<MacAddr, String>, LoadMacError> { let mut s = String::new(); let mut f = File::open(&file)?; f.read_to_string(&mut s)?; let t = s.parse::<toml::Value>()?; if let Some(k) = t.get(&"known-macs".to_owned()) { if let Some(tbl) = k.as_table() { return Ok(tbl.iter() .map(|(k, v)| (MacAddr::from_string(k), v.as_str())) .filter_map(|x| match x { (Some(addr), Some(alias)) => Some((addr, alias.to_owned())), _ => None }) .collect()) } } Err(LoadMacError::TomlError(None)) } fn start_websocket(port: u16, mac_map: &MacMap, pg_ctl: &ProtoGraphController) -> io::Result<()> { let ui = UIServer::spawn(port, mac_map)?; pg_ctl.register_mac_listener(ui.create_sender()?); pg_ctl.register_ip4_listener(ui.create_sender()?); pg_ctl.register_ip6_listener(ui.create_sender()?); Ok(()) } pub type MacMap = HashMap<MacAddr, String>; pub type IP4Map = HashMap<IP4Addr, String>; pub type IP6Map = HashMap<IP6Addr, String>; #[derive(Clone)] pub struct D3capController { pub pg_ctrl: ProtoGraphController, pub pd_ctrl: PhysDataController, pub mac_names: MacMap, pub ip4_names: IP4Map, pub ip6_names: IP6Map, pub server_started: bool } impl D3capController { pub fn spawn(conf: D3capConf) -> io::Result<D3capController> { let mac_names = conf.conf.as_ref() .map_or_else(HashMap::new, |x| { load_mac_addrs(x).unwrap_or_else(|_| HashMap::new()) }); let ip4_names = HashMap::new(); let ip6_names = HashMap::new(); let pg_ctrl = ProtoGraphController::spawn()?; let pd_ctrl = PhysDataController::spawn()?; start_capture(conf, pg_ctrl.sender(), pd_ctrl.sender()).unwrap(); Ok(D3capController { pg_ctrl: pg_ctrl, pd_ctrl: pd_ctrl, mac_names: mac_names, ip4_names: ip4_names, ip6_names: ip6_names, server_started: false }) } pub fn start_websocket(&mut self, port: u16) -> io::Result<()> { if self.server_started { println!("server already started"); } else { start_websocket(port, &self.mac_names, &self.pg_ctrl)?; self.server_started = true; } Ok(()) } } #[derive(Clone, Debug)] pub struct D3capConf { pub websocket: Option<u16>, pub interface: Option<String>, pub file: Option<String>, pub conf: Option<String>, pub promisc: bool, pub monitor: bool }
start_capture
identifier_name
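The identifier to fill is `start_capture`, whose body spawns a named capture thread with `thread::Builder`. The same thread-plus-channel skeleton in isolation; the payload type is illustrative:

use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (tx, rx) = channel::<u32>();
    let handle = thread::Builder::new()
        .name("packet_capture".to_owned()) // shows up in panics and debuggers
        .spawn(move || {
            // recv() returns Err once every Sender has been dropped.
            while let Ok(len) = rx.recv() {
                println!("got packet of {len} bytes");
            }
        })
        .expect("failed to spawn thread");

    tx.send(1500).unwrap();
    drop(tx); // hang up so the worker loop exits
    handle.join().unwrap();
}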
d3cap.rs
use std::thread::{self, JoinHandle}; use std::hash::{Hash}; use std::collections::hash_map::{Entry, HashMap}; use std::fs::File; use std::io::{self, Read}; use std::sync::{Arc,RwLock}; use std::sync::mpsc::{channel, Sender, SendError}; use toml; use multicast::Multicast; use json_serve::uiserver::UIServer; use util::{ntohs, skip_bytes_cast, skip_cast}; use ip::{IP4Addr, IP6Addr, IP4Header, IP6Header}; use ether::{EthernetHeader, MacAddr, ETHERTYPE_ARP, ETHERTYPE_IP4, ETHERTYPE_IP6, ETHERTYPE_802_1X}; use dot11::{self, FrameType}; use tap; use pkt_graph::{PktMeta, ProtocolGraph, RouteStats}; use fixed_ring::FixedRingBuffer; use pcap::pcap as cap; #[derive(RustcEncodable, Clone)] struct RouteStatsMsg<T> { typ: &'static str, route: RouteStats<T>, } #[derive(Debug)] pub enum Pkt { Mac(PktMeta<MacAddr>), IP4(PktMeta<IP4Addr>), IP6(PktMeta<IP6Addr>), } #[derive(Clone)] pub struct ProtocolHandler<T:Eq+Hash+Send+Sync+'static> { pub typ: &'static str, pub graph: Arc<RwLock<ProtocolGraph<T>>>, stats_mcast: Multicast<RouteStatsMsg<T>>, } impl <T:Send+Sync+Copy+Clone+Eq+Hash> ProtocolHandler<T> { fn new(typ: &'static str) -> io::Result<ProtocolHandler<T>> { Ok(ProtocolHandler { typ: typ, graph: Arc::new(RwLock::new(ProtocolGraph::new())), stats_mcast: Multicast::spawn()? }) } fn update(&mut self, pkt: &PktMeta<T>) { let route_stats = { self.graph.write().unwrap().update(pkt) }; let route_stats_msg = Arc::new(RouteStatsMsg { typ: self.typ, route: route_stats }); self.stats_mcast.send(route_stats_msg).unwrap(); } } #[derive(Clone)] pub struct ProtoGraphController { pub cap_tx: Sender<Pkt>, pub mac: ProtocolHandler<MacAddr>, pub ip4: ProtocolHandler<IP4Addr>, pub ip6: ProtocolHandler<IP6Addr>, } impl ProtoGraphController { fn spawn() -> io::Result<ProtoGraphController> { let (cap_tx, cap_rx) = channel(); let ctl = ProtoGraphController { cap_tx: cap_tx, mac: ProtocolHandler::new("mac")?, ip4: ProtocolHandler::new("ip4")?, ip6: ProtocolHandler::new("ip6")?, }; let mut phctl = ctl.clone(); thread::Builder::new().name("protocol_handler".to_owned()).spawn(move || { loop { let pkt = cap_rx.recv(); if pkt.is_err()
match pkt.unwrap() { Pkt::Mac(ref p) => phctl.mac.update(p), Pkt::IP4(ref p) => phctl.ip4.update(p), Pkt::IP6(ref p) => phctl.ip6.update(p), } } })?; Ok(ctl) } fn sender(&self) -> Sender<Pkt> { self.cap_tx.clone() } fn register_mac_listener(&self, s: Sender<Arc<RouteStatsMsg<MacAddr>>>) { self.mac.stats_mcast.register(s).unwrap(); } fn register_ip4_listener(&self, s: Sender<Arc<RouteStatsMsg<IP4Addr>>>) { self.ip4.stats_mcast.register(s).unwrap(); } fn register_ip6_listener(&self, s: Sender<Arc<RouteStatsMsg<IP6Addr>>>) { self.ip6.stats_mcast.register(s).unwrap(); } } enum ParseErr { Send, UnknownPacket } impl<T> From<SendError<T>> for ParseErr { fn from(_: SendError<T>) -> ParseErr { ParseErr::Send } } trait PktParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr>; } pub struct CaptureCtx { sess: cap::PcapSession, parser: Box<PktParser+'static> } impl CaptureCtx { fn parse_next(&mut self) { let p = &mut self.parser; self.sess.next(|cap| { match p.parse(cap) { _ => () //just ignore } }); } } struct EthernetParser { pkts: Sender<Pkt>, } impl PktParser for EthernetParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> { let ether_hdr = unsafe { &*(pkt.pkt_ptr() as *const EthernetHeader) }; self.pkts.send(Pkt::Mac(PktMeta::new(ether_hdr.src, ether_hdr.dst, pkt.len())))?; match ether_hdr.typ { ETHERTYPE_ARP => { //io::println("ARP!"); }, ETHERTYPE_IP4 => { let ipp: &IP4Header = unsafe { skip_cast(ether_hdr) }; self.pkts.send(Pkt::IP4(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?; }, ETHERTYPE_IP6 => { let ipp: &IP6Header = unsafe { skip_cast(ether_hdr) }; self.pkts.send(Pkt::IP6(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?; }, ETHERTYPE_802_1X => { //io::println("802.1X!"); }, _ => { //println!("Unknown type: {:x}", x); } } Ok(()) } } #[derive(Debug)] pub struct PhysData { // TODO: this name sucks frame_ty: FrameType, addrs: [MacAddr; 3], rate: Option<tap::Rate>, channel: tap::Channel, antenna_signal: tap::AntennaSignal, antenna_noise: tap::AntennaNoise, antenna: tap::Antenna, } impl PhysData { fn new(frame_ty: FrameType, addrs: [MacAddr; 3], rate: Option<tap::Rate>, channel: tap::Channel, antenna_signal: tap::AntennaSignal, antenna_noise: tap::AntennaNoise, antenna: tap::Antenna, ) -> PhysData { PhysData { frame_ty: frame_ty, addrs: addrs, rate: rate, channel: channel, antenna_signal: antenna_signal, antenna_noise: antenna_noise, antenna: antenna } } fn dist(&self) -> f32 { let freq = f32::from(self.channel.mhz); let signal = f32::from(self.antenna_signal.dbm); let exp = (27.55 - (20.0 * freq.log10()) + signal.abs()) / 20.0; (10.0f32).powf(exp) } } #[derive(PartialEq, Eq, Hash)] pub struct PhysDataKey(pub FrameType, pub [MacAddr;3]); pub struct PhysDataVal { pub dat: FixedRingBuffer<PhysData>, pub count: u32, } impl PhysDataVal { pub fn new() -> PhysDataVal { PhysDataVal { dat: FixedRingBuffer::new(10), count: 0 } } pub fn avg_dist(&self) -> f32 { let mut s = 0.0; for pd in self.dat.iter() { s += pd.dist(); } s / (self.dat.len() as f32) } } #[derive(Clone)] pub struct PhysDataController { pub map: Arc<RwLock<HashMap<PhysDataKey, PhysDataVal>>>, pd_tx: Sender<PhysData> } impl PhysDataController { fn spawn() -> io::Result<PhysDataController> { let (pd_tx, pd_rx) = channel(); let out = PhysDataController { pd_tx: pd_tx, map: Arc::new(RwLock::new(HashMap::new())) }; let ctl = out.clone(); thread::Builder::new().name("physdata_handler".to_owned()).spawn(move || { loop { let res = pd_rx.recv(); if res.is_err() { break } let pd = 
res.unwrap(); match ctl.map.write().unwrap().entry(PhysDataKey(pd.frame_ty, pd.addrs)) { Entry::Occupied(mut e) => { let mut pdc = e.get_mut(); pdc.dat.push(pd); pdc.count += 1; } Entry::Vacant(e) => { let mut pdc = PhysDataVal::new(); pdc.dat.push(pd); pdc.count += 1; e.insert(pdc); } }; } })?; Ok(out) } fn sender(&self) -> Sender<PhysData> { self.pd_tx.clone() } } struct RadiotapParser { pkts: Sender<Pkt>, phys: Sender<PhysData> } impl RadiotapParser { fn parse_known_headers(&self, frame_ty: FrameType, addrs: [MacAddr; 3], tap_hdr: &tap::RadiotapHeader) { match tap_hdr.it_present { tap::ItPresent::COMMON_A => { if let Some(vals) = tap::CommonA::parse(tap_hdr) { self.phys.send(PhysData::new( frame_ty, addrs, Some(vals.rate), vals.channel, vals.antenna_signal, vals.antenna_noise, vals.antenna )).unwrap(); } }, tap::ItPresent::COMMON_B => { if let Some(vals) = tap::CommonB::parse(tap_hdr) { self.phys.send(PhysData::new( frame_ty, addrs, None, vals.channel, vals.antenna_signal, vals.antenna_noise, vals.antenna )).unwrap(); } }, _ => {} //Unknown header } } } impl PktParser for RadiotapParser { fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> { fn magic<U>(pkt: &tap::RadiotapHeader) -> &U { unsafe { skip_bytes_cast(pkt, pkt.it_len as isize) } } let tap_hdr = unsafe { &*(pkt.pkt_ptr() as *const tap::RadiotapHeader) }; let base: &dot11::Dot11BaseHeader = magic(tap_hdr); let fc = &base.fr_ctrl; if fc.protocol_version()!= 0 { // bogus packet, bail return Err(ParseErr::UnknownPacket); } match fc.frame_type() { ft @ FrameType::Management => { let mgt: &dot11::ManagementFrameHeader = magic(tap_hdr); self.parse_known_headers(ft, [mgt.addr1, mgt.addr2, mgt.addr3], tap_hdr); } ft @ FrameType::Data => { let data: &dot11::DataFrameHeader = magic(tap_hdr); //TODO: get length self.pkts.send(Pkt::Mac(PktMeta::new(data.addr1, data.addr2, 1)))?; self.parse_known_headers(ft, [data.addr1, data.addr2, data.addr3], tap_hdr); } FrameType::Control | FrameType::Unknown => { //println!("Unknown frame type"); } } Ok(()) } } pub fn init_capture(conf: &D3capConf, pkt_sender: Sender<Pkt>, pd_sender: Sender<PhysData>) -> CaptureCtx { let sess = match conf.file { Some(ref f) => cap::PcapSession::from_file(f), None => { println!("No session file"); let sess_builder = match conf.interface { Some(ref dev) => cap::PcapSessionBuilder::new_dev(dev), None => cap::PcapSessionBuilder::new() }; sess_builder.unwrap() .buffer_size(0xFFFF) .timeout(1000) .promisc(conf.promisc) .rfmon(conf.monitor) .activate() } }; let parser = match sess.datalink() { cap::DLT_ETHERNET => { Box::new(EthernetParser { pkts: pkt_sender }) as Box<PktParser> } cap::DLT_IEEE802_11_RADIO => { Box::new(RadiotapParser { pkts: pkt_sender, phys: pd_sender }) as Box<PktParser> } x => panic!("unsupported datalink type: {}", x) }; CaptureCtx { sess: sess, parser: parser } } pub fn start_capture(conf: D3capConf, pkt_sender: Sender<Pkt>, pd_sender: Sender<PhysData>) -> io::Result<JoinHandle<()>> { thread::Builder::new().name("packet_capture".to_owned()).spawn(move || { let mut cap = init_capture(&conf, pkt_sender, pd_sender); loop { cap.parse_next(); } }) } enum LoadMacError { IOError(io::Error), TomlError(Option<toml::de::Error>) } impl From<io::Error> for LoadMacError { fn from(err: io::Error) -> LoadMacError { LoadMacError::IOError(err) } } impl From<toml::de::Error> for LoadMacError { fn from(err: toml::de::Error) -> LoadMacError { LoadMacError::TomlError(Some(err)) } } fn load_mac_addrs(file: &str) -> Result<HashMap<MacAddr, String>, LoadMacError> 
{ let mut s = String::new(); let mut f = File::open(&file)?; f.read_to_string(&mut s)?; let t = s.parse::<toml::Value>()?; if let Some(k) = t.get(&"known-macs".to_owned()) { if let Some(tbl) = k.as_table() { return Ok(tbl.iter() .map(|(k, v)| (MacAddr::from_string(k), v.as_str())) .filter_map(|x| match x { (Some(addr), Some(alias)) => Some((addr, alias.to_owned())), _ => None }) .collect()) } } Err(LoadMacError::TomlError(None)) } fn start_websocket(port: u16, mac_map: &MacMap, pg_ctl: &ProtoGraphController) -> io::Result<()> { let ui = UIServer::spawn(port, mac_map)?; pg_ctl.register_mac_listener(ui.create_sender()?); pg_ctl.register_ip4_listener(ui.create_sender()?); pg_ctl.register_ip6_listener(ui.create_sender()?); Ok(()) } pub type MacMap = HashMap<MacAddr, String>; pub type IP4Map = HashMap<IP4Addr, String>; pub type IP6Map = HashMap<IP6Addr, String>; #[derive(Clone)] pub struct D3capController { pub pg_ctrl: ProtoGraphController, pub pd_ctrl: PhysDataController, pub mac_names: MacMap, pub ip4_names: IP4Map, pub ip6_names: IP6Map, pub server_started: bool } impl D3capController { pub fn spawn(conf: D3capConf) -> io::Result<D3capController> { let mac_names = conf.conf.as_ref() .map_or_else(HashMap::new, |x| { load_mac_addrs(x).unwrap_or_else(|_| HashMap::new()) }); let ip4_names = HashMap::new(); let ip6_names = HashMap::new(); let pg_ctrl = ProtoGraphController::spawn()?; let pd_ctrl = PhysDataController::spawn()?; start_capture(conf, pg_ctrl.sender(), pd_ctrl.sender()).unwrap(); Ok(D3capController { pg_ctrl: pg_ctrl, pd_ctrl: pd_ctrl, mac_names: mac_names, ip4_names: ip4_names, ip6_names: ip6_names, server_started: false }) } pub fn start_websocket(&mut self, port: u16) -> io::Result<()> { if self.server_started { println!("server already started"); } else { start_websocket(port, &self.mac_names, &self.pg_ctrl)?; self.server_started = true; } Ok(()) } } #[derive(Clone, Debug)] pub struct D3capConf { pub websocket: Option<u16>, pub interface: Option<String>, pub file: Option<String>, pub conf: Option<String>, pub promisc: bool, pub monitor: bool }
{ break }
conditional_block
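The conditional_block middle `{ break }` completes the `if pkt.is_err() { break }` receive loop. The suffix in this row also shows the HashMap Entry dance used by PhysDataController; reduced to counters it looks like the sketch below (modern code would usually write `*counts.entry(key).or_insert(0) += 1;` instead):

use std::collections::hash_map::{Entry, HashMap};

fn main() {
    let mut counts: HashMap<&str, u32> = HashMap::new();
    for key in ["mgmt", "data", "data"] {
        // One hash lookup covers both the hit and the miss path.
        match counts.entry(key) {
            Entry::Occupied(mut e) => *e.get_mut() += 1,
            Entry::Vacant(e) => { e.insert(1); }
        }
    }
    assert_eq!(counts["data"], 2);
}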
mod.rs
//! This module implements the global `Function` object as well as creates Native Functions. //! //! Objects wrap `Function`s and expose them via call/construct slots. //! //! `The `Function` object is used for matching text with a pattern. //! //! More information: //! - [ECMAScript reference][spec] //! - [MDN documentation][mdn] //! //! [spec]: https://tc39.es/ecma262/#sec-function-objects //! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function use std::{ fmt, ops::{Deref, DerefMut}, }; use dyn_clone::DynClone; use crate::{ builtins::BuiltIn, context::StandardObjects, environment::lexical_environment::Environment, gc::{Finalize, Trace}, object::JsObject, object::{ internal_methods::get_prototype_from_constructor, ConstructorBuilder, FunctionBuilder, NativeObject, ObjectData, }, property::Attribute, property::PropertyDescriptor, syntax::ast::node::{FormalParameter, RcStatementList}, BoaProfiler, Context, JsResult, JsValue, }; use super::JsArgs; pub(crate) mod arguments; #[cfg(test)] mod tests; /// Type representing a native built-in function a.k.a. function pointer. /// /// Native functions need to have this signature in order to /// be callable from Javascript. pub type NativeFunctionSignature = fn(&JsValue, &[JsValue], &mut Context) -> JsResult<JsValue>; // Allows restricting closures to only `Copy` ones. // Used the sealed pattern to disallow external implementations // of `DynCopy`. mod sealed { pub trait Sealed {} impl<T: Copy> Sealed for T {} } pub trait DynCopy: sealed::Sealed {} impl<T: Copy> DynCopy for T {} /// Trait representing a native built-in closure. /// /// Closures need to have this signature in order to /// be callable from Javascript, but most of the time the compiler /// is smart enough to correctly infer the types. pub trait ClosureFunctionSignature: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + DynCopy + DynClone +'static { } // The `Copy` bound automatically infers `DynCopy` and `DynClone` impl<T> ClosureFunctionSignature for T where T: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + Copy +'static { } // Allows cloning Box<dyn ClosureFunctionSignature> dyn_clone::clone_trait_object!(ClosureFunctionSignature); #[derive(Debug, Trace, Finalize, PartialEq, Clone)] pub enum ThisMode { Lexical, Strict, Global, } impl ThisMode { /// Returns `true` if the this mode is `Lexical`. pub fn is_lexical(&self) -> bool { matches!(self, Self::Lexical) } /// Returns `true` if the this mode is `Strict`. pub fn is_strict(&self) -> bool { matches!(self, Self::Strict) } /// Returns `true` if the this mode is `Global`. pub fn is_global(&self) -> bool
} #[derive(Debug, Trace, Finalize, PartialEq, Clone)] pub enum ConstructorKind { Base, Derived, } impl ConstructorKind { /// Returns `true` if the constructor kind is `Base`. pub fn is_base(&self) -> bool { matches!(self, Self::Base) } /// Returns `true` if the constructor kind is `Derived`. pub fn is_derived(&self) -> bool { matches!(self, Self::Derived) } } // We don't use a standalone `NativeObject` for `Captures` because it doesn't // guarantee that the internal type implements `Clone`. // This private trait guarantees that the internal type passed to `Captures` // implements `Clone`, and `DynClone` allows us to implement `Clone` for // `Box<dyn CapturesObject>`. trait CapturesObject: NativeObject + DynClone {} impl<T: NativeObject + Clone> CapturesObject for T {} dyn_clone::clone_trait_object!(CapturesObject); /// Wrapper for `Box<dyn NativeObject + Clone>` that allows passing additional /// captures through a `Copy` closure. /// /// Any type implementing `Trace + Any + Debug + Clone` /// can be used as a capture context, so you can pass e.g. a String, /// a tuple or even a full struct. /// /// You can downcast to any type and handle the fail case as you like /// with `downcast_ref` and `downcast_mut`, or you can use `try_downcast_ref` /// and `try_downcast_mut` to automatically throw a `TypeError` if the downcast /// fails. #[derive(Debug, Clone, Trace, Finalize)] pub struct Captures(Box<dyn CapturesObject>); impl Captures { /// Creates a new capture context. pub(crate) fn new<T>(captures: T) -> Self where T: NativeObject + Clone, { Self(Box::new(captures)) } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or `None` otherwise. pub fn downcast_ref<T>(&self) -> Option<&T> where T: NativeObject + Clone, { self.0.deref().as_any().downcast_ref::<T>() } /// Mutably downcasts `Captures` to the specified type, returning a /// mutable reference to the downcasted type if successful or `None` otherwise. pub fn downcast_mut<T>(&mut self) -> Option<&mut T> where T: NativeObject + Clone, { self.0.deref_mut().as_mut_any().downcast_mut::<T>() } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or a `TypeError` otherwise. pub fn try_downcast_ref<T>(&self, context: &mut Context) -> JsResult<&T> where T: NativeObject + Clone, { self.0 .deref() .as_any() .downcast_ref::<T>() .ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type")) } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or a `TypeError` otherwise. pub fn try_downcast_mut<T>(&mut self, context: &mut Context) -> JsResult<&mut T> where T: NativeObject + Clone, { self.0 .deref_mut() .as_mut_any() .downcast_mut::<T>() .ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type")) } } /// Boa representation of a Function Object. 
/// /// FunctionBody is specific to this interpreter; it will either be Rust code or JavaScript code (AST Node) /// /// <https://tc39.es/ecma262/#sec-ecmascript-function-objects> #[derive(Clone, Trace, Finalize)] pub enum Function { Native { #[unsafe_ignore_trace] function: NativeFunctionSignature, constructable: bool, }, Closure { #[unsafe_ignore_trace] function: Box<dyn ClosureFunctionSignature>, constructable: bool, captures: Captures, }, Ordinary { constructable: bool, this_mode: ThisMode, body: RcStatementList, params: Box<[FormalParameter]>, environment: Environment, }, #[cfg(feature = "vm")] VmOrdinary { code: gc::Gc<crate::vm::CodeBlock>, environment: Environment, }, } impl fmt::Debug for Function { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Function {{ ... }}") } } impl Function { // Adds the final rest parameters to the Environment as an array #[cfg(not(feature = "vm"))] pub(crate) fn add_rest_param( param: &FormalParameter, index: usize, args_list: &[JsValue], context: &mut Context, local_env: &Environment, ) { use crate::builtins::Array; // Create array of values let array = Array::new_array(context); Array::add_to_array_object(&array, args_list.get(index..).unwrap_or_default(), context) .unwrap(); // Create binding local_env // Function parameters can share names in JavaScript... .create_mutable_binding(param.name(), false, true, context) .expect("Failed to create binding for rest param"); // Set Binding to value local_env .initialize_binding(param.name(), array, context) .expect("Failed to initialize rest param"); } // Adds an argument to the environment pub(crate) fn add_arguments_to_environment( param: &FormalParameter, value: JsValue, local_env: &Environment, context: &mut Context, ) { // Create binding local_env .create_mutable_binding(param.name(), false, true, context) .expect("Failed to create binding"); // Set Binding to value local_env .initialize_binding(param.name(), value, context) .expect("Failed to initialize binding"); } /// Returns true if the function object is constructable. pub fn is_constructable(&self) -> bool { match self { Self::Native { constructable, .. } => *constructable, Self::Closure { constructable, .. } => *constructable, Self::Ordinary { constructable, .. } => *constructable, #[cfg(feature = "vm")] Self::VmOrdinary { code, .. } => code.constructable, } } } /// Creates a new member function of an `Object` or `prototype`. /// /// A function registered this way can then be called from JavaScript using: /// /// parent.name() /// /// See the JavaScript 'Number.toString()' as an example. /// /// # Arguments /// function: The function to register as a built-in function. /// name: The name of the function (how it will be called but without the ()). /// parent: The object to register the function on; if the global object is used then the function is instead called as name() /// without requiring the parent, see parseInt() as an example. /// length: As described at <https://tc39.es/ecma262/#sec-function-instances-length>, the value of the "length" property is an integer that /// indicates the typical number of arguments expected by the function. However, the language permits the function to be invoked with /// some other number of arguments. /// /// If no length is provided, the length will be set to 0. // TODO: deprecate/remove this.
pub(crate) fn make_builtin_fn<N>( function: NativeFunctionSignature, name: N, parent: &JsObject, length: usize, interpreter: &Context, ) where N: Into<String>, { let name = name.into(); let _timer = BoaProfiler::global().start_event(&format!("make_builtin_fn: {}", &name), "init"); let function = JsObject::from_proto_and_data( interpreter.standard_objects().function_object().prototype(), ObjectData::function(Function::Native { function, constructable: false, }), ); let attribute = PropertyDescriptor::builder() .writable(false) .enumerable(false) .configurable(true); function.insert_property("length", attribute.clone().value(length)); function.insert_property("name", attribute.value(name.as_str())); parent.clone().insert_property( name, PropertyDescriptor::builder() .value(function) .writable(true) .enumerable(false) .configurable(true), ); } #[derive(Debug, Clone, Copy)] pub struct BuiltInFunctionObject; impl BuiltInFunctionObject { pub const LENGTH: usize = 1; fn constructor( new_target: &JsValue, _: &[JsValue], context: &mut Context, ) -> JsResult<JsValue> { let prototype = get_prototype_from_constructor(new_target, StandardObjects::function_object, context)?; let this = JsObject::from_proto_and_data( prototype, ObjectData::function(Function::Native { function: |_, _, _| Ok(JsValue::undefined()), constructable: true, }), ); Ok(this.into()) } fn prototype(_: &JsValue, _: &[JsValue], _: &mut Context) -> JsResult<JsValue> { Ok(JsValue::undefined()) } /// `Function.prototype.call` /// /// The call() method invokes the function with the first argument as the `this` value. /// /// More information: /// - [MDN documentation][mdn] /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-function.prototype.call /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call fn call(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> { if !this.is_function() { return context.throw_type_error(format!("{} is not a function", this.display())); } let this_arg = args.get_or_undefined(0); // TODO?: 3. Perform PrepareForTailCall let start = if !args.is_empty() { 1 } else { 0 }; context.call(this, this_arg, &args[start..]) } /// `Function.prototype.apply` /// /// The apply() method invokes the function with the first argument as the `this` value /// and the rest of the arguments provided as an array (or an array-like object). /// /// More information: /// - [MDN documentation][mdn] /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-function.prototype.apply /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/apply fn apply(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> { if !this.is_function() { return context.throw_type_error(format!("{} is not a function", this.display())); } let this_arg = args.get_or_undefined(0); let arg_array = args.get_or_undefined(1); if arg_array.is_null_or_undefined() { // TODO?: 3.a. PrepareForTailCall return context.call(this, this_arg, &[]); } let arg_list = arg_array.create_list_from_array_like(&[], context)?; // TODO?: 5. PrepareForTailCall context.call(this, this_arg, &arg_list) } #[allow(clippy::wrong_self_convention)] fn to_string(this: &JsValue, _: &[JsValue], context: &mut Context) -> JsResult<JsValue> { let name = { // Is there a case here where if there is no name field on a value // name should default to None? Do all functions have names set?
let value = this.get_field("name", &mut *context)?; if value.is_null_or_undefined() { None } else { Some(value.to_string(context)?) } }; let function = { let object = this .as_object() .map(|object| object.borrow().as_function().cloned()); if let Some(Some(function)) = object { function } else { return context.throw_type_error("Not a function"); } }; match (&function, name) { ( Function::Native { function: _, constructable: _, }, Some(name), ) => Ok(format!("function {}() {{\n [native Code]\n}}", &name).into()), (Function::Ordinary { body, params, .. }, Some(name)) => { let arguments: String = params .iter() .map(|param| param.name()) .collect::<Vec<&str>>() .join(", "); let statement_list = &*body; // This is a kludge. The implementation in browsers seems to suggest that // the value here is printed exactly as defined in source. I'm not sure if // that's possible here, but for now here's a dumb heuristic that prints functions let is_multiline = { let value = statement_list.to_string(); value.lines().count() > 1 }; if is_multiline { Ok( // ?? For some reason the statement_list string implementation // sticks a \n at the end no matter what format!( "{}({}) {{\n{}}}", &name, arguments, statement_list.to_string() ) .into(), ) } else { Ok(format!( "{}({}) {{{}}}", &name, arguments, // The trim here is to remove a \n stuck at the end // of the statement_list to_string method statement_list.to_string().trim() ) .into()) } } _ => Ok("TODO".into()), } } } impl BuiltIn for BuiltInFunctionObject { const NAME: &'static str = "Function"; const ATTRIBUTE: Attribute = Attribute::WRITABLE .union(Attribute::NON_ENUMERABLE) .union(Attribute::CONFIGURABLE); fn init(context: &mut Context) -> JsValue { let _timer = BoaProfiler::global().start_event("function", "init"); let function_prototype = context.standard_objects().function_object().prototype(); FunctionBuilder::native(context, Self::prototype) .name("") .length(0) .constructable(false) .build_function_prototype(&function_prototype); let function_object = ConstructorBuilder::with_standard_object( context, Self::constructor, context.standard_objects().function_object().clone(), ) .name(Self::NAME) .length(Self::LENGTH) .method(Self::call, "call", 1) .method(Self::apply, "apply", 1) .method(Self::to_string, "toString", 0) .build(); function_object.into() } }
{ matches!(self, Self::Global) }
identifier_body
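To make the `Captures` plumbing concrete, here is a sketch of a callable that recovers its captured state, written as if it lived inside this module so the items above are in scope. `Captures::new` is crate-private, so constructing the box is the engine's job; the `Counter` type is invented for illustration.

```rust
// Hypothetical captured state; deriving `Trace` and `Finalize` satisfies
// the `NativeObject` bound that `Captures` requires.
#[derive(Debug, Clone, Trace, Finalize)]
struct Counter {
    hits: u32,
}

// A plain `fn` is `Copy` and implements `Fn(..)`, so it satisfies
// `ClosureFunctionSignature`; the engine passes the `Captures` box on
// every invocation.
fn hits(_this: &JsValue, _args: &[JsValue], captures: Captures, context: &mut Context) -> JsResult<JsValue> {
    // `try_downcast_ref` raises a JS `TypeError` if the stored type differs.
    let counter = captures.try_downcast_ref::<Counter>(context)?;
    Ok(JsValue::new(counter.hits))
}
```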
mod.rs
//! This module implements the global `Function` object as well as creates Native Functions. //! //! Objects wrap `Function`s and expose them via call/construct slots. //! //! The `Function` object is the constructor from which every function inherits; it can also create new functions at runtime. //! //! More information: //! - [ECMAScript reference][spec] //! - [MDN documentation][mdn] //! //! [spec]: https://tc39.es/ecma262/#sec-function-objects //! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function use std::{ fmt, ops::{Deref, DerefMut}, }; use dyn_clone::DynClone; use crate::{ builtins::BuiltIn, context::StandardObjects, environment::lexical_environment::Environment, gc::{Finalize, Trace}, object::JsObject, object::{ internal_methods::get_prototype_from_constructor, ConstructorBuilder, FunctionBuilder, NativeObject, ObjectData, }, property::Attribute, property::PropertyDescriptor, syntax::ast::node::{FormalParameter, RcStatementList}, BoaProfiler, Context, JsResult, JsValue, }; use super::JsArgs; pub(crate) mod arguments; #[cfg(test)] mod tests; /// Type representing a native built-in function a.k.a. function pointer. /// /// Native functions need to have this signature in order to /// be callable from JavaScript. pub type NativeFunctionSignature = fn(&JsValue, &[JsValue], &mut Context) -> JsResult<JsValue>; // Allows restricting closures to only `Copy` ones. // Uses the sealed pattern to disallow external implementations // of `DynCopy`. mod sealed { pub trait Sealed {} impl<T: Copy> Sealed for T {} } pub trait DynCopy: sealed::Sealed {} impl<T: Copy> DynCopy for T {} /// Trait representing a native built-in closure. /// /// Closures need to have this signature in order to /// be callable from JavaScript, but most of the time the compiler /// is smart enough to correctly infer the types. pub trait ClosureFunctionSignature: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + DynCopy + DynClone + 'static { } // The `Copy` bound automatically infers `DynCopy` and `DynClone` impl<T> ClosureFunctionSignature for T where T: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + Copy + 'static { } // Allows cloning Box<dyn ClosureFunctionSignature> dyn_clone::clone_trait_object!(ClosureFunctionSignature); #[derive(Debug, Trace, Finalize, PartialEq, Clone)] pub enum ThisMode { Lexical, Strict, Global, } impl ThisMode { /// Returns `true` if the this mode is `Lexical`. pub fn is_lexical(&self) -> bool { matches!(self, Self::Lexical) } /// Returns `true` if the this mode is `Strict`. pub fn is_strict(&self) -> bool { matches!(self, Self::Strict) } /// Returns `true` if the this mode is `Global`. pub fn is_global(&self) -> bool { matches!(self, Self::Global) } } #[derive(Debug, Trace, Finalize, PartialEq, Clone)] pub enum ConstructorKind { Base, Derived, } impl ConstructorKind { /// Returns `true` if the constructor kind is `Base`. pub fn is_base(&self) -> bool { matches!(self, Self::Base) } /// Returns `true` if the constructor kind is `Derived`. pub fn is_derived(&self) -> bool { matches!(self, Self::Derived) } } // We don't use a standalone `NativeObject` for `Captures` because it doesn't // guarantee that the internal type implements `Clone`. // This private trait guarantees that the internal type passed to `Captures` // implements `Clone`, and `DynClone` allows us to implement `Clone` for // `Box<dyn CapturesObject>`.
trait CapturesObject: NativeObject + DynClone {} impl<T: NativeObject + Clone> CapturesObject for T {} dyn_clone::clone_trait_object!(CapturesObject); /// Wrapper for `Box<dyn NativeObject + Clone>` that allows passing additional /// captures through a `Copy` closure. /// /// Any type implementing `Trace + Any + Debug + Clone` /// can be used as a capture context, so you can pass e.g. a String, /// a tuple or even a full struct. /// /// You can downcast to any type and handle the fail case as you like /// with `downcast_ref` and `downcast_mut`, or you can use `try_downcast_ref` /// and `try_downcast_mut` to automatically throw a `TypeError` if the downcast /// fails. #[derive(Debug, Clone, Trace, Finalize)] pub struct Captures(Box<dyn CapturesObject>); impl Captures { /// Creates a new capture context. pub(crate) fn new<T>(captures: T) -> Self where T: NativeObject + Clone, { Self(Box::new(captures)) } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or `None` otherwise. pub fn downcast_ref<T>(&self) -> Option<&T> where T: NativeObject + Clone, { self.0.deref().as_any().downcast_ref::<T>() } /// Mutably downcasts `Captures` to the specified type, returning a /// mutable reference to the downcasted type if successful or `None` otherwise. pub fn downcast_mut<T>(&mut self) -> Option<&mut T> where T: NativeObject + Clone, { self.0.deref_mut().as_mut_any().downcast_mut::<T>() } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or a `TypeError` otherwise. pub fn try_downcast_ref<T>(&self, context: &mut Context) -> JsResult<&T> where T: NativeObject + Clone, { self.0 .deref() .as_any() .downcast_ref::<T>() .ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type")) } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or a `TypeError` otherwise. pub fn try_downcast_mut<T>(&mut self, context: &mut Context) -> JsResult<&mut T> where T: NativeObject + Clone, { self.0 .deref_mut() .as_mut_any() .downcast_mut::<T>() .ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type")) } } /// Boa representation of a Function Object. /// /// FunctionBody is specific to this interpreter, it will either be Rust code or JavaScript code (AST Node) /// /// <https://tc39.es/ecma262/#sec-ecmascript-function-objects> #[derive(Clone, Trace, Finalize)] pub enum Function { Native { #[unsafe_ignore_trace] function: NativeFunctionSignature, constructable: bool, }, Closure { #[unsafe_ignore_trace] function: Box<dyn ClosureFunctionSignature>, constructable: bool, captures: Captures, }, Ordinary { constructable: bool, this_mode: ThisMode, body: RcStatementList, params: Box<[FormalParameter]>, environment: Environment, }, #[cfg(feature = "vm")] VmOrdinary { code: gc::Gc<crate::vm::CodeBlock>, environment: Environment, }, } impl fmt::Debug for Function { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Function {{... 
}}") } } impl Function { // Adds the final rest parameters to the Environment as an array #[cfg(not(feature = "vm"))] pub(crate) fn add_rest_param( param: &FormalParameter, index: usize, args_list: &[JsValue], context: &mut Context, local_env: &Environment, ) { use crate::builtins::Array; // Create array of values let array = Array::new_array(context); Array::add_to_array_object(&array, args_list.get(index..).unwrap_or_default(), context) .unwrap(); // Create binding local_env // Function parameters can share names in JavaScript... .create_mutable_binding(param.name(), false, true, context) .expect("Failed to create binding for rest param"); // Set Binding to value local_env .initialize_binding(param.name(), array, context) .expect("Failed to initialize rest param"); } // Adds an argument to the environment pub(crate) fn add_arguments_to_environment( param: &FormalParameter, value: JsValue, local_env: &Environment, context: &mut Context, ) { // Create binding local_env .create_mutable_binding(param.name(), false, true, context) .expect("Failed to create binding"); // Set Binding to value local_env .initialize_binding(param.name(), value, context) .expect("Failed to intialize binding"); } /// Returns true if the function object is constructable. pub fn is_constructable(&self) -> bool { match self { Self::Native { constructable,.. } => *constructable, Self::Closure { constructable,.. } => *constructable, Self::Ordinary { constructable,.. } => *constructable, #[cfg(feature = "vm")] Self::VmOrdinary { code,.. } => code.constructable, } } } /// Creates a new member function of a `Object` or `prototype`. /// /// A function registered using this macro can then be called from Javascript using: /// /// parent.name() /// /// See the javascript 'Number.toString()' as an example. /// /// # Arguments /// function: The function to register as a built in function. /// name: The name of the function (how it will be called but without the ()). /// parent: The object to register the function on, if the global object is used then the function is instead called as name() /// without requiring the parent, see parseInt() as an example. /// length: As described at <https://tc39.es/ecma262/#sec-function-instances-length>, The value of the "length" property is an integer that /// indicates the typical number of arguments expected by the function. However, the language permits the function to be invoked with /// some other number of arguments. /// /// If no length is provided, the length will be set to 0. // TODO: deprecate/remove this. 
pub(crate) fn make_builtin_fn<N>( function: NativeFunctionSignature, name: N, parent: &JsObject, length: usize, interpreter: &Context, ) where N: Into<String>, { let name = name.into(); let _timer = BoaProfiler::global().start_event(&format!("make_builtin_fn: {}", &name), "init"); let function = JsObject::from_proto_and_data( interpreter.standard_objects().function_object().prototype(), ObjectData::function(Function::Native { function, constructable: false, }), ); let attribute = PropertyDescriptor::builder() .writable(false) .enumerable(false) .configurable(true); function.insert_property("length", attribute.clone().value(length)); function.insert_property("name", attribute.value(name.as_str())); parent.clone().insert_property( name, PropertyDescriptor::builder() .value(function) .writable(true) .enumerable(false) .configurable(true), ); } #[derive(Debug, Clone, Copy)] pub struct BuiltInFunctionObject; impl BuiltInFunctionObject { pub const LENGTH: usize = 1; fn constructor( new_target: &JsValue, _: &[JsValue], context: &mut Context, ) -> JsResult<JsValue> { let prototype = get_prototype_from_constructor(new_target, StandardObjects::function_object, context)?; let this = JsObject::from_proto_and_data( prototype, ObjectData::function(Function::Native { function: |_, _, _| Ok(JsValue::undefined()), constructable: true, }), ); Ok(this.into()) } fn
(_: &JsValue, _: &[JsValue], _: &mut Context) -> JsResult<JsValue> { Ok(JsValue::undefined()) } /// `Function.prototype.call` /// /// The call() method invokes self with the first argument as the `this` value. /// /// More information: /// - [MDN documentation][mdn] /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-function.prototype.call /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call fn call(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> { if!this.is_function() { return context.throw_type_error(format!("{} is not a function", this.display())); } let this_arg = args.get_or_undefined(0); // TODO?: 3. Perform PrepareForTailCall let start = if!args.is_empty() { 1 } else { 0 }; context.call(this, this_arg, &args[start..]) } /// `Function.prototype.apply` /// /// The apply() method invokes self with the first argument as the `this` value /// and the rest of the arguments provided as an array (or an array-like object). /// /// More information: /// - [MDN documentation][mdn] /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-function.prototype.apply /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/apply fn apply(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> { if!this.is_function() { return context.throw_type_error(format!("{} is not a function", this.display())); } let this_arg = args.get_or_undefined(0); let arg_array = args.get_or_undefined(1); if arg_array.is_null_or_undefined() { // TODO?: 3.a. PrepareForTailCall return context.call(this, this_arg, &[]); } let arg_list = arg_array.create_list_from_array_like(&[], context)?; // TODO?: 5. PrepareForTailCall context.call(this, this_arg, &arg_list) } #[allow(clippy::wrong_self_convention)] fn to_string(this: &JsValue, _: &[JsValue], context: &mut Context) -> JsResult<JsValue> { let name = { // Is there a case here where if there is no name field on a value // name should default to None? Do all functions have names set? let value = this.get_field("name", &mut *context)?; if value.is_null_or_undefined() { None } else { Some(value.to_string(context)?) } }; let function = { let object = this .as_object() .map(|object| object.borrow().as_function().cloned()); if let Some(Some(function)) = object { function } else { return context.throw_type_error("Not a function"); } }; match (&function, name) { ( Function::Native { function: _, constructable: _, }, Some(name), ) => Ok(format!("function {}() {{\n [native Code]\n}}", &name).into()), (Function::Ordinary { body, params,.. }, Some(name)) => { let arguments: String = params .iter() .map(|param| param.name()) .collect::<Vec<&str>>() .join(", "); let statement_list = &*body; // This is a kluge. The implementaion in browser seems to suggest that // the value here is printed exactly as defined in source. I'm not sure if // that's possible here, but for now here's a dumb heuristic that prints functions let is_multiline = { let value = statement_list.to_string(); value.lines().count() > 1 }; if is_multiline { Ok( //?? 
For some reason statement_list string implementation // sticks a \n at the end no matter what format!( "{}({}) {{\n{}}}", &name, arguments, statement_list.to_string() ) .into(), ) } else { Ok(format!( "{}({}) {{{}}}", &name, arguments, // The trim here is to remove a \n stuck at the end // of the statement_list to_string method statement_list.to_string().trim() ) .into()) } } _ => Ok("TODO".into()), } } } impl BuiltIn for BuiltInFunctionObject { const NAME: &'static str = "Function"; const ATTRIBUTE: Attribute = Attribute::WRITABLE .union(Attribute::NON_ENUMERABLE) .union(Attribute::CONFIGURABLE); fn init(context: &mut Context) -> JsValue { let _timer = BoaProfiler::global().start_event("function", "init"); let function_prototype = context.standard_objects().function_object().prototype(); FunctionBuilder::native(context, Self::prototype) .name("") .length(0) .constructable(false) .build_function_prototype(&function_prototype); let function_object = ConstructorBuilder::with_standard_object( context, Self::constructor, context.standard_objects().function_object().clone(), ) .name(Self::NAME) .length(Self::LENGTH) .method(Self::call, "call", 1) .method(Self::apply, "apply", 1) .method(Self::to_string, "toString", 0) .build(); function_object.into() } }
prototype
identifier_name
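The `call`/`apply` pair defined above differ only in how arguments arrive: `call` forwards the trailing arguments directly, while `apply` expands an array-like via `create_list_from_array_like`. A sketch of the observable behavior, assuming this version's `Context::eval` and `JsValue::as_number` (written as a crate-internal test):

```rust
#[test]
fn call_and_apply_agree() {
    use crate::Context;

    let mut context = Context::default();
    // `call` spreads arguments; `apply` unpacks its array-like argument.
    let result = context
        .eval("function add(a, b) { return a + b; } add.call(null, 1, 2) + add.apply(null, [3, 4]);")
        .unwrap();
    assert_eq!(result.as_number(), Some(10.0));
}
```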
mod.rs
//! This module implements the global `Function` object as well as creates Native Functions. //! //! Objects wrap `Function`s and expose them via call/construct slots. //! //! The `Function` object is the constructor from which every function inherits; it can also create new functions at runtime. //! //! More information: //! - [ECMAScript reference][spec] //! - [MDN documentation][mdn] //! //! [spec]: https://tc39.es/ecma262/#sec-function-objects //! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function use std::{ fmt, ops::{Deref, DerefMut}, }; use dyn_clone::DynClone; use crate::{ builtins::BuiltIn, context::StandardObjects, environment::lexical_environment::Environment, gc::{Finalize, Trace}, object::JsObject, object::{ internal_methods::get_prototype_from_constructor, ConstructorBuilder, FunctionBuilder, NativeObject, ObjectData, }, property::Attribute, property::PropertyDescriptor, syntax::ast::node::{FormalParameter, RcStatementList}, BoaProfiler, Context, JsResult, JsValue, }; use super::JsArgs; pub(crate) mod arguments; #[cfg(test)] mod tests; /// Type representing a native built-in function a.k.a. function pointer. /// /// Native functions need to have this signature in order to /// be callable from JavaScript. pub type NativeFunctionSignature = fn(&JsValue, &[JsValue], &mut Context) -> JsResult<JsValue>; // Allows restricting closures to only `Copy` ones. // Uses the sealed pattern to disallow external implementations // of `DynCopy`. mod sealed { pub trait Sealed {} impl<T: Copy> Sealed for T {} } pub trait DynCopy: sealed::Sealed {} impl<T: Copy> DynCopy for T {} /// Trait representing a native built-in closure. /// /// Closures need to have this signature in order to /// be callable from JavaScript, but most of the time the compiler /// is smart enough to correctly infer the types. pub trait ClosureFunctionSignature: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + DynCopy + DynClone + 'static { } // The `Copy` bound automatically infers `DynCopy` and `DynClone` impl<T> ClosureFunctionSignature for T where T: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + Copy + 'static { } // Allows cloning Box<dyn ClosureFunctionSignature> dyn_clone::clone_trait_object!(ClosureFunctionSignature); #[derive(Debug, Trace, Finalize, PartialEq, Clone)] pub enum ThisMode { Lexical, Strict, Global, } impl ThisMode { /// Returns `true` if the this mode is `Lexical`. pub fn is_lexical(&self) -> bool { matches!(self, Self::Lexical) } /// Returns `true` if the this mode is `Strict`. pub fn is_strict(&self) -> bool { matches!(self, Self::Strict) } /// Returns `true` if the this mode is `Global`. pub fn is_global(&self) -> bool { matches!(self, Self::Global) } } #[derive(Debug, Trace, Finalize, PartialEq, Clone)] pub enum ConstructorKind { Base, Derived, } impl ConstructorKind { /// Returns `true` if the constructor kind is `Base`. pub fn is_base(&self) -> bool { matches!(self, Self::Base) } /// Returns `true` if the constructor kind is `Derived`. pub fn is_derived(&self) -> bool { matches!(self, Self::Derived) } } // We don't use a standalone `NativeObject` for `Captures` because it doesn't // guarantee that the internal type implements `Clone`. // This private trait guarantees that the internal type passed to `Captures` // implements `Clone`, and `DynClone` allows us to implement `Clone` for // `Box<dyn CapturesObject>`.
trait CapturesObject: NativeObject + DynClone {} impl<T: NativeObject + Clone> CapturesObject for T {} dyn_clone::clone_trait_object!(CapturesObject); /// Wrapper for `Box<dyn NativeObject + Clone>` that allows passing additional /// captures through a `Copy` closure. /// /// Any type implementing `Trace + Any + Debug + Clone` /// can be used as a capture context, so you can pass e.g. a String, /// a tuple or even a full struct. /// /// You can downcast to any type and handle the fail case as you like /// with `downcast_ref` and `downcast_mut`, or you can use `try_downcast_ref` /// and `try_downcast_mut` to automatically throw a `TypeError` if the downcast /// fails. #[derive(Debug, Clone, Trace, Finalize)] pub struct Captures(Box<dyn CapturesObject>); impl Captures { /// Creates a new capture context. pub(crate) fn new<T>(captures: T) -> Self where T: NativeObject + Clone, { Self(Box::new(captures)) } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or `None` otherwise. pub fn downcast_ref<T>(&self) -> Option<&T> where T: NativeObject + Clone, { self.0.deref().as_any().downcast_ref::<T>() } /// Mutably downcasts `Captures` to the specified type, returning a /// mutable reference to the downcasted type if successful or `None` otherwise. pub fn downcast_mut<T>(&mut self) -> Option<&mut T> where T: NativeObject + Clone, { self.0.deref_mut().as_mut_any().downcast_mut::<T>() } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or a `TypeError` otherwise. pub fn try_downcast_ref<T>(&self, context: &mut Context) -> JsResult<&T> where T: NativeObject + Clone, { self.0 .deref() .as_any() .downcast_ref::<T>() .ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type")) } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or a `TypeError` otherwise. pub fn try_downcast_mut<T>(&mut self, context: &mut Context) -> JsResult<&mut T> where T: NativeObject + Clone, { self.0
} /// Boa representation of a Function Object. /// /// FunctionBody is specific to this interpreter, it will either be Rust code or JavaScript code (AST Node) /// /// <https://tc39.es/ecma262/#sec-ecmascript-function-objects> #[derive(Clone, Trace, Finalize)] pub enum Function { Native { #[unsafe_ignore_trace] function: NativeFunctionSignature, constructable: bool, }, Closure { #[unsafe_ignore_trace] function: Box<dyn ClosureFunctionSignature>, constructable: bool, captures: Captures, }, Ordinary { constructable: bool, this_mode: ThisMode, body: RcStatementList, params: Box<[FormalParameter]>, environment: Environment, }, #[cfg(feature = "vm")] VmOrdinary { code: gc::Gc<crate::vm::CodeBlock>, environment: Environment, }, } impl fmt::Debug for Function { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Function {{... }}") } } impl Function { // Adds the final rest parameters to the Environment as an array #[cfg(not(feature = "vm"))] pub(crate) fn add_rest_param( param: &FormalParameter, index: usize, args_list: &[JsValue], context: &mut Context, local_env: &Environment, ) { use crate::builtins::Array; // Create array of values let array = Array::new_array(context); Array::add_to_array_object(&array, args_list.get(index..).unwrap_or_default(), context) .unwrap(); // Create binding local_env // Function parameters can share names in JavaScript... .create_mutable_binding(param.name(), false, true, context) .expect("Failed to create binding for rest param"); // Set Binding to value local_env .initialize_binding(param.name(), array, context) .expect("Failed to initialize rest param"); } // Adds an argument to the environment pub(crate) fn add_arguments_to_environment( param: &FormalParameter, value: JsValue, local_env: &Environment, context: &mut Context, ) { // Create binding local_env .create_mutable_binding(param.name(), false, true, context) .expect("Failed to create binding"); // Set Binding to value local_env .initialize_binding(param.name(), value, context) .expect("Failed to intialize binding"); } /// Returns true if the function object is constructable. pub fn is_constructable(&self) -> bool { match self { Self::Native { constructable,.. } => *constructable, Self::Closure { constructable,.. } => *constructable, Self::Ordinary { constructable,.. } => *constructable, #[cfg(feature = "vm")] Self::VmOrdinary { code,.. } => code.constructable, } } } /// Creates a new member function of a `Object` or `prototype`. /// /// A function registered using this macro can then be called from Javascript using: /// /// parent.name() /// /// See the javascript 'Number.toString()' as an example. /// /// # Arguments /// function: The function to register as a built in function. /// name: The name of the function (how it will be called but without the ()). /// parent: The object to register the function on, if the global object is used then the function is instead called as name() /// without requiring the parent, see parseInt() as an example. /// length: As described at <https://tc39.es/ecma262/#sec-function-instances-length>, The value of the "length" property is an integer that /// indicates the typical number of arguments expected by the function. However, the language permits the function to be invoked with /// some other number of arguments. /// /// If no length is provided, the length will be set to 0. // TODO: deprecate/remove this. 
pub(crate) fn make_builtin_fn<N>( function: NativeFunctionSignature, name: N, parent: &JsObject, length: usize, interpreter: &Context, ) where N: Into<String>, { let name = name.into(); let _timer = BoaProfiler::global().start_event(&format!("make_builtin_fn: {}", &name), "init"); let function = JsObject::from_proto_and_data( interpreter.standard_objects().function_object().prototype(), ObjectData::function(Function::Native { function, constructable: false, }), ); let attribute = PropertyDescriptor::builder() .writable(false) .enumerable(false) .configurable(true); function.insert_property("length", attribute.clone().value(length)); function.insert_property("name", attribute.value(name.as_str())); parent.clone().insert_property( name, PropertyDescriptor::builder() .value(function) .writable(true) .enumerable(false) .configurable(true), ); } #[derive(Debug, Clone, Copy)] pub struct BuiltInFunctionObject; impl BuiltInFunctionObject { pub const LENGTH: usize = 1; fn constructor( new_target: &JsValue, _: &[JsValue], context: &mut Context, ) -> JsResult<JsValue> { let prototype = get_prototype_from_constructor(new_target, StandardObjects::function_object, context)?; let this = JsObject::from_proto_and_data( prototype, ObjectData::function(Function::Native { function: |_, _, _| Ok(JsValue::undefined()), constructable: true, }), ); Ok(this.into()) } fn prototype(_: &JsValue, _: &[JsValue], _: &mut Context) -> JsResult<JsValue> { Ok(JsValue::undefined()) } /// `Function.prototype.call` /// /// The call() method invokes self with the first argument as the `this` value. /// /// More information: /// - [MDN documentation][mdn] /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-function.prototype.call /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call fn call(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> { if!this.is_function() { return context.throw_type_error(format!("{} is not a function", this.display())); } let this_arg = args.get_or_undefined(0); // TODO?: 3. Perform PrepareForTailCall let start = if!args.is_empty() { 1 } else { 0 }; context.call(this, this_arg, &args[start..]) } /// `Function.prototype.apply` /// /// The apply() method invokes self with the first argument as the `this` value /// and the rest of the arguments provided as an array (or an array-like object). /// /// More information: /// - [MDN documentation][mdn] /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-function.prototype.apply /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/apply fn apply(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> { if!this.is_function() { return context.throw_type_error(format!("{} is not a function", this.display())); } let this_arg = args.get_or_undefined(0); let arg_array = args.get_or_undefined(1); if arg_array.is_null_or_undefined() { // TODO?: 3.a. PrepareForTailCall return context.call(this, this_arg, &[]); } let arg_list = arg_array.create_list_from_array_like(&[], context)?; // TODO?: 5. PrepareForTailCall context.call(this, this_arg, &arg_list) } #[allow(clippy::wrong_self_convention)] fn to_string(this: &JsValue, _: &[JsValue], context: &mut Context) -> JsResult<JsValue> { let name = { // Is there a case here where if there is no name field on a value // name should default to None? Do all functions have names set? 
let value = this.get_field("name", &mut *context)?; if value.is_null_or_undefined() { None } else { Some(value.to_string(context)?) } }; let function = { let object = this .as_object() .map(|object| object.borrow().as_function().cloned()); if let Some(Some(function)) = object { function } else { return context.throw_type_error("Not a function"); } }; match (&function, name) { ( Function::Native { function: _, constructable: _, }, Some(name), ) => Ok(format!("function {}() {{\n [native Code]\n}}", &name).into()), (Function::Ordinary { body, params,.. }, Some(name)) => { let arguments: String = params .iter() .map(|param| param.name()) .collect::<Vec<&str>>() .join(", "); let statement_list = &*body; // This is a kluge. The implementaion in browser seems to suggest that // the value here is printed exactly as defined in source. I'm not sure if // that's possible here, but for now here's a dumb heuristic that prints functions let is_multiline = { let value = statement_list.to_string(); value.lines().count() > 1 }; if is_multiline { Ok( //?? For some reason statement_list string implementation // sticks a \n at the end no matter what format!( "{}({}) {{\n{}}}", &name, arguments, statement_list.to_string() ) .into(), ) } else { Ok(format!( "{}({}) {{{}}}", &name, arguments, // The trim here is to remove a \n stuck at the end // of the statement_list to_string method statement_list.to_string().trim() ) .into()) } } _ => Ok("TODO".into()), } } } impl BuiltIn for BuiltInFunctionObject { const NAME: &'static str = "Function"; const ATTRIBUTE: Attribute = Attribute::WRITABLE .union(Attribute::NON_ENUMERABLE) .union(Attribute::CONFIGURABLE); fn init(context: &mut Context) -> JsValue { let _timer = BoaProfiler::global().start_event("function", "init"); let function_prototype = context.standard_objects().function_object().prototype(); FunctionBuilder::native(context, Self::prototype) .name("") .length(0) .constructable(false) .build_function_prototype(&function_prototype); let function_object = ConstructorBuilder::with_standard_object( context, Self::constructor, context.standard_objects().function_object().clone(), ) .name(Self::NAME) .length(Self::LENGTH) .method(Self::call, "call", 1) .method(Self::apply, "apply", 1) .method(Self::to_string, "toString", 0) .build(); function_object.into() } }
.deref_mut() .as_mut_any() .downcast_mut::<T>() .ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type")) }
random_line_split
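`make_builtin_fn` above accepts any `NativeFunctionSignature`, wraps it in a non-constructable `Function::Native`, and installs the `length` and `name` properties before attaching it to the parent. A sketch of a conforming native function, written as if inside this module (the `double` name is invented; `get_or_undefined` is the `JsArgs` helper imported above):

```rust
// Must match `NativeFunctionSignature` exactly: a plain fn pointer.
fn double(_this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
    // Missing arguments become `undefined`, which `to_number` turns
    // into NaN, matching ordinary JS coercion.
    let n = args.get_or_undefined(0).to_number(context)?;
    Ok(JsValue::new(n * 2.0))
}
```

Registering it with `make_builtin_fn(double, "double", &parent, 1, &context)` would expose it to scripts as `parent.double(x)`.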
mod.rs
//! This module implements the global `Function` object as well as creates Native Functions. //! //! Objects wrap `Function`s and expose them via call/construct slots. //! //! The `Function` object is the constructor from which every function inherits; it can also create new functions at runtime. //! //! More information: //! - [ECMAScript reference][spec] //! - [MDN documentation][mdn] //! //! [spec]: https://tc39.es/ecma262/#sec-function-objects //! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function use std::{ fmt, ops::{Deref, DerefMut}, }; use dyn_clone::DynClone; use crate::{ builtins::BuiltIn, context::StandardObjects, environment::lexical_environment::Environment, gc::{Finalize, Trace}, object::JsObject, object::{ internal_methods::get_prototype_from_constructor, ConstructorBuilder, FunctionBuilder, NativeObject, ObjectData, }, property::Attribute, property::PropertyDescriptor, syntax::ast::node::{FormalParameter, RcStatementList}, BoaProfiler, Context, JsResult, JsValue, }; use super::JsArgs; pub(crate) mod arguments; #[cfg(test)] mod tests; /// Type representing a native built-in function a.k.a. function pointer. /// /// Native functions need to have this signature in order to /// be callable from JavaScript. pub type NativeFunctionSignature = fn(&JsValue, &[JsValue], &mut Context) -> JsResult<JsValue>; // Allows restricting closures to only `Copy` ones. // Uses the sealed pattern to disallow external implementations // of `DynCopy`. mod sealed { pub trait Sealed {} impl<T: Copy> Sealed for T {} } pub trait DynCopy: sealed::Sealed {} impl<T: Copy> DynCopy for T {} /// Trait representing a native built-in closure. /// /// Closures need to have this signature in order to /// be callable from JavaScript, but most of the time the compiler /// is smart enough to correctly infer the types. pub trait ClosureFunctionSignature: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + DynCopy + DynClone + 'static { } // The `Copy` bound automatically infers `DynCopy` and `DynClone` impl<T> ClosureFunctionSignature for T where T: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + Copy + 'static { } // Allows cloning Box<dyn ClosureFunctionSignature> dyn_clone::clone_trait_object!(ClosureFunctionSignature); #[derive(Debug, Trace, Finalize, PartialEq, Clone)] pub enum ThisMode { Lexical, Strict, Global, } impl ThisMode { /// Returns `true` if the this mode is `Lexical`. pub fn is_lexical(&self) -> bool { matches!(self, Self::Lexical) } /// Returns `true` if the this mode is `Strict`. pub fn is_strict(&self) -> bool { matches!(self, Self::Strict) } /// Returns `true` if the this mode is `Global`. pub fn is_global(&self) -> bool { matches!(self, Self::Global) } } #[derive(Debug, Trace, Finalize, PartialEq, Clone)] pub enum ConstructorKind { Base, Derived, } impl ConstructorKind { /// Returns `true` if the constructor kind is `Base`. pub fn is_base(&self) -> bool { matches!(self, Self::Base) } /// Returns `true` if the constructor kind is `Derived`. pub fn is_derived(&self) -> bool { matches!(self, Self::Derived) } } // We don't use a standalone `NativeObject` for `Captures` because it doesn't // guarantee that the internal type implements `Clone`. // This private trait guarantees that the internal type passed to `Captures` // implements `Clone`, and `DynClone` allows us to implement `Clone` for // `Box<dyn CapturesObject>`.
trait CapturesObject: NativeObject + DynClone {} impl<T: NativeObject + Clone> CapturesObject for T {} dyn_clone::clone_trait_object!(CapturesObject); /// Wrapper for `Box<dyn NativeObject + Clone>` that allows passing additional /// captures through a `Copy` closure. /// /// Any type implementing `Trace + Any + Debug + Clone` /// can be used as a capture context, so you can pass e.g. a String, /// a tuple or even a full struct. /// /// You can downcast to any type and handle the fail case as you like /// with `downcast_ref` and `downcast_mut`, or you can use `try_downcast_ref` /// and `try_downcast_mut` to automatically throw a `TypeError` if the downcast /// fails. #[derive(Debug, Clone, Trace, Finalize)] pub struct Captures(Box<dyn CapturesObject>); impl Captures { /// Creates a new capture context. pub(crate) fn new<T>(captures: T) -> Self where T: NativeObject + Clone, { Self(Box::new(captures)) } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or `None` otherwise. pub fn downcast_ref<T>(&self) -> Option<&T> where T: NativeObject + Clone, { self.0.deref().as_any().downcast_ref::<T>() } /// Mutably downcasts `Captures` to the specified type, returning a /// mutable reference to the downcasted type if successful or `None` otherwise. pub fn downcast_mut<T>(&mut self) -> Option<&mut T> where T: NativeObject + Clone, { self.0.deref_mut().as_mut_any().downcast_mut::<T>() } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or a `TypeError` otherwise. pub fn try_downcast_ref<T>(&self, context: &mut Context) -> JsResult<&T> where T: NativeObject + Clone, { self.0 .deref() .as_any() .downcast_ref::<T>() .ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type")) } /// Downcasts `Captures` to the specified type, returning a reference to the /// downcasted type if successful or a `TypeError` otherwise. pub fn try_downcast_mut<T>(&mut self, context: &mut Context) -> JsResult<&mut T> where T: NativeObject + Clone, { self.0 .deref_mut() .as_mut_any() .downcast_mut::<T>() .ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type")) } } /// Boa representation of a Function Object. /// /// FunctionBody is specific to this interpreter, it will either be Rust code or JavaScript code (AST Node) /// /// <https://tc39.es/ecma262/#sec-ecmascript-function-objects> #[derive(Clone, Trace, Finalize)] pub enum Function { Native { #[unsafe_ignore_trace] function: NativeFunctionSignature, constructable: bool, }, Closure { #[unsafe_ignore_trace] function: Box<dyn ClosureFunctionSignature>, constructable: bool, captures: Captures, }, Ordinary { constructable: bool, this_mode: ThisMode, body: RcStatementList, params: Box<[FormalParameter]>, environment: Environment, }, #[cfg(feature = "vm")] VmOrdinary { code: gc::Gc<crate::vm::CodeBlock>, environment: Environment, }, } impl fmt::Debug for Function { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Function {{... 
}}") } } impl Function { // Adds the final rest parameters to the Environment as an array #[cfg(not(feature = "vm"))] pub(crate) fn add_rest_param( param: &FormalParameter, index: usize, args_list: &[JsValue], context: &mut Context, local_env: &Environment, ) { use crate::builtins::Array; // Create array of values let array = Array::new_array(context); Array::add_to_array_object(&array, args_list.get(index..).unwrap_or_default(), context) .unwrap(); // Create binding local_env // Function parameters can share names in JavaScript... .create_mutable_binding(param.name(), false, true, context) .expect("Failed to create binding for rest param"); // Set Binding to value local_env .initialize_binding(param.name(), array, context) .expect("Failed to initialize rest param"); } // Adds an argument to the environment pub(crate) fn add_arguments_to_environment( param: &FormalParameter, value: JsValue, local_env: &Environment, context: &mut Context, ) { // Create binding local_env .create_mutable_binding(param.name(), false, true, context) .expect("Failed to create binding"); // Set Binding to value local_env .initialize_binding(param.name(), value, context) .expect("Failed to intialize binding"); } /// Returns true if the function object is constructable. pub fn is_constructable(&self) -> bool { match self { Self::Native { constructable,.. } => *constructable, Self::Closure { constructable,.. } => *constructable, Self::Ordinary { constructable,.. } => *constructable, #[cfg(feature = "vm")] Self::VmOrdinary { code,.. } => code.constructable, } } } /// Creates a new member function of a `Object` or `prototype`. /// /// A function registered using this macro can then be called from Javascript using: /// /// parent.name() /// /// See the javascript 'Number.toString()' as an example. /// /// # Arguments /// function: The function to register as a built in function. /// name: The name of the function (how it will be called but without the ()). /// parent: The object to register the function on, if the global object is used then the function is instead called as name() /// without requiring the parent, see parseInt() as an example. /// length: As described at <https://tc39.es/ecma262/#sec-function-instances-length>, The value of the "length" property is an integer that /// indicates the typical number of arguments expected by the function. However, the language permits the function to be invoked with /// some other number of arguments. /// /// If no length is provided, the length will be set to 0. // TODO: deprecate/remove this. 
pub(crate) fn make_builtin_fn<N>( function: NativeFunctionSignature, name: N, parent: &JsObject, length: usize, interpreter: &Context, ) where N: Into<String>, { let name = name.into(); let _timer = BoaProfiler::global().start_event(&format!("make_builtin_fn: {}", &name), "init"); let function = JsObject::from_proto_and_data( interpreter.standard_objects().function_object().prototype(), ObjectData::function(Function::Native { function, constructable: false, }), ); let attribute = PropertyDescriptor::builder() .writable(false) .enumerable(false) .configurable(true); function.insert_property("length", attribute.clone().value(length)); function.insert_property("name", attribute.value(name.as_str())); parent.clone().insert_property( name, PropertyDescriptor::builder() .value(function) .writable(true) .enumerable(false) .configurable(true), ); } #[derive(Debug, Clone, Copy)] pub struct BuiltInFunctionObject; impl BuiltInFunctionObject { pub const LENGTH: usize = 1; fn constructor( new_target: &JsValue, _: &[JsValue], context: &mut Context, ) -> JsResult<JsValue> { let prototype = get_prototype_from_constructor(new_target, StandardObjects::function_object, context)?; let this = JsObject::from_proto_and_data( prototype, ObjectData::function(Function::Native { function: |_, _, _| Ok(JsValue::undefined()), constructable: true, }), ); Ok(this.into()) } fn prototype(_: &JsValue, _: &[JsValue], _: &mut Context) -> JsResult<JsValue> { Ok(JsValue::undefined()) } /// `Function.prototype.call` /// /// The call() method invokes self with the first argument as the `this` value. /// /// More information: /// - [MDN documentation][mdn] /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-function.prototype.call /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call fn call(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> { if!this.is_function() { return context.throw_type_error(format!("{} is not a function", this.display())); } let this_arg = args.get_or_undefined(0); // TODO?: 3. Perform PrepareForTailCall let start = if!args.is_empty() { 1 } else { 0 }; context.call(this, this_arg, &args[start..]) } /// `Function.prototype.apply` /// /// The apply() method invokes self with the first argument as the `this` value /// and the rest of the arguments provided as an array (or an array-like object). /// /// More information: /// - [MDN documentation][mdn] /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-function.prototype.apply /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/apply fn apply(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> { if!this.is_function() { return context.throw_type_error(format!("{} is not a function", this.display())); } let this_arg = args.get_or_undefined(0); let arg_array = args.get_or_undefined(1); if arg_array.is_null_or_undefined() { // TODO?: 3.a. PrepareForTailCall return context.call(this, this_arg, &[]); } let arg_list = arg_array.create_list_from_array_like(&[], context)?; // TODO?: 5. PrepareForTailCall context.call(this, this_arg, &arg_list) } #[allow(clippy::wrong_self_convention)] fn to_string(this: &JsValue, _: &[JsValue], context: &mut Context) -> JsResult<JsValue> { let name = { // Is there a case here where if there is no name field on a value // name should default to None? Do all functions have names set? 
let value = this.get_field("name", &mut *context)?; if value.is_null_or_undefined() { None } else { Some(value.to_string(context)?) } }; let function = { let object = this .as_object() .map(|object| object.borrow().as_function().cloned()); if let Some(Some(function)) = object { function } else
}; match (&function, name) { ( Function::Native { function: _, constructable: _, }, Some(name), ) => Ok(format!("function {}() {{\n [native Code]\n}}", &name).into()), (Function::Ordinary { body, params,.. }, Some(name)) => { let arguments: String = params .iter() .map(|param| param.name()) .collect::<Vec<&str>>() .join(", "); let statement_list = &*body; // This is a kluge. The implementaion in browser seems to suggest that // the value here is printed exactly as defined in source. I'm not sure if // that's possible here, but for now here's a dumb heuristic that prints functions let is_multiline = { let value = statement_list.to_string(); value.lines().count() > 1 }; if is_multiline { Ok( //?? For some reason statement_list string implementation // sticks a \n at the end no matter what format!( "{}({}) {{\n{}}}", &name, arguments, statement_list.to_string() ) .into(), ) } else { Ok(format!( "{}({}) {{{}}}", &name, arguments, // The trim here is to remove a \n stuck at the end // of the statement_list to_string method statement_list.to_string().trim() ) .into()) } } _ => Ok("TODO".into()), } } } impl BuiltIn for BuiltInFunctionObject { const NAME: &'static str = "Function"; const ATTRIBUTE: Attribute = Attribute::WRITABLE .union(Attribute::NON_ENUMERABLE) .union(Attribute::CONFIGURABLE); fn init(context: &mut Context) -> JsValue { let _timer = BoaProfiler::global().start_event("function", "init"); let function_prototype = context.standard_objects().function_object().prototype(); FunctionBuilder::native(context, Self::prototype) .name("") .length(0) .constructable(false) .build_function_prototype(&function_prototype); let function_object = ConstructorBuilder::with_standard_object( context, Self::constructor, context.standard_objects().function_object().clone(), ) .name(Self::NAME) .length(Self::LENGTH) .method(Self::call, "call", 1) .method(Self::apply, "apply", 1) .method(Self::to_string, "toString", 0) .build(); function_object.into() } }
{ return context.throw_type_error("Not a function"); }
conditional_block
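Finally, the `to_string` heuristic above prints native functions as `function name() { [native Code] }` and rebuilds ordinary functions from their parameter names and statement list, branching on whether the body stringifies to more than one line. A sketch of what that yields for a one-line function (hypothetical script; the exact whitespace depends on the statement-list printer):

```rust
#[test]
fn to_string_single_line() {
    use crate::Context;

    let mut context = Context::default();
    let shown = context
        .eval("function id(x) { return x; } id.toString();")
        .unwrap();
    // The single-line branch trims the trailing newline, so this prints
    // something like: id(x) { return x; }
    println!("{}", shown.display());
}
```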