file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
wallet.rs | state.
fn persistent_state_address(
network: NetworkId,
master_xpriv: &bip32::ExtendedPrivKey,
) -> String {
let child = bip32::ChildNumber::from_hardened_idx(350).unwrap();
let child_xpriv = master_xpriv.derive_priv(&SECP, &[child]).unwrap();
let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
match network {
#[cfg(feature = "liquid")]
NetworkId::Elements(enet) => elements::Address::p2wpkh(
&child_xpub.public_key,
None,
coins::liq::address_params(enet),
)
.to_string(),
NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
#[cfg(not(feature = "liquid"))]
_ => unimplemented!(),
}
}
/// Store the persistent wallet state.
fn save_persistent_state(&self) -> Result<(), Error> {
let state = PersistentWalletState {
next_external_child: self.next_external_child.get(),
next_internal_child: self.next_internal_child.get(),
};
let store_addr = Wallet::persistent_state_address(self.network.id(), &self.master_xpriv);
// Generic call for liquid compat.
self.rpc.call("setlabel", &[store_addr.into(), serde_json::to_string(&state)?.into()])?;
Ok(())
}
/// Load the persistent wallet state from the node.
#[allow(clippy::match_wild_err_arm)]
fn load_persistent_state(
rpc: &bitcoincore_rpc::Client,
state_addr: &str,
) -> Result<PersistentWalletState, Error> {
let info: Value = rpc.call("getaddressinfo", &[state_addr.into()])?;
match info.get("label") {
None => Err(Error::WalletNotRegistered),
Some(&Value::String(ref label)) => {
Ok(match serde_json::from_str::<PersistentWalletState>(label) {
Err(_) => panic!(
"corrupt persistent wallet state label (address: {}): {}",
state_addr, label
),
Ok(s) => s,
})
}
Some(_) => unreachable!(),
}
}
/// Calculates the bip32 seeds from the mnemonic phrase.
/// In order are returned:
/// - the master xpriv
/// - the external address xpriv
/// - the internal address xpriv
fn calc_xkeys(
seed: &[u8],
) -> (bip32::ExtendedPrivKey, bip32::ExtendedPrivKey, bip32::ExtendedPrivKey) {
// Network isn't of importance here.
let master_xpriv =
bip32::ExtendedPrivKey::new_master(BNetwork::Bitcoin, &seed[..]).unwrap();
// Add BIP-44 derivations for external and internal addresses.
let external_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/0").unwrap())
.unwrap();
let internal_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/1").unwrap())
.unwrap();
(master_xpriv, external_xpriv, internal_xpriv)
}
/// Register a new [Wallet].
pub fn register(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
// create the wallet in Core
let tmp_rpc = network.connect(None)?;
match tmp_rpc.create_wallet(fp.as_str(), Some(true))?.warning {
None => {}
Some(ref s) if s.is_empty() => {}
Some(warning) => {
warn!("Received warning when creating wallet {} in Core: {}", fp, warning,)
}
}
let rpc = network.connect(Some(&fp))?;
// Check if the user was already registered.
let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
match Wallet::load_persistent_state(&rpc, &state_addr) {
Err(Error::WalletNotRegistered) => {} // good
Ok(_) => return Err(Error::WalletAlreadyRegistered),
Err(e) => {
warn!("Unexpected error while registering wallet: {}", e);
return Err(e);
}
}
let wallet = Wallet {
network: network,
rpc: rpc,
mnemonic: mnemonic.to_owned(),
master_xpriv: master_xpriv,
external_xpriv: external_xpriv,
internal_xpriv: internal_xpriv,
#[cfg(feature = "liquid")]
master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
next_external_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
next_internal_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
tip: None,
last_tx: None,
cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
};
wallet.save_persistent_state()?;
Ok(wallet)
}
/// Login to an existing [Wallet].
pub fn login(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
let tmp_rpc = network.connect(None)?;
tmp_rpc.load_wallet(&fp)?;
let rpc = network.connect(Some(&fp))?;
let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
let state = Wallet::load_persistent_state(&rpc, &state_addr)?;
Ok(Wallet {
network: network,
rpc: rpc,
mnemonic: mnemonic.to_owned(),
master_xpriv: master_xpriv,
external_xpriv: external_xpriv,
internal_xpriv: internal_xpriv,
#[cfg(feature = "liquid")]
master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
next_external_child: cell::Cell::new(state.next_external_child),
next_internal_child: cell::Cell::new(state.next_internal_child),
tip: None,
last_tx: None,
cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
})
}
pub fn fingerprint(&self) -> bip32::Fingerprint {
self.master_xpriv.fingerprint(&SECP)
}
pub fn logout(self) -> Result<(), Error> {
self.rpc.unload_wallet(None)?;
Ok(())
}
pub fn mnemonic(&self) -> String {
self.mnemonic.clone()
}
fn derive_private_key(
&self,
fp: bip32::Fingerprint,
child: bip32::ChildNumber,
) -> Result<secp256k1::SecretKey, Error> {
let xpriv = if fp == self.external_xpriv.fingerprint(&SECP) {
self.external_xpriv
} else if fp == self.internal_xpriv.fingerprint(&SECP) {
self.internal_xpriv
} else {
error!("Address is labeled with unknown master xpriv fingerprint: {:?}", fp);
return Err(Error::CorruptNodeData);
};
let privkey = xpriv.derive_priv(&SECP, &[child])?.private_key;
Ok(privkey.key)
}
pub fn updates(&mut self) -> Result<Vec<Value>, Error> {
let mut msgs = vec![];
// check for new blocks
let tip = self.rpc.get_best_block_hash()?;
if self.tip!= Some(tip) {
let info: Value = self.rpc.call("getblock", &[tip.to_hex().into(), 1.into()])?;
msgs.push(json!({
"event": "block",
"block": {
"block_height": info["height"].as_u64().req()?,
"block_hash": tip.to_hex()
}
}));
self.tip = Some(tip);
}
// check for new transactions
// XXX does the app care about the transaction data in the event?
if let Some(last_tx) = self._get_transactions(1, 0)?.0.get(0) {
let txid = last_tx["txhash"].as_str().req()?;
let txid = sha256d::Hash::from_hex(txid)?;
if self.last_tx!= Some(txid) {
self.last_tx = Some(txid);
msgs.push(json!({ "event": "transaction", "transaction": last_tx }));
}
}
// update fees once every FEE_ESTIMATES_TTL
if self.cached_fees.1.elapsed() >= FEE_ESTIMATES_TTL {
self.cached_fees = (self._make_fee_estimates()?, Instant::now());
msgs.push(json!({ "event": "fees", "fees": self.cached_fees.0 }));
}
// TODO:
// {"event":"subaccount","subaccount":{"bits":"701144.66","btc":"0.70114466","fiat":"0.7712591260000000622741556099981585311432","fiat_currency":"EUR","fiat_rate":"1.10000000000000008881784197001252","has_transactions":true,"mbtc":"701.14466","name":"","pointer":0,"receiving_id":"GA3MQKVp6pP7royXDuZcw55F2TXTgg","recovery_chain_code":"","recovery_pub_key":"","satoshi":70114466,"type":"2of2","ubtc":"701144.66"}}
// XXX use zmq?
Ok(msgs)
}
pub fn get_account(&self) -> Result<Value, Error> {
let has_transactions = self._get_transactions(1, 0)?.1;
extend(
json!({
"type": "core",
"pointer": 0,
"receiving_id": "",
"name": "RPC wallet",
"has_transactions": has_transactions,
}),
self._get_balance(0)?,
)
}
pub fn get_balance(&self, details: &Value) -> Result<Value, Error> {
let min_conf = details["num_confs"].as_u64().req()? as u32;
self._get_balance(min_conf)
}
fn _get_balance(&self, min_conf: u32) -> Result<Value, Error> {
//TODO(stevenroose) implement in rust-bitcoincore-rpc once bitcoin::Amount lands
let mut args = vec![Value::Null, json!(min_conf), json!(true)];
#[cfg(feature = "liquid")]
{
if let NetworkId::Elements(net) = self.network.id() {
args.push(coins::liq::asset_hex(net).into());
}
}
let balance: f64 = self.rpc.call("getbalance", &args)?;
Ok(self._convert_satoshi(btc_to_usat(balance)))
}
pub fn get_transactions(&self, details: &Value) -> Result<Value, Error> {
let page = details["page_id"].as_u64().req()? as usize;
let (txs, potentially_has_more) = self._get_transactions(PER_PAGE, PER_PAGE * page)?;
Ok(json!({
"list": txs,
"page_id": page,
"next_page_id": if potentially_has_more { Some(page+1) } else { None },
}))
}
fn _get_transactions(&self, limit: usize, start: usize) -> Result<(Vec<Value>, bool), Error> {
// fetch listtranssactions
let txdescs: Vec<Value> = self
.rpc
.call("listtransactions", &["*".into(), limit.into(), start.into(), true.into()])?;
let potentially_has_more = txdescs.len() == limit;
// fetch full transactions and convert to GDK format
let mut txs = Vec::new();
for desc in txdescs.into_iter() {
let txid = sha256d::Hash::from_hex(desc["txid"].as_str().req()?)?;
let blockhash = &desc["blockhash"];
let tx_hex: String = self.rpc.call(
"getrawtransaction",
&[txid.to_hex().into(), false.into(), blockhash.clone()],
)?;
txs.push(format_gdk_tx(&desc, &hex::decode(&tx_hex)?, self.network.id())?);
}
Ok((txs, potentially_has_more))
}
pub fn get_transaction(&self, txid: &str) -> Result<Value, Error> {
let txid = sha256d::Hash::from_hex(txid)?;
let desc: Value = self.rpc.call("gettransaction", &[txid.to_hex().into(), true.into()])?;
let raw_tx = hex::decode(desc["hex"].as_str().req()?)?;
format_gdk_tx(&desc, &raw_tx, self.network.id())
}
pub fn create_transaction(&self, details: &Value) -> Result<String, Error> |
pub fn sign_transaction(&self, details: &Value) -> Result<String, Error> {
debug!("sign_transaction(): {:?}", details);
let change_address = self.next_address(&self.internal_xpriv, &self.next_internal_child)?;
// If we don't have any inputs, we can fail early.
let unspent: Vec<Value> = self.rpc.call("listunspent", &[0.into()])?;
if unspent.is_empty() {
return Err(Error::NoUtxosFound);
}
debug!("list_unspent: {:?}", unspent);
let raw_tx = match self.network.id() {
NetworkId::Bitcoin(_) => {
coins::btc::sign_transaction(&self.rpc, details, &change_address, |fp, child| {
self.derive_private_key(*fp, *child)
})?
}
NetworkId::Elements(net) => coins::liq::sign_transaction(
&self.rpc,
net,
details,
&change_address,
|fp, child| self.derive_private_key(*fp, *child),
)?,
};
let hex_tx = hex::encode(&raw_tx);
//TODO(stevenroose) remove when confident in signing code
let ret: Vec<Value> = self.rpc.call("testmempoolaccept", &[vec![hex_tx.clone()].into()])?;
let accept = ret.into_iter().next().unwrap();
if!(accept["allowed"].as_bool().req()?) {
error!(
"sign_transaction(): signed tx is not valid: {}",
accept["reject-reason"].as_str().req()?
);
// TODO(stevenroose) should we return an error??
}
Ok(hex_tx)
}
pub fn send_transaction(&self, details: &Value) -> Result<String, Error> {
let tx_hex = details["hex"].as_str().req()?;
Ok(self.rpc.call::<String>("sendrawtransaction", &[tx_hex.into()])?)
}
pub fn send_raw_transaction(&self, tx_hex: &str) -> Result<String, Error> {
Ok(self.rpc.call::<String>("sendrawtransaction", &[tx_hex.into()])?)
}
/// Return the next address for the derivation and import it in Core.
fn next_address(
&self,
xpriv: &bip32::ExtendedPrivKey,
child: &cell::Cell<bip32::ChildNumber>,
) -> Result<String, Error> {
let child_xpriv = xpriv.derive_priv(&SECP, &[child.get()])?;
let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
let meta = AddressMeta {
fingerprint: Some(xpriv.fingerprint(&SECP)),
child: Some(child.get()),
..Default::default()
};
let address_str = match self.network.id() {
#[cfg(feature = "liquid")]
NetworkId::Elements(enet) => {
let mut addr = elements::Address::p2shwpkh(
&child_xpub.public_key,
None,
coins::liq::address_params(enet),
);
let blinding_key = wally::asset_blinding_key_to_ec_private_key(
&self.master_blinding_key,
&addr.script_pubkey(),
);
let blinding_pubkey = secp256k1::PublicKey::from_secret_key(&SECP, &blinding_key);
addr.blinding_pubkey = Some(blinding_pubkey);
// Store blinding privkey in the node.
let addr_str = addr.to_string();
coins::liq::store_blinding_key(&self.rpc, &addr_str, &blinding_key)?;
addr_str
}
NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
#[cfg(not(feature = "liquid"))]
_ => unimplemented!(),
};
// Since this is a newly generated address, rescanning is not required.
self.rpc.import_public_key(&child_xpub.public_key, Some(&meta.to_label()?), Some(false))?;
child.set(match child.get() {
bip32::ChildNumber::Normal {
index,
} => bip32::ChildNumber::from_normal_idx(index + 1)?,
_ => unreachable!(),
});
self.save_persistent_state()?;
Ok(address_str)
}
pub fn get_receive_address(&self, _details: &Value) -> Result<Value, Error> {
let address = self.next_address(&self.external_xpriv, &self.next_external_child)?;
// {
// "address": "2N2x4EgizS2w3DUiWYWW9pEf4sGYRfo6PAX",
// "address_type": "p2wsh",
// "branch": 1,
// "pointer": 13,
// "script": "52210338832debc5e15ce143d5cf9241147ac0019e7516d3d9569e04b0e18f3278718921025dfaa85d64963252604e1b139b40182bb859a9e2e1aa2904876c34e82158d85452ae",
// "script_type": 14,
// "subtype": null
// }
Ok(json!({
"address": address,
"address_type": "p2wpkh",
}))
}
pub fn get_fee_estimates(&self) -> Option<&Value> {
// will not be available before the first "tick", which should
| {
debug!("create_transaction(): {:?}", details);
let unfunded_tx = match self.network.id() {
NetworkId::Bitcoin(..) => coins::btc::create_transaction(&self.rpc, details)?,
NetworkId::Elements(..) => coins::liq::create_transaction(&self.rpc, details)?,
};
debug!("create_transaction unfunded tx: {:?}", hex::encode(&unfunded_tx));
// TODO explicit handling for id_no_amount_specified id_fee_rate_is_below_minimum id_invalid_replacement_fee_rate
// id_send_all_requires_a_single_output
Ok(hex::encode(unfunded_tx))
} | identifier_body |
ui.rs | use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use std::cmp::{min, max};
use stdweb::web;
use stdweb::unstable::TryInto;
use nalgebra::{Vector2};
use time_steward::{DeterministicRandomId};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Accessor, simple_timeline};
use self::simple_timeline::{query_ref};
use steward_module::bbox_collision_detection_2d::Detector;
pub struct Game {
pub steward: Steward,
pub now: Time,
pub last_ui_time: f64,
pub time_speed: f64,
pub display_center: Vector,
pub display_radius: Coordinate,
pub selected_object: Option <ObjectHandle>,
}
pub fn make_game(seed_id: DeterministicRandomId)->Game {
let mut steward: Steward = Steward::from_globals (Globals::default());
steward.insert_fiat_event (0, seed_id, Initialize {}).unwrap();
Game {
steward: steward,
now: 1,
last_ui_time: 0.0,
time_speed: 1.0,
display_center: Vector::new (0, 0),
display_radius: INITIAL_PALACE_DISTANCE*3/2,
selected_object: None,
}
}
pub fn draw_game <A: Accessor <Steward = Steward>>(accessor: &A, game: & Game) {
let canvas_width: f64 = js! {return canvas.width;}.try_into().unwrap();
let scale = canvas_width/(game.display_radius as f64*2.0);
js! {
var size = Math.min (window.innerHeight, window.innerWidth);
canvas.setAttribute ("width", size);
canvas.setAttribute ("height", size);
context.clearRect (0, 0, canvas.width, canvas.height);
context.save();
context.scale (@{scale},@{scale});
context.translate (@{-game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type!= ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.awareness_range >0 && varying.object_type!= ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn | (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone();
let time_callback = move |speed: f64| {
let mut game = game.borrow_mut();
game.time_speed = if speed == -10.0 { 0.0 } else { (2.0f64).powf(speed/2.0) };
println!("{:?}", (speed));
};
js! {
var callback = @{time_callback};
game_container.append($("<div>").append(
$("<input>", {
type: "range",
id: "time_speed",
value: 0, min: -10, max: 10, step: 1
}).on ("input", function (event) {
callback(event.target.valueAsNumber);
}),
$("<label>", {
for: "time_speed",
text: " time speed",
})
));
}
}
{
let game = game.clone();
let click_callback = move |x: f64,y: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let location = game.display_center + Vector2::new (offset [0] as Coordinate, offset [1] as Coordinate);
let now = game.now;
//game.steward.insert_fiat_event (now, DeterministicRandomId::new (& (now)), ChangeOrders {team: 1, orders: Orders {unit_destination: Some (location)}}).unwrap();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
for object in Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (location), 0), None) {
let varying = query_ref (& snapshot, & object.varying);
let center = varying.trajectory.evaluate (now);
let object_radius = radius (& varying);
if distance (location, center) < object_radius {
game.selected_object = Some (object.clone());
}
}
};
js! {
var callback = @{click_callback};
canvas.addEventListener ("click", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5
);
event.preventDefault();
});
}
}
js! {
game_container.append($("<div>").append(
$("<input>", {
type: "number",
id: "seed",
value:@{seed},
min: 0,
max: @{0u32.wrapping_sub(1)}
}).on ("input", function (event) {
var value = Math.floor(event.target.valueAsNumber);
if (value >= 0 && value <= @{0u32.wrapping_sub(1)}) {
window.localStorage.setItem ("random_seed", value);
}
}),
$("<label>", {
for: "seed",
text: " random seed (reload page to apply)",
})
));
game_container.append(window.selected_info = $("<div>", {id: "selected_info"}));
}
web::window().request_animation_frame (move | time | main_loop (time, game));
stdweb::event_loop();
}
#[cfg (not(target_os = "emscripten"))]
pub fn run() {
let mut scores = [0; 2];
loop {
let mut game = make_game(DeterministicRandomId::new (& (scores, 0xae06fcf3129d0685u64)));
loop {
game.now += SECOND /100;
let snapshot = game.steward.snapshot_before (& game.now). unwrap ();
game.steward.forget_before (& game.now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().filter (| object | is_building (&query_ref (& snapshot, & object.varying))).map (| object | query_ref (& snapshot, & object.varying).team).collect();
if teams_alive.len() <= 1 {
scores [teams_alive.into_iter().next().unwrap()] += 1;
println!("{:?}", scores);
break;
}*/
}
}
}
| main_loop | identifier_name |
ui.rs | use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use std::cmp::{min, max};
use stdweb::web;
use stdweb::unstable::TryInto;
use nalgebra::{Vector2};
use time_steward::{DeterministicRandomId};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Accessor, simple_timeline};
use self::simple_timeline::{query_ref};
use steward_module::bbox_collision_detection_2d::Detector;
pub struct Game {
pub steward: Steward,
pub now: Time,
pub last_ui_time: f64,
pub time_speed: f64,
pub display_center: Vector,
pub display_radius: Coordinate,
pub selected_object: Option <ObjectHandle>,
}
pub fn make_game(seed_id: DeterministicRandomId)->Game {
let mut steward: Steward = Steward::from_globals (Globals::default());
steward.insert_fiat_event (0, seed_id, Initialize {}).unwrap();
Game {
steward: steward,
now: 1,
last_ui_time: 0.0,
time_speed: 1.0, | }
pub fn draw_game <A: Accessor <Steward = Steward>>(accessor: &A, game: & Game) {
let canvas_width: f64 = js! {return canvas.width;}.try_into().unwrap();
let scale = canvas_width/(game.display_radius as f64*2.0);
js! {
var size = Math.min (window.innerHeight, window.innerWidth);
canvas.setAttribute ("width", size);
canvas.setAttribute ("height", size);
context.clearRect (0, 0, canvas.width, canvas.height);
context.save();
context.scale (@{scale},@{scale});
context.translate (@{-game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type!= ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.awareness_range >0 && varying.object_type!= ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn main_loop (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone();
let time_callback = move |speed: f64| {
let mut game = game.borrow_mut();
game.time_speed = if speed == -10.0 { 0.0 } else { (2.0f64).powf(speed/2.0) };
println!("{:?}", (speed));
};
js! {
var callback = @{time_callback};
game_container.append($("<div>").append(
$("<input>", {
type: "range",
id: "time_speed",
value: 0, min: -10, max: 10, step: 1
}).on ("input", function (event) {
callback(event.target.valueAsNumber);
}),
$("<label>", {
for: "time_speed",
text: " time speed",
})
));
}
}
{
let game = game.clone();
let click_callback = move |x: f64,y: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let location = game.display_center + Vector2::new (offset [0] as Coordinate, offset [1] as Coordinate);
let now = game.now;
//game.steward.insert_fiat_event (now, DeterministicRandomId::new (& (now)), ChangeOrders {team: 1, orders: Orders {unit_destination: Some (location)}}).unwrap();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
for object in Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (location), 0), None) {
let varying = query_ref (& snapshot, & object.varying);
let center = varying.trajectory.evaluate (now);
let object_radius = radius (& varying);
if distance (location, center) < object_radius {
game.selected_object = Some (object.clone());
}
}
};
js! {
var callback = @{click_callback};
canvas.addEventListener ("click", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5
);
event.preventDefault();
});
}
}
js! {
game_container.append($("<div>").append(
$("<input>", {
type: "number",
id: "seed",
value:@{seed},
min: 0,
max: @{0u32.wrapping_sub(1)}
}).on ("input", function (event) {
var value = Math.floor(event.target.valueAsNumber);
if (value >= 0 && value <= @{0u32.wrapping_sub(1)}) {
window.localStorage.setItem ("random_seed", value);
}
}),
$("<label>", {
for: "seed",
text: " random seed (reload page to apply)",
})
));
game_container.append(window.selected_info = $("<div>", {id: "selected_info"}));
}
web::window().request_animation_frame (move | time | main_loop (time, game));
stdweb::event_loop();
}
#[cfg (not(target_os = "emscripten"))]
pub fn run() {
let mut scores = [0; 2];
loop {
let mut game = make_game(DeterministicRandomId::new (& (scores, 0xae06fcf3129d0685u64)));
loop {
game.now += SECOND /100;
let snapshot = game.steward.snapshot_before (& game.now). unwrap ();
game.steward.forget_before (& game.now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().filter (| object | is_building (&query_ref (& snapshot, & object.varying))).map (| object | query_ref (& snapshot, & object.varying).team).collect();
if teams_alive.len() <= 1 {
scores [teams_alive.into_iter().next().unwrap()] += 1;
println!("{:?}", scores);
break;
}*/
}
}
} | display_center: Vector::new (0, 0),
display_radius: INITIAL_PALACE_DISTANCE*3/2,
selected_object: None,
} | random_line_split |
ui.rs | use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use std::cmp::{min, max};
use stdweb::web;
use stdweb::unstable::TryInto;
use nalgebra::{Vector2};
use time_steward::{DeterministicRandomId};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Accessor, simple_timeline};
use self::simple_timeline::{query_ref};
use steward_module::bbox_collision_detection_2d::Detector;
pub struct Game {
pub steward: Steward,
pub now: Time,
pub last_ui_time: f64,
pub time_speed: f64,
pub display_center: Vector,
pub display_radius: Coordinate,
pub selected_object: Option <ObjectHandle>,
}
pub fn make_game(seed_id: DeterministicRandomId)->Game {
let mut steward: Steward = Steward::from_globals (Globals::default());
steward.insert_fiat_event (0, seed_id, Initialize {}).unwrap();
Game {
steward: steward,
now: 1,
last_ui_time: 0.0,
time_speed: 1.0,
display_center: Vector::new (0, 0),
display_radius: INITIAL_PALACE_DISTANCE*3/2,
selected_object: None,
}
}
pub fn draw_game <A: Accessor <Steward = Steward>>(accessor: &A, game: & Game) {
let canvas_width: f64 = js! {return canvas.width;}.try_into().unwrap();
let scale = canvas_width/(game.display_radius as f64*2.0);
js! {
var size = Math.min (window.innerHeight, window.innerWidth);
canvas.setAttribute ("width", size);
canvas.setAttribute ("height", size);
context.clearRect (0, 0, canvas.width, canvas.height);
context.save();
context.scale (@{scale},@{scale});
context.translate (@{-game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type!= ObjectType::Beast |
if varying.awareness_range >0 && varying.object_type!= ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn main_loop (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone();
let time_callback = move |speed: f64| {
let mut game = game.borrow_mut();
game.time_speed = if speed == -10.0 { 0.0 } else { (2.0f64).powf(speed/2.0) };
println!("{:?}", (speed));
};
js! {
var callback = @{time_callback};
game_container.append($("<div>").append(
$("<input>", {
type: "range",
id: "time_speed",
value: 0, min: -10, max: 10, step: 1
}).on ("input", function (event) {
callback(event.target.valueAsNumber);
}),
$("<label>", {
for: "time_speed",
text: " time speed",
})
));
}
}
{
let game = game.clone();
let click_callback = move |x: f64,y: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let location = game.display_center + Vector2::new (offset [0] as Coordinate, offset [1] as Coordinate);
let now = game.now;
//game.steward.insert_fiat_event (now, DeterministicRandomId::new (& (now)), ChangeOrders {team: 1, orders: Orders {unit_destination: Some (location)}}).unwrap();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
for object in Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (location), 0), None) {
let varying = query_ref (& snapshot, & object.varying);
let center = varying.trajectory.evaluate (now);
let object_radius = radius (& varying);
if distance (location, center) < object_radius {
game.selected_object = Some (object.clone());
}
}
};
js! {
var callback = @{click_callback};
canvas.addEventListener ("click", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5
);
event.preventDefault();
});
}
}
js! {
game_container.append($("<div>").append(
$("<input>", {
type: "number",
id: "seed",
value:@{seed},
min: 0,
max: @{0u32.wrapping_sub(1)}
}).on ("input", function (event) {
var value = Math.floor(event.target.valueAsNumber);
if (value >= 0 && value <= @{0u32.wrapping_sub(1)}) {
window.localStorage.setItem ("random_seed", value);
}
}),
$("<label>", {
for: "seed",
text: " random seed (reload page to apply)",
})
));
game_container.append(window.selected_info = $("<div>", {id: "selected_info"}));
}
web::window().request_animation_frame (move | time | main_loop (time, game));
stdweb::event_loop();
}
#[cfg (not(target_os = "emscripten"))]
pub fn run() {
let mut scores = [0; 2];
loop {
let mut game = make_game(DeterministicRandomId::new (& (scores, 0xae06fcf3129d0685u64)));
loop {
game.now += SECOND /100;
let snapshot = game.steward.snapshot_before (& game.now). unwrap ();
game.steward.forget_before (& game.now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().filter (| object | is_building (&query_ref (& snapshot, & object.varying))).map (| object | query_ref (& snapshot, & object.varying).team).collect();
if teams_alive.len() <= 1 {
scores [teams_alive.into_iter().next().unwrap()] += 1;
println!("{:?}", scores);
break;
}*/
}
}
}
| {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}} | conditional_block |
ui.rs | use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use std::cmp::{min, max};
use stdweb::web;
use stdweb::unstable::TryInto;
use nalgebra::{Vector2};
use time_steward::{DeterministicRandomId};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Accessor, simple_timeline};
use self::simple_timeline::{query_ref};
use steward_module::bbox_collision_detection_2d::Detector;
pub struct Game {
pub steward: Steward,
pub now: Time,
pub last_ui_time: f64,
pub time_speed: f64,
pub display_center: Vector,
pub display_radius: Coordinate,
pub selected_object: Option <ObjectHandle>,
}
pub fn make_game(seed_id: DeterministicRandomId)->Game {
let mut steward: Steward = Steward::from_globals (Globals::default());
steward.insert_fiat_event (0, seed_id, Initialize {}).unwrap();
Game {
steward: steward,
now: 1,
last_ui_time: 0.0,
time_speed: 1.0,
display_center: Vector::new (0, 0),
display_radius: INITIAL_PALACE_DISTANCE*3/2,
selected_object: None,
}
}
pub fn draw_game <A: Accessor <Steward = Steward>>(accessor: &A, game: & Game) {
let canvas_width: f64 = js! {return canvas.width;}.try_into().unwrap();
let scale = canvas_width/(game.display_radius as f64*2.0);
js! {
var size = Math.min (window.innerHeight, window.innerWidth);
canvas.setAttribute ("width", size);
canvas.setAttribute ("height", size);
context.clearRect (0, 0, canvas.width, canvas.height);
context.save();
context.scale (@{scale},@{scale});
context.translate (@{-game.display_center [0] as f64}, @{-game.display_center [1] as f64});
context.translate (@{game.display_radius as f64}, @{game.display_radius as f64});
}
for object in Detector::objects_near_box (accessor, & get_detector (accessor), BoundingBox::centered (to_collision_vector (if game.display_radius > INITIAL_PALACE_DISTANCE*2 { Vector::new(0,0) } else { game.display_center }), min (game.display_radius, INITIAL_PALACE_DISTANCE*2) as u64), None) {
let varying = query_ref (accessor, & object.varying);
let center = varying.trajectory.evaluate (*accessor.now());
let center = Vector2::new (center [0] as f64, center [1] as f64);
let object_radius = radius (& varying) as f64;
let selected = game.selected_object.as_ref() == Some(&object);
//println!("{:?}", (varying.trajectory, center, object_radius));
js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, Math.PI*2);
context.strokeStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",1.0)";
context.lineWidth = @{object_radius/30.0 + if selected {1.5} else {0.5}/scale};
context.stroke();
if (@{varying.team == 1}) {
context.fillStyle = "rgba(42,0,0,0.2)";
context.fill();
}
}
if let Some(action) = varying.synchronous_action.as_ref() {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{object_radius}, 0, @{action.progress.evaluate (*accessor.now()) as f64/action.finish_cost as f64}*Math.PI*2);
context.fillStyle = "rgba("+@{varying.team as i32*255/6}+",0,"+@{(1-varying.team as i32)*255}+",0.2)";
context.fill();
}}
if varying.attack_range >0 {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.attack_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.interrupt_range >0 && varying.object_type!= ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.interrupt_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if varying.awareness_range >0 && varying.object_type!= ObjectType::Beast {js! {
context.beginPath();
context.arc (@{center [0]},@{center [1]},@{varying.awareness_range as f64}, 0, Math.PI*2);
context.lineWidth = @{0.3/scale};
context.stroke();
}}
if let Some(home) = varying.home.as_ref() {
let home_center = query_ref (accessor, & home.varying).trajectory.evaluate (*accessor.now());
let home_center = Vector2::new (home_center [0] as f64, home_center [1] as f64);
js! {
context.beginPath();
context.moveTo(@{center [0]},@{center [1]});
context.lineTo(@{home_center [0]},@{home_center [1]});
context.lineWidth = @{0.25/scale};
context.setLineDash([@{3.0/scale},@{3.0/scale}]);
context.stroke();
context.setLineDash([]);
}
}
}
if let Some(selected) = game.selected_object.as_ref() {
let varying = query_ref (accessor, & selected.varying);
js! {
selected_info.empty().append ( //.text (@{format!("{:?}", **selected)});
$("<div>").text(@{format!("{:?}", varying.object_type)}),
$("<div>").text(@{if varying.hitpoints == 0 { format!("Food: ~{}/{}", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND,varying.food_cost/STANDARD_FOOD_UPKEEP_PER_SECOND)} else { format!("Food: ~{} ({} available)", varying.food/STANDARD_FOOD_UPKEEP_PER_SECOND, (varying.food - reserved_food(accessor, selected))/STANDARD_FOOD_UPKEEP_PER_SECOND)}}),
$("<div>").text(@{format!("HP: {}/{}", varying.hitpoints, varying.max_hitpoints)}),
$("<div>").text(@{format!("Endurance: {}%", varying.endurance.evaluate (*accessor.now())*100/max(1, varying.max_endurance))}),
$("<div>").text(@{
match varying.synchronous_action {
None => format!("Action: {:?}", varying.synchronous_action),
Some (ref synchronous) => match varying.ongoing_action {
None => format!("Action: {:?}", synchronous.action_type),
Some (ref ongoing) => format!("Action: {:?}/{:?}", synchronous.action_type, ongoing),
},
}
})
);}
//let choices = analyzed_choices (accessor, & selected);
if let Some(choices) = varying.last_choices.as_ref() {
for choice in choices.iter() {
js! {selected_info.append ($("<div>").text(@{format!("{:?}", choice)}));}
}
}
//js! {selected_info.append ($("<div>").text(@{format!("{:?}", **selected)}));}
}
js! {
context.restore();
}
}
pub fn main_loop (time: f64, game: Rc<RefCell<Game>>) {
//let continue_simulating;
{
let mut game = game.borrow_mut();
let observed_duration = time - game.last_ui_time;
let duration_to_simulate = if observed_duration < 100.0 {observed_duration} else {100.0};
let duration_to_simulate = (duration_to_simulate*(SECOND as f64)*game.time_speed/1000.0) as Time;
assert!(duration_to_simulate >= 0) ;
game.last_ui_time = time;
game.now += duration_to_simulate;
let now = game.now.clone();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
draw_game (& snapshot, & game);
game.steward.forget_before (&now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().map (| object | query_ref (& snapshot, & object.varying).team).collect();
continue_simulating = teams_alive.len() > 1;*/
}
//if continue_simulating {
web::window().request_animation_frame (move | time | main_loop (time, game));
//}
}
#[cfg (target_os = "emscripten")]
pub fn run() {
stdweb::initialize();
js! {
var game_container = window.game_container = $("<div>");
var canvas = window.canvas = document.createElement ("canvas");
(document.querySelector("main") || document.body).appendChild (game_container[0]);
game_container.append(canvas);
window.context = canvas.getContext ("2d");
}
let seed: u32 = js!{return window.localStorage && parseInt(window.localStorage.getItem ("random_seed")) || 0}.try_into().unwrap();
let game = Rc::new (RefCell::new (make_game(DeterministicRandomId::new (& (seed, 0xae06fcf3129d0685u64)))));
{
let game = game.clone();
let wheel_callback = move |x: f64,y: f64, delta: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let factor = (1.003f64).powf(delta);
game.display_radius = (game.display_radius as f64*factor) as Coordinate;
let modified_offset = offset*factor;
let difference = offset - modified_offset;
game.display_center += Vector2::new (difference [0] as Coordinate, difference [1] as Coordinate);
//println!("{:?}", (x,y,game.display_center));
};
js! {
var callback = @{wheel_callback};
canvas.addEventListener ("wheel", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5,
event.deltaY
);
event.preventDefault();
});
}
}
{
let game = game.clone();
let time_callback = move |speed: f64| {
let mut game = game.borrow_mut();
game.time_speed = if speed == -10.0 { 0.0 } else { (2.0f64).powf(speed/2.0) };
println!("{:?}", (speed));
};
js! {
var callback = @{time_callback};
game_container.append($("<div>").append(
$("<input>", {
type: "range",
id: "time_speed",
value: 0, min: -10, max: 10, step: 1
}).on ("input", function (event) {
callback(event.target.valueAsNumber);
}),
$("<label>", {
for: "time_speed",
text: " time speed",
})
));
}
}
{
let game = game.clone();
let click_callback = move |x: f64,y: f64 | {
let mut game = game.borrow_mut();
let offset = Vector2::new (
x*game.display_radius as f64*2.0,
y*game.display_radius as f64*2.0
);
let location = game.display_center + Vector2::new (offset [0] as Coordinate, offset [1] as Coordinate);
let now = game.now;
//game.steward.insert_fiat_event (now, DeterministicRandomId::new (& (now)), ChangeOrders {team: 1, orders: Orders {unit_destination: Some (location)}}).unwrap();
let snapshot = game.steward.snapshot_before (&now). unwrap ();
for object in Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (location), 0), None) {
let varying = query_ref (& snapshot, & object.varying);
let center = varying.trajectory.evaluate (now);
let object_radius = radius (& varying);
if distance (location, center) < object_radius {
game.selected_object = Some (object.clone());
}
}
};
js! {
var callback = @{click_callback};
canvas.addEventListener ("click", function (event) {
var offset = canvas.getBoundingClientRect();
callback (
(event.clientX - offset.left)/offset.width - 0.5,
(event.clientY - offset.top)/offset.height - 0.5
);
event.preventDefault();
});
}
}
js! {
game_container.append($("<div>").append(
$("<input>", {
type: "number",
id: "seed",
value:@{seed},
min: 0,
max: @{0u32.wrapping_sub(1)}
}).on ("input", function (event) {
var value = Math.floor(event.target.valueAsNumber);
if (value >= 0 && value <= @{0u32.wrapping_sub(1)}) {
window.localStorage.setItem ("random_seed", value);
}
}),
$("<label>", {
for: "seed",
text: " random seed (reload page to apply)",
})
));
game_container.append(window.selected_info = $("<div>", {id: "selected_info"}));
}
web::window().request_animation_frame (move | time | main_loop (time, game));
stdweb::event_loop();
}
#[cfg (not(target_os = "emscripten"))]
pub fn run() | }
}
| {
let mut scores = [0; 2];
loop {
let mut game = make_game(DeterministicRandomId::new (& (scores, 0xae06fcf3129d0685u64)));
loop {
game.now += SECOND /100;
let snapshot = game.steward.snapshot_before (& game.now). unwrap ();
game.steward.forget_before (& game.now);
/*let teams_alive: std::collections::HashSet <_> = Detector::objects_near_box (& snapshot, & get_detector (& snapshot), BoundingBox::centered (to_collision_vector (Vector::new (0, 0)), INITIAL_PALACE_DISTANCE as u64*2), None).into_iter().filter (| object | is_building (&query_ref (& snapshot, & object.varying))).map (| object | query_ref (& snapshot, & object.varying).team).collect();
if teams_alive.len() <= 1 {
scores [teams_alive.into_iter().next().unwrap()] += 1;
println!("{:?}", scores);
break;
}*/
} | identifier_body |
util.rs | //! Useful functions and macros for writing figments.
//!
//! # `map!` macro
//!
//! The `map!` macro constructs a [`Map`](crate::value::Map) from key-value
//! pairs and is particularly useful during testing:
//!
//! ```rust
//! use figment::util::map;
//!
//! let map = map! {
//! "name" => "Bob",
//! "age" => "100"
//! };
//!
//! assert_eq!(map.get("name"), Some(&"Bob"));
//! assert_eq!(map.get("age"), Some(&"100"));
//!
//! let map = map! {
//! 100 => "one hundred",
//! 23 => "twenty-three"
//! };
//!
//! assert_eq!(map.get(&100), Some(&"one hundred"));
//! assert_eq!(map.get(&23), Some(&"twenty-three"));
//!
//! ```
use std::fmt;
use std::path::{Path, PathBuf, Component};
use serde::de::{self, Unexpected, Deserializer};
/// A helper function to determine the relative path to `path` from `base`.
///
/// Returns `None` if there is no relative path from `base` to `path`, that is,
/// `base` and `path` do not share a common ancestor. `path` and `base` must be
/// either both absolute or both relative; returns `None` if one is relative and
/// the other absolute.
///
/// ```
/// use std::path::Path;
/// use figment::util::diff_paths;
///
/// // Paths must be both relative or both absolute.
/// assert_eq!(diff_paths("/a/b/c", "b/c"), None);
/// assert_eq!(diff_paths("a/b/c", "/b/c"), None);
///
/// // The root/relative root is always a common ancestor.
/// assert_eq!(diff_paths("/a/b/c", "/b/c"), Some("../../a/b/c".into()));
/// assert_eq!(diff_paths("c/a", "b/c/a"), Some("../../../c/a".into()));
///
/// let bar = "/foo/bar";
/// let baz = "/foo/bar/baz";
/// let quux = "/foo/bar/quux";
///
/// assert_eq!(diff_paths(bar, baz), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// assert_eq!(diff_paths(quux, baz), Some("../quux".into()));
/// assert_eq!(diff_paths(baz, quux), Some("../baz".into()));
/// assert_eq!(diff_paths(bar, quux), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// ```
// Copyright 2012-2015 The Rust Project Developers.
// Copyright 2017 The Rust Project Developers.
// Adapted from `pathdiff`, which itself adapted from rustc's path_relative_from.
pub fn diff_paths<P, B>(path: P, base: B) -> Option<PathBuf>
where P: AsRef<Path>, B: AsRef<Path>
{
let (path, base) = (path.as_ref(), base.as_ref());
if path.has_root()!= base.has_root() {
return None;
}
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None, | }
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
/// A helper to deserialize `0/false` as `false` and `1/true` as `true`.
///
/// Serde's default deserializer for `bool` only parses the strings `"true"` and
/// `"false"` as the booleans `true` and `false`, respectively. By contract,
/// this function _case-insensitively_ parses both the strings `"true"/"false"`
/// and the integers `1/0` as the booleans `true/false`, respectively.
///
/// # Example
///
/// ```rust
/// use figment::Figment;
///
/// #[derive(serde::Deserialize)]
/// struct Config {
/// #[serde(deserialize_with = "figment::util::bool_from_str_or_int")]
/// cli_colors: bool,
/// }
///
/// let c0: Config = Figment::from(("cli_colors", "true")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "TRUE")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 1)).extract().unwrap();
/// assert_eq!(c0.cli_colors, true);
/// assert_eq!(c1.cli_colors, true);
/// assert_eq!(c2.cli_colors, true);
///
/// let c0: Config = Figment::from(("cli_colors", "false")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "fAlSe")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 0)).extract().unwrap();
/// assert_eq!(c0.cli_colors, false);
/// assert_eq!(c1.cli_colors, false);
/// assert_eq!(c2.cli_colors, false);
/// ```
pub fn bool_from_str_or_int<'de, D: Deserializer<'de>>(de: D) -> Result<bool, D::Error> {
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = bool;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a boolean")
}
fn visit_str<E: de::Error>(self, val: &str) -> Result<bool, E> {
match val {
v if uncased::eq(v, "true") => Ok(true),
v if uncased::eq(v, "false") => Ok(false),
s => Err(E::invalid_value(Unexpected::Str(s), &"true or false"))
}
}
fn visit_u64<E: de::Error>(self, n: u64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n!= 0),
n => Err(E::invalid_value(Unexpected::Unsigned(n), &"0 or 1"))
}
}
fn visit_i64<E: de::Error>(self, n: i64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n!= 0),
n => Err(E::invalid_value(Unexpected::Signed(n), &"0 or 1"))
}
}
fn visit_bool<E: de::Error>(self, b: bool) -> Result<bool, E> {
Ok(b)
}
}
de.deserialize_any(Visitor)
}
/// A helper to serialize and deserialize a map as a vector of `(key, value)`
/// pairs.
///
/// ```
/// use figment::{Figment, util::map};
/// use serde::{Serialize, Deserialize};
///
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct Config {
/// #[serde(with = "figment::util::vec_tuple_map")]
/// pairs: Vec<(String, usize)>
/// }
///
/// let map = map!["key" => 1, "value" => 100, "name" => 20];
/// let c: Config = Figment::from(("pairs", map)).extract().unwrap();
/// assert_eq!(c.pairs.len(), 3);
///
/// let mut pairs = c.pairs;
/// pairs.sort_by_key(|(_, v)| *v);
///
/// assert_eq!(pairs[0], ("key".into(), 1));
/// assert_eq!(pairs[1], ("name".into(), 20));
/// assert_eq!(pairs[2], ("value".into(), 100));
/// ```
pub mod vec_tuple_map {
use std::fmt;
use serde::{de, Deserialize, Serialize, Deserializer, Serializer};
/// The serializer half.
pub fn serialize<S, K, V>(vec: &[(K, V)], se: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Serialize, V: Serialize
{
se.collect_map(vec.iter().map(|(ref k, ref v)| (k, v)))
}
/// The deserializer half.
pub fn deserialize<'de, K, V, D>(de: D) -> Result<Vec<(K, V)>, D::Error>
where D: Deserializer<'de>, K: Deserialize<'de>, V: Deserialize<'de>
{
struct Visitor<K, V>(std::marker::PhantomData<Vec<(K, V)>>);
impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
where K: Deserialize<'de>, V: Deserialize<'de>,
{
type Value = Vec<(K, V)>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Vec<(K, V)>, A::Error>
where A: de::MapAccess<'de>
{
let mut vec = Vec::with_capacity(map.size_hint().unwrap_or(0));
while let Some((k, v)) = map.next_entry()? {
vec.push((k, v));
}
Ok(vec)
}
}
de.deserialize_map(Visitor(std::marker::PhantomData))
}
}
use crate::value::{Value, Dict};
/// Given a key path `key` of the form `a.b.c`, creates nested dictionaries for
/// for every path component delimited by `.` in the path string (3 in `a.b.c`),
/// each a parent of the next, and the leaf mapping to `value` (`a` -> `b` ->
/// `c` -> `value`).
///
/// If `key` is empty, simply returns `value`. Otherwise, `Value` will be a
/// dictionary with the nested mappings.
///
/// # Example
///
/// ```rust
/// use figment::{util::nest, value::Value};
///
/// let leaf = Value::from("I'm a leaf!");
///
/// let dict = nest("tea", leaf.clone());
/// assert_eq!(dict.find_ref("tea").unwrap(), &leaf);
///
/// let dict = nest("tea.leaf", leaf.clone());
/// let tea = dict.find_ref("tea").unwrap();
/// let found_leaf = tea.find_ref("leaf").unwrap();
/// assert_eq!(found_leaf, &leaf);
/// assert_eq!(dict.find_ref("tea.leaf").unwrap(), &leaf);
///
/// let just_leaf = nest("", leaf.clone());
/// assert_eq!(just_leaf, leaf);
/// ```
pub fn nest(key: &str, value: Value) -> Value {
fn value_from(mut keys: std::str::Split<'_, char>, value: Value) -> Value {
match keys.next() {
Some(k) if!k.is_empty() => {
let mut dict = Dict::new();
dict.insert(k.into(), value_from(keys, value));
dict.into()
}
Some(_) | None => value
}
}
value_from(key.split('.'), value)
}
#[doc(hidden)]
#[macro_export]
/// This is a macro.
macro_rules! map {
($($key:expr => $value:expr),* $(,)?) => ({
let mut map = $crate::value::Map::new();
$(map.insert($key, $value);)*
map
});
}
pub use map;
#[doc(hidden)]
#[macro_export]
macro_rules! make_cloneable {
($Trait:path: $Cloneable:ident) => {
trait $Cloneable {
fn box_clone(&self) -> Box<dyn $Trait>;
}
impl std::clone::Clone for Box<dyn $Trait> {
fn clone(&self) -> Box<dyn $Trait> {
(&**self).box_clone()
}
}
impl std::fmt::Debug for Box<dyn $Trait> {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
Ok(())
}
}
impl<T: $Trait + Clone> $Cloneable for T {
fn box_clone(&self) -> Box<dyn $Trait> {
Box::new(self.clone())
}
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! cloneable_fn_trait {
($Name:ident: $($rest:tt)*) => {
trait $Name: $($rest)* + Cloneable +'static { }
impl<F: Clone +'static> $Name for F where F: $($rest)* { }
$crate::make_cloneable!($Name: Cloneable);
}
}
pub(crate) use cloneable_fn_trait; | (Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir); | random_line_split |
util.rs | //! Useful functions and macros for writing figments.
//!
//! # `map!` macro
//!
//! The `map!` macro constructs a [`Map`](crate::value::Map) from key-value
//! pairs and is particularly useful during testing:
//!
//! ```rust
//! use figment::util::map;
//!
//! let map = map! {
//! "name" => "Bob",
//! "age" => "100"
//! };
//!
//! assert_eq!(map.get("name"), Some(&"Bob"));
//! assert_eq!(map.get("age"), Some(&"100"));
//!
//! let map = map! {
//! 100 => "one hundred",
//! 23 => "twenty-three"
//! };
//!
//! assert_eq!(map.get(&100), Some(&"one hundred"));
//! assert_eq!(map.get(&23), Some(&"twenty-three"));
//!
//! ```
use std::fmt;
use std::path::{Path, PathBuf, Component};
use serde::de::{self, Unexpected, Deserializer};
/// A helper function to determine the relative path to `path` from `base`.
///
/// Returns `None` if there is no relative path from `base` to `path`, that is,
/// `base` and `path` do not share a common ancestor. `path` and `base` must be
/// either both absolute or both relative; returns `None` if one is relative and
/// the other absolute.
///
/// ```
/// use std::path::Path;
/// use figment::util::diff_paths;
///
/// // Paths must be both relative or both absolute.
/// assert_eq!(diff_paths("/a/b/c", "b/c"), None);
/// assert_eq!(diff_paths("a/b/c", "/b/c"), None);
///
/// // The root/relative root is always a common ancestor.
/// assert_eq!(diff_paths("/a/b/c", "/b/c"), Some("../../a/b/c".into()));
/// assert_eq!(diff_paths("c/a", "b/c/a"), Some("../../../c/a".into()));
///
/// let bar = "/foo/bar";
/// let baz = "/foo/bar/baz";
/// let quux = "/foo/bar/quux";
///
/// assert_eq!(diff_paths(bar, baz), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// assert_eq!(diff_paths(quux, baz), Some("../quux".into()));
/// assert_eq!(diff_paths(baz, quux), Some("../baz".into()));
/// assert_eq!(diff_paths(bar, quux), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// ```
// Copyright 2012-2015 The Rust Project Developers.
// Copyright 2017 The Rust Project Developers.
// Adapted from `pathdiff`, which itself adapted from rustc's path_relative_from.
pub fn diff_paths<P, B>(path: P, base: B) -> Option<PathBuf>
where P: AsRef<Path>, B: AsRef<Path>
{
let (path, base) = (path.as_ref(), base.as_ref());
if path.has_root()!= base.has_root() {
return None;
}
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None,
(Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir);
}
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
/// A helper to deserialize `0/false` as `false` and `1/true` as `true`.
///
/// Serde's default deserializer for `bool` only parses the strings `"true"` and
/// `"false"` as the booleans `true` and `false`, respectively. By contract,
/// this function _case-insensitively_ parses both the strings `"true"/"false"`
/// and the integers `1/0` as the booleans `true/false`, respectively.
///
/// # Example
///
/// ```rust
/// use figment::Figment;
///
/// #[derive(serde::Deserialize)]
/// struct Config {
/// #[serde(deserialize_with = "figment::util::bool_from_str_or_int")]
/// cli_colors: bool,
/// }
///
/// let c0: Config = Figment::from(("cli_colors", "true")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "TRUE")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 1)).extract().unwrap();
/// assert_eq!(c0.cli_colors, true);
/// assert_eq!(c1.cli_colors, true);
/// assert_eq!(c2.cli_colors, true);
///
/// let c0: Config = Figment::from(("cli_colors", "false")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "fAlSe")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 0)).extract().unwrap();
/// assert_eq!(c0.cli_colors, false);
/// assert_eq!(c1.cli_colors, false);
/// assert_eq!(c2.cli_colors, false);
/// ```
pub fn bool_from_str_or_int<'de, D: Deserializer<'de>>(de: D) -> Result<bool, D::Error> {
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = bool;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a boolean")
}
fn visit_str<E: de::Error>(self, val: &str) -> Result<bool, E> |
fn visit_u64<E: de::Error>(self, n: u64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n!= 0),
n => Err(E::invalid_value(Unexpected::Unsigned(n), &"0 or 1"))
}
}
fn visit_i64<E: de::Error>(self, n: i64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n!= 0),
n => Err(E::invalid_value(Unexpected::Signed(n), &"0 or 1"))
}
}
fn visit_bool<E: de::Error>(self, b: bool) -> Result<bool, E> {
Ok(b)
}
}
de.deserialize_any(Visitor)
}
/// A helper to serialize and deserialize a map as a vector of `(key, value)`
/// pairs.
///
/// ```
/// use figment::{Figment, util::map};
/// use serde::{Serialize, Deserialize};
///
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct Config {
/// #[serde(with = "figment::util::vec_tuple_map")]
/// pairs: Vec<(String, usize)>
/// }
///
/// let map = map!["key" => 1, "value" => 100, "name" => 20];
/// let c: Config = Figment::from(("pairs", map)).extract().unwrap();
/// assert_eq!(c.pairs.len(), 3);
///
/// let mut pairs = c.pairs;
/// pairs.sort_by_key(|(_, v)| *v);
///
/// assert_eq!(pairs[0], ("key".into(), 1));
/// assert_eq!(pairs[1], ("name".into(), 20));
/// assert_eq!(pairs[2], ("value".into(), 100));
/// ```
pub mod vec_tuple_map {
use std::fmt;
use serde::{de, Deserialize, Serialize, Deserializer, Serializer};
/// The serializer half.
pub fn serialize<S, K, V>(vec: &[(K, V)], se: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Serialize, V: Serialize
{
se.collect_map(vec.iter().map(|(ref k, ref v)| (k, v)))
}
/// The deserializer half.
pub fn deserialize<'de, K, V, D>(de: D) -> Result<Vec<(K, V)>, D::Error>
where D: Deserializer<'de>, K: Deserialize<'de>, V: Deserialize<'de>
{
struct Visitor<K, V>(std::marker::PhantomData<Vec<(K, V)>>);
impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
where K: Deserialize<'de>, V: Deserialize<'de>,
{
type Value = Vec<(K, V)>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Vec<(K, V)>, A::Error>
where A: de::MapAccess<'de>
{
let mut vec = Vec::with_capacity(map.size_hint().unwrap_or(0));
while let Some((k, v)) = map.next_entry()? {
vec.push((k, v));
}
Ok(vec)
}
}
de.deserialize_map(Visitor(std::marker::PhantomData))
}
}
use crate::value::{Value, Dict};
/// Given a key path `key` of the form `a.b.c`, creates nested dictionaries for
/// for every path component delimited by `.` in the path string (3 in `a.b.c`),
/// each a parent of the next, and the leaf mapping to `value` (`a` -> `b` ->
/// `c` -> `value`).
///
/// If `key` is empty, simply returns `value`. Otherwise, `Value` will be a
/// dictionary with the nested mappings.
///
/// # Example
///
/// ```rust
/// use figment::{util::nest, value::Value};
///
/// let leaf = Value::from("I'm a leaf!");
///
/// let dict = nest("tea", leaf.clone());
/// assert_eq!(dict.find_ref("tea").unwrap(), &leaf);
///
/// let dict = nest("tea.leaf", leaf.clone());
/// let tea = dict.find_ref("tea").unwrap();
/// let found_leaf = tea.find_ref("leaf").unwrap();
/// assert_eq!(found_leaf, &leaf);
/// assert_eq!(dict.find_ref("tea.leaf").unwrap(), &leaf);
///
/// let just_leaf = nest("", leaf.clone());
/// assert_eq!(just_leaf, leaf);
/// ```
pub fn nest(key: &str, value: Value) -> Value {
fn value_from(mut keys: std::str::Split<'_, char>, value: Value) -> Value {
match keys.next() {
Some(k) if!k.is_empty() => {
let mut dict = Dict::new();
dict.insert(k.into(), value_from(keys, value));
dict.into()
}
Some(_) | None => value
}
}
value_from(key.split('.'), value)
}
#[doc(hidden)]
#[macro_export]
/// This is a macro.
macro_rules! map {
($($key:expr => $value:expr),* $(,)?) => ({
let mut map = $crate::value::Map::new();
$(map.insert($key, $value);)*
map
});
}
pub use map;
#[doc(hidden)]
#[macro_export]
macro_rules! make_cloneable {
($Trait:path: $Cloneable:ident) => {
trait $Cloneable {
fn box_clone(&self) -> Box<dyn $Trait>;
}
impl std::clone::Clone for Box<dyn $Trait> {
fn clone(&self) -> Box<dyn $Trait> {
(&**self).box_clone()
}
}
impl std::fmt::Debug for Box<dyn $Trait> {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
Ok(())
}
}
impl<T: $Trait + Clone> $Cloneable for T {
fn box_clone(&self) -> Box<dyn $Trait> {
Box::new(self.clone())
}
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! cloneable_fn_trait {
($Name:ident: $($rest:tt)*) => {
trait $Name: $($rest)* + Cloneable +'static { }
impl<F: Clone +'static> $Name for F where F: $($rest)* { }
$crate::make_cloneable!($Name: Cloneable);
}
}
pub(crate) use cloneable_fn_trait;
| {
match val {
v if uncased::eq(v, "true") => Ok(true),
v if uncased::eq(v, "false") => Ok(false),
s => Err(E::invalid_value(Unexpected::Str(s), &"true or false"))
}
} | identifier_body |
util.rs | //! Useful functions and macros for writing figments.
//!
//! # `map!` macro
//!
//! The `map!` macro constructs a [`Map`](crate::value::Map) from key-value
//! pairs and is particularly useful during testing:
//!
//! ```rust
//! use figment::util::map;
//!
//! let map = map! {
//! "name" => "Bob",
//! "age" => "100"
//! };
//!
//! assert_eq!(map.get("name"), Some(&"Bob"));
//! assert_eq!(map.get("age"), Some(&"100"));
//!
//! let map = map! {
//! 100 => "one hundred",
//! 23 => "twenty-three"
//! };
//!
//! assert_eq!(map.get(&100), Some(&"one hundred"));
//! assert_eq!(map.get(&23), Some(&"twenty-three"));
//!
//! ```
use std::fmt;
use std::path::{Path, PathBuf, Component};
use serde::de::{self, Unexpected, Deserializer};
/// A helper function to determine the relative path to `path` from `base`.
///
/// Returns `None` if there is no relative path from `base` to `path`, that is,
/// `base` and `path` do not share a common ancestor. `path` and `base` must be
/// either both absolute or both relative; returns `None` if one is relative and
/// the other absolute.
///
/// ```
/// use std::path::Path;
/// use figment::util::diff_paths;
///
/// // Paths must be both relative or both absolute.
/// assert_eq!(diff_paths("/a/b/c", "b/c"), None);
/// assert_eq!(diff_paths("a/b/c", "/b/c"), None);
///
/// // The root/relative root is always a common ancestor.
/// assert_eq!(diff_paths("/a/b/c", "/b/c"), Some("../../a/b/c".into()));
/// assert_eq!(diff_paths("c/a", "b/c/a"), Some("../../../c/a".into()));
///
/// let bar = "/foo/bar";
/// let baz = "/foo/bar/baz";
/// let quux = "/foo/bar/quux";
///
/// assert_eq!(diff_paths(bar, baz), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// assert_eq!(diff_paths(quux, baz), Some("../quux".into()));
/// assert_eq!(diff_paths(baz, quux), Some("../baz".into()));
/// assert_eq!(diff_paths(bar, quux), Some("../".into()));
/// assert_eq!(diff_paths(baz, bar), Some("baz".into()));
/// ```
// Copyright 2012-2015 The Rust Project Developers.
// Copyright 2017 The Rust Project Developers.
// Adapted from `pathdiff`, which itself adapted from rustc's path_relative_from.
pub fn diff_paths<P, B>(path: P, base: B) -> Option<PathBuf>
where P: AsRef<Path>, B: AsRef<Path>
{
let (path, base) = (path.as_ref(), base.as_ref());
if path.has_root()!= base.has_root() {
return None;
}
let mut ita = path.components();
let mut itb = base.components();
let mut comps: Vec<Component> = vec![];
loop {
match (ita.next(), itb.next()) {
(None, None) => break,
(Some(a), None) => {
comps.push(a);
comps.extend(ita.by_ref());
break;
}
(None, _) => comps.push(Component::ParentDir),
(Some(a), Some(b)) if comps.is_empty() && a == b => (),
(Some(a), Some(b)) if b == Component::CurDir => comps.push(a),
(Some(_), Some(b)) if b == Component::ParentDir => return None,
(Some(a), Some(_)) => {
comps.push(Component::ParentDir);
for _ in itb {
comps.push(Component::ParentDir);
}
comps.push(a);
comps.extend(ita.by_ref());
break;
}
}
}
Some(comps.iter().map(|c| c.as_os_str()).collect())
}
/// A helper to deserialize `0/false` as `false` and `1/true` as `true`.
///
/// Serde's default deserializer for `bool` only parses the strings `"true"` and
/// `"false"` as the booleans `true` and `false`, respectively. By contract,
/// this function _case-insensitively_ parses both the strings `"true"/"false"`
/// and the integers `1/0` as the booleans `true/false`, respectively.
///
/// # Example
///
/// ```rust
/// use figment::Figment;
///
/// #[derive(serde::Deserialize)]
/// struct Config {
/// #[serde(deserialize_with = "figment::util::bool_from_str_or_int")]
/// cli_colors: bool,
/// }
///
/// let c0: Config = Figment::from(("cli_colors", "true")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "TRUE")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 1)).extract().unwrap();
/// assert_eq!(c0.cli_colors, true);
/// assert_eq!(c1.cli_colors, true);
/// assert_eq!(c2.cli_colors, true);
///
/// let c0: Config = Figment::from(("cli_colors", "false")).extract().unwrap();
/// let c1: Config = Figment::from(("cli_colors", "fAlSe")).extract().unwrap();
/// let c2: Config = Figment::from(("cli_colors", 0)).extract().unwrap();
/// assert_eq!(c0.cli_colors, false);
/// assert_eq!(c1.cli_colors, false);
/// assert_eq!(c2.cli_colors, false);
/// ```
pub fn | <'de, D: Deserializer<'de>>(de: D) -> Result<bool, D::Error> {
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = bool;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("a boolean")
}
fn visit_str<E: de::Error>(self, val: &str) -> Result<bool, E> {
match val {
v if uncased::eq(v, "true") => Ok(true),
v if uncased::eq(v, "false") => Ok(false),
s => Err(E::invalid_value(Unexpected::Str(s), &"true or false"))
}
}
fn visit_u64<E: de::Error>(self, n: u64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n!= 0),
n => Err(E::invalid_value(Unexpected::Unsigned(n), &"0 or 1"))
}
}
fn visit_i64<E: de::Error>(self, n: i64) -> Result<bool, E> {
match n {
0 | 1 => Ok(n!= 0),
n => Err(E::invalid_value(Unexpected::Signed(n), &"0 or 1"))
}
}
fn visit_bool<E: de::Error>(self, b: bool) -> Result<bool, E> {
Ok(b)
}
}
de.deserialize_any(Visitor)
}
/// A helper to serialize and deserialize a map as a vector of `(key, value)`
/// pairs.
///
/// ```
/// use figment::{Figment, util::map};
/// use serde::{Serialize, Deserialize};
///
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct Config {
/// #[serde(with = "figment::util::vec_tuple_map")]
/// pairs: Vec<(String, usize)>
/// }
///
/// let map = map!["key" => 1, "value" => 100, "name" => 20];
/// let c: Config = Figment::from(("pairs", map)).extract().unwrap();
/// assert_eq!(c.pairs.len(), 3);
///
/// let mut pairs = c.pairs;
/// pairs.sort_by_key(|(_, v)| *v);
///
/// assert_eq!(pairs[0], ("key".into(), 1));
/// assert_eq!(pairs[1], ("name".into(), 20));
/// assert_eq!(pairs[2], ("value".into(), 100));
/// ```
pub mod vec_tuple_map {
use std::fmt;
use serde::{de, Deserialize, Serialize, Deserializer, Serializer};
/// The serializer half.
pub fn serialize<S, K, V>(vec: &[(K, V)], se: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Serialize, V: Serialize
{
se.collect_map(vec.iter().map(|(ref k, ref v)| (k, v)))
}
/// The deserializer half.
pub fn deserialize<'de, K, V, D>(de: D) -> Result<Vec<(K, V)>, D::Error>
where D: Deserializer<'de>, K: Deserialize<'de>, V: Deserialize<'de>
{
struct Visitor<K, V>(std::marker::PhantomData<Vec<(K, V)>>);
impl<'de, K, V> de::Visitor<'de> for Visitor<K, V>
where K: Deserialize<'de>, V: Deserialize<'de>,
{
type Value = Vec<(K, V)>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Vec<(K, V)>, A::Error>
where A: de::MapAccess<'de>
{
let mut vec = Vec::with_capacity(map.size_hint().unwrap_or(0));
while let Some((k, v)) = map.next_entry()? {
vec.push((k, v));
}
Ok(vec)
}
}
de.deserialize_map(Visitor(std::marker::PhantomData))
}
}
use crate::value::{Value, Dict};
/// Given a key path `key` of the form `a.b.c`, creates nested dictionaries for
/// for every path component delimited by `.` in the path string (3 in `a.b.c`),
/// each a parent of the next, and the leaf mapping to `value` (`a` -> `b` ->
/// `c` -> `value`).
///
/// If `key` is empty, simply returns `value`. Otherwise, `Value` will be a
/// dictionary with the nested mappings.
///
/// # Example
///
/// ```rust
/// use figment::{util::nest, value::Value};
///
/// let leaf = Value::from("I'm a leaf!");
///
/// let dict = nest("tea", leaf.clone());
/// assert_eq!(dict.find_ref("tea").unwrap(), &leaf);
///
/// let dict = nest("tea.leaf", leaf.clone());
/// let tea = dict.find_ref("tea").unwrap();
/// let found_leaf = tea.find_ref("leaf").unwrap();
/// assert_eq!(found_leaf, &leaf);
/// assert_eq!(dict.find_ref("tea.leaf").unwrap(), &leaf);
///
/// let just_leaf = nest("", leaf.clone());
/// assert_eq!(just_leaf, leaf);
/// ```
pub fn nest(key: &str, value: Value) -> Value {
fn value_from(mut keys: std::str::Split<'_, char>, value: Value) -> Value {
match keys.next() {
Some(k) if!k.is_empty() => {
let mut dict = Dict::new();
dict.insert(k.into(), value_from(keys, value));
dict.into()
}
Some(_) | None => value
}
}
value_from(key.split('.'), value)
}
#[doc(hidden)]
#[macro_export]
/// This is a macro.
macro_rules! map {
($($key:expr => $value:expr),* $(,)?) => ({
let mut map = $crate::value::Map::new();
$(map.insert($key, $value);)*
map
});
}
pub use map;
#[doc(hidden)]
#[macro_export]
macro_rules! make_cloneable {
($Trait:path: $Cloneable:ident) => {
trait $Cloneable {
fn box_clone(&self) -> Box<dyn $Trait>;
}
impl std::clone::Clone for Box<dyn $Trait> {
fn clone(&self) -> Box<dyn $Trait> {
(&**self).box_clone()
}
}
impl std::fmt::Debug for Box<dyn $Trait> {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
Ok(())
}
}
impl<T: $Trait + Clone> $Cloneable for T {
fn box_clone(&self) -> Box<dyn $Trait> {
Box::new(self.clone())
}
}
}
}
#[doc(hidden)]
#[macro_export]
macro_rules! cloneable_fn_trait {
($Name:ident: $($rest:tt)*) => {
trait $Name: $($rest)* + Cloneable +'static { }
impl<F: Clone +'static> $Name for F where F: $($rest)* { }
$crate::make_cloneable!($Name: Cloneable);
}
}
pub(crate) use cloneable_fn_trait;
| bool_from_str_or_int | identifier_name |
main.rs | #![feature(collections)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate parser_combinators;
// TODO:
// - Benchmarks
// - Cache of last piece
// - merge consecutive insert, delete
// - snapshots
// - Allow String, &str, &[u8], and Vec<u8> as parameter to insert, append
/// A append only buffer
/// (This is unnecessary inefficient because we copy, we could
/// just allocate new separate buffers)
#[derive(Debug)]
pub struct AppendOnlyBuffer {
buf: Vec<u8>,
}
#[derive(Debug,Copy,Clone,PartialEq)]
pub struct Span {
off1: u32,
off2: u32,
}
impl Span {
pub fn new(off1: u32, off2: u32) -> Span {
assert!(off2 >= off1);
Span { off1: off1, off2: off2 }
}
/// The empty span
pub fn empty() -> Span {
Span::new(0,0)
}
pub fn len(&self) -> u32 {
self.off2 - self.off1
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Split self such that the left piece has n characters.
pub fn split(&self, n: u32) -> Option<(Span, Span)> {
if n == 0 || n == self.len() {
None
} else {
Some((Span::new(self.off1, self.off1+n), Span::new(self.off1+n, self.off2)))
}
}
}
impl AppendOnlyBuffer {
/// Constructs a new, empty AppendOnlyBuffer.
pub fn new() -> AppendOnlyBuffer {
AppendOnlyBuffer {
buf: Vec::with_capacity(4096)
}
}
/// Append a slice of bytes.
pub fn append(&mut self, bytes: &[u8]) -> Span {
let off1 = self.buf.len() as u32;
self.buf.push_all(bytes);
Span::new(off1, self.buf.len() as u32)
}
pub fn get(&self, s: Span) -> &[u8] {
&self.buf[s.off1 as usize.. s.off2 as usize]
}
pub fn get_byte(&self, p: u32) -> u8 {
self.buf[p as usize]
}
}
/// We represent pieces by their index in the vector that we use to allocate
/// them. That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p!= SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p!= SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len());
}
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn link(&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(self.to_vec())
}
}
#[test]
fn test_pieces() {
let t = Text::new();
assert_eq!(t.pieces().collect::<Vec<_>>(), vec![]);
}
#[cfg(test)]
mod tests {
mod span {
use super::super::*;
#[test]
fn basics() {
let s = Span::new(1, 1);
assert_eq!(s.len(), 0);
assert!(s.is_empty());
let s2 = Span::new(3, 7);
assert!(s2.len() == 4);
}
#[test]
fn split() {
let s = Span::new(3, 7);
assert_eq!(s.split(0), None);
assert_eq!(s.split(4), None);
assert_eq!(s.split(3), Some((Span { off1: 3, off2: 6 }, Span { off1: 6, off2: 7 })));
}
}
mod append_only_buffer {
use super::super::*;
#[test]
fn basics() {
let mut b = AppendOnlyBuffer::new();
let bytes = "Hello World".as_bytes();
let sp = b.append(bytes);
assert_eq!(b.get(sp), bytes);
let bytes2 = "Just testing".as_bytes();
let sp2 = b.append(bytes2);
assert_eq!(b.get(sp), bytes);
assert_eq!(b.get(sp2), bytes2);
}
}
mod text {
use super::super::*;
#[test]
fn insert_beginning() {
let mut t = Text::new();
assert_eq!(t.len(), 0);
t.insert(0, "World".as_bytes());
assert_eq!(t.len(), 5);
assert_eq!(t.to_utf8_string().unwrap(), "World");
t.insert(0, "Hello ".as_bytes());
assert_eq!(t.len(), 11);
assert_eq!(t.to_utf8_string().unwrap(), "Hello World");
t.insert(0, "...".as_bytes());
assert_eq!(t.len(), 14);
assert_eq!(t.to_utf8_string().unwrap(), "...Hello World");
}
#[test]
fn append() {
let mut t = Text::new();
t.insert(0, "Hello".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello");
t.insert(5, " Bene".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello Bene");
}
#[test]
fn insert_middle() {
let mut t = Text::new();
t.insert(0, "1234".as_bytes());
t.insert(2, "x".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12x34");
t.insert(3, "yz".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12xyz34");
}
#[test]
fn delete_all1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_all2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_part1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn delete_part2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn bytes1() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), bytes);
}
#[test]
fn bytes2() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
let bytes2 = vec![3, 4];
t.insert(0, &bytes2);
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
}
}
/// Set of possible commands
#[derive(Debug, Clone)]
pub enum Command {
Insert(String),
}
use parser_combinators::primitives::ParseError;
use parser_combinators::{spaces, between, many, char, satisfy, Parser, ParserExt};
impl Command {
pub fn parse(s: &str) -> Result<(Command, &str), ParseError> |
}
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
info!("starting up");
//let mut text = Text::new();
let mut args = std::env::args();
args.next().unwrap();
let s = args.next().unwrap();
let cmd = Command::parse(&s);
println!("{:?}", cmd);
}
| {
let literal = between(char('/'), char('/'), many(satisfy(|c| c != '/')).map(Command::Insert));
let spaces = spaces();
spaces.with(char('i').with(literal)).parse(s)
} | identifier_body |
main.rs | #![feature(collections)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate parser_combinators;
// TODO:
// - Benchmarks
// - Cache of last piece
// - merge consecutive insert, delete
// - snapshots
// - Allow String, &str, &[u8], and Vec<u8> as parameter to insert, append
/// A append only buffer
/// (This is unnecessary inefficient because we copy, we could
/// just allocate new separate buffers)
#[derive(Debug)]
pub struct AppendOnlyBuffer {
buf: Vec<u8>,
}
#[derive(Debug,Copy,Clone,PartialEq)]
pub struct Span {
off1: u32,
off2: u32,
}
impl Span {
pub fn new(off1: u32, off2: u32) -> Span {
assert!(off2 >= off1);
Span { off1: off1, off2: off2 }
}
/// The empty span
pub fn empty() -> Span {
Span::new(0,0)
}
pub fn len(&self) -> u32 {
self.off2 - self.off1
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Split self such that the left piece has n characters.
pub fn split(&self, n: u32) -> Option<(Span, Span)> {
if n == 0 || n == self.len() {
None
} else |
}
}
impl AppendOnlyBuffer {
    /// Constructs a new, empty AppendOnlyBuffer.
    pub fn new() -> AppendOnlyBuffer {
        AppendOnlyBuffer {
            // Start with one page worth of capacity to avoid early regrowth.
            buf: Vec::with_capacity(4096),
        }
    }
    /// Append a slice of bytes and return the span covering them.
    pub fn append(&mut self, bytes: &[u8]) -> Span {
        let off1 = self.buf.len() as u32;
        // `extend_from_slice` replaces the unstable `Vec::push_all`,
        // which was removed before Rust 1.0 stabilized.
        self.buf.extend_from_slice(bytes);
        Span::new(off1, self.buf.len() as u32)
    }
    /// Borrow the bytes covered by `s`. Panics if `s` is out of bounds.
    pub fn get(&self, s: Span) -> &[u8] {
        &self.buf[s.off1 as usize..s.off2 as usize]
    }
    /// Read the byte at absolute offset `p`. Panics if out of bounds.
    pub fn get_byte(&self, p: u32) -> u8 {
        self.buf[p as usize]
    }
}
/// We represent pieces by their index in the vector that we use to allocate
/// them. That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. Its span is also empty.
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
/// Previous piece in the doubly linked list (the sentinel at the front).
prev: Piece,
/// Next piece in the doubly linked list (the sentinel at the back).
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
/// Immutable storage for every byte ever inserted.
buffer: AppendOnlyBuffer,
/// Arena of pieces; index 0 holds the sentinel.
pieces: Vec<PieceData>,
/// Current logical length in bytes, kept in sync by insert/delete.
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
    type Item = (u32, Piece);
    /// Yield `(start_offset_in_text, piece)` pairs until the sentinel.
    fn next(&mut self) -> Option<(u32, Piece)> {
        if self.next == SENTINEL {
            return None;
        }
        let piece = self.next;
        let Piece(p) = piece;
        let pd = &self.text.pieces[p as usize];
        let off = self.off;
        // Advance past this piece. (The original wrote `*&pd.next`,
        // a redundant deref-of-reference; `Piece` is Copy.)
        self.off += pd.span.len();
        self.next = pd.next;
        Some((off, piece))
    }
}
/// Iterator over all bytes of a `Text`, walking the piece list.
pub struct Bytes<'a> {
/// Iterator over the remaining pieces.
pieces: Pieces<'a>,
/// Piece currently being read; `None` once exhausted.
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
    type Item = u8;
    /// Produce the next byte, hopping to the following piece once the
    /// current one is exhausted (iterative instead of recursive).
    fn next(&mut self) -> Option<u8> {
        loop {
            let span = match self.pd {
                None => return None,
                Some(pd) => pd.span,
            };
            if self.off < span.len() {
                let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
                self.off += 1;
                return Some(byte);
            }
            // Current piece exhausted: move on to the next one.
            self.off = 0;
            self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
        }
    }
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
/// Create an empty text. The piece arena starts with the sentinel
/// piece at index 0 (see `SENTINEL`), whose span is empty and whose
/// links point back at itself.
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
/// Debug check: walk the piece list in both directions and assert
/// that no piece is empty and that the span lengths sum to `self.len`.
fn invariant(&self) {
// Forward traversal via `next` links.
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p!= SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
// Backward traversal via `prev` links must agree.
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p!= SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len());
}
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn link(&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(self.to_vec())
}
}
#[test]
fn test_pieces() {
    // A freshly created text must expose an empty piece iterator.
    assert_eq!(Text::new().pieces().count(), 0);
}
#[cfg(test)]
mod tests {
mod span {
use super::super::*;
#[test]
fn basics() {
let s = Span::new(1, 1);
assert_eq!(s.len(), 0);
assert!(s.is_empty());
let s2 = Span::new(3, 7);
assert!(s2.len() == 4);
}
#[test]
fn split() {
let s = Span::new(3, 7);
assert_eq!(s.split(0), None);
assert_eq!(s.split(4), None);
assert_eq!(s.split(3), Some((Span { off1: 3, off2: 6 }, Span { off1: 6, off2: 7 })));
}
}
mod append_only_buffer {
use super::super::*;
#[test]
fn basics() {
let mut b = AppendOnlyBuffer::new();
let bytes = "Hello World".as_bytes();
let sp = b.append(bytes);
assert_eq!(b.get(sp), bytes);
let bytes2 = "Just testing".as_bytes();
let sp2 = b.append(bytes2);
assert_eq!(b.get(sp), bytes);
assert_eq!(b.get(sp2), bytes2);
}
}
mod text {
use super::super::*;
#[test]
fn insert_beginning() {
let mut t = Text::new();
assert_eq!(t.len(), 0);
t.insert(0, "World".as_bytes());
assert_eq!(t.len(), 5);
assert_eq!(t.to_utf8_string().unwrap(), "World");
t.insert(0, "Hello ".as_bytes());
assert_eq!(t.len(), 11);
assert_eq!(t.to_utf8_string().unwrap(), "Hello World");
t.insert(0, "...".as_bytes());
assert_eq!(t.len(), 14);
assert_eq!(t.to_utf8_string().unwrap(), "...Hello World");
}
#[test]
fn append() {
let mut t = Text::new();
t.insert(0, "Hello".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello");
t.insert(5, " Bene".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello Bene");
}
#[test]
fn insert_middle() {
let mut t = Text::new();
t.insert(0, "1234".as_bytes());
t.insert(2, "x".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12x34");
t.insert(3, "yz".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12xyz34");
}
#[test]
fn delete_all1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_all2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_part1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn delete_part2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn bytes1() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), bytes);
}
#[test]
fn bytes2() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
let bytes2 = vec![3, 4];
t.insert(0, &bytes2);
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
}
}
/// Set of possible commands
#[derive(Debug, Clone)]
pub enum Command {
Insert(String),
}
use parser_combinators::primitives::ParseError;
use parser_combinators::{spaces, between, many, char, satisfy, Parser, ParserExt};
impl Command {
pub fn parse(s: &str) -> Result<(Command, &str), ParseError> {
let literal = between(char('/'), char('/'), many(satisfy(|c| c!= '/')).map(Command::Insert));
let spaces = spaces();
spaces.with(char('i').with(literal)).parse(s)
}
}
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
info!("starting up");
//let mut text = Text::new();
let mut args = std::env::args();
args.next().unwrap();
let s = args.next().unwrap();
let cmd = Command::parse(&s);
println!("{:?}", cmd);
}
| {
Some((Span::new(self.off1, self.off1+n), Span::new(self.off1+n, self.off2)))
} | conditional_block |
main.rs | #![feature(collections)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate parser_combinators;
// TODO:
// - Benchmarks
// - Cache of last piece
// - merge consecutive insert, delete
// - snapshots
// - Allow String, &str, &[u8], and Vec<u8> as parameter to insert, append
/// A append only buffer
/// (This is unnecessary inefficient because we copy, we could
/// just allocate new separate buffers)
#[derive(Debug)]
pub struct AppendOnlyBuffer {
buf: Vec<u8>,
}
/// Half-open byte range `[off1, off2)` into the append-only buffer.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Span {
    off1: u32,
    off2: u32,
}
impl Span {
    /// Build a span; `off2` must not precede `off1`.
    pub fn new(off1: u32, off2: u32) -> Span {
        assert!(off2 >= off1);
        Span { off1, off2 }
    }
    /// The empty span.
    pub fn empty() -> Span {
        Span::new(0, 0)
    }
    /// Number of bytes covered.
    pub fn len(&self) -> u32 {
        self.off2 - self.off1
    }
    /// True when the span covers no bytes.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Split self such that the left piece has n characters.
    /// Yields `None` when either resulting side would be empty.
    pub fn split(&self, n: u32) -> Option<(Span, Span)> {
        if n == 0 || n == self.len() {
            return None;
        }
        let mid = self.off1 + n;
        Some((Span::new(self.off1, mid), Span::new(mid, self.off2)))
    }
}
impl AppendOnlyBuffer {
/// Constructs a new, empty AppendOnlyBuffer.
pub fn new() -> AppendOnlyBuffer {
AppendOnlyBuffer {
buf: Vec::with_capacity(4096)
}
}
/// Append a slice of bytes.
pub fn append(&mut self, bytes: &[u8]) -> Span {
let off1 = self.buf.len() as u32;
self.buf.push_all(bytes);
Span::new(off1, self.buf.len() as u32)
}
pub fn get(&self, s: Span) -> &[u8] {
&self.buf[s.off1 as usize.. s.off2 as usize]
}
pub fn get_byte(&self, p: u32) -> u8 {
self.buf[p as usize]
}
}
/// We represent pieces by their index in the vector that we use to allocate
/// them. That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p!= SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p!= SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0); | }
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
    Pieces {
        text: self,
        next: self.get_piece(SENTINEL).next,
        off: 0,
    }
}
/// Length of Text in bytes
/// (constant time: the value is maintained by `insert` and `delete`).
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
    let mut pieces = self.pieces();
    // Prime the byte iterator with the first piece, if any.
    let pd = pieces.next().map(|(_, p)| self.get_piece(p));
    Bytes { pieces, pd, off: 0 }
}
/// Resolve a piece handle to its data (handles are arena indices).
fn get_piece(&self, piece: Piece) -> &PieceData {
    let Piece(idx) = piece;
    &self.pieces[idx as usize]
}
/// Make `first` and `second` adjacent in the doubly linked piece list.
fn link(&mut self, first: Piece, second: Piece) {
    let Piece(a) = first;
    let Piece(b) = second;
    self.pieces[a as usize].next = second;
    self.pieces[b as usize].prev = first;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off: u32) -> (u32, Piece) {
    if off == self.len() as u32 {
        return (off, SENTINEL);
    }
    // Linear scan: remember the last piece starting at or before `off`.
    let mut found = (0, SENTINEL);
    for (s, p) in self.pieces() {
        if s > off {
            break;
        }
        found = (s, p);
    }
    found
}
/// Allocate a fresh, unlinked piece in the arena and return its handle.
fn add_piece(&mut self, span: Span) -> Piece {
    let idx = self.pieces.len() as u32;
    self.pieces.push(PieceData {
        span,
        prev: SENTINEL,
        next: SENTINEL,
    });
    Piece(idx)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
///
/// No bytes are removed from the underlying buffer; the piece list is
/// rewired so the deleted range is no longer reachable.
pub fn delete(&mut self, off1: u32, off2: u32) {
// Empty or inverted ranges are a no-op.
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
// `left` becomes the piece that will precede the deleted gap.
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
// Deletion starts inside `lpiece`: keep its head as a new piece.
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
// `right` becomes the piece that will follow the deleted gap.
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
// Deletion ends inside `rpiece`: keep its tail as a new piece.
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(self.to_vec())
}
}
#[test]
fn test_pieces() {
let t = Text::new();
assert_eq!(t.pieces().collect::<Vec<_>>(), vec![]);
}
#[cfg(test)]
mod tests {
// Unit tests for the Span primitive.
mod span {
use super::super::*;
#[test]
fn basics() {
let s = Span::new(1, 1);
assert_eq!(s.len(), 0);
assert!(s.is_empty());
let s2 = Span::new(3, 7);
assert!(s2.len() == 4);
}
#[test]
fn split() {
let s = Span::new(3, 7);
assert_eq!(s.split(0), None);
assert_eq!(s.split(4), None);
assert_eq!(s.split(3), Some((Span { off1: 3, off2: 6 }, Span { off1: 6, off2: 7 })));
}
}
// Unit tests for the append-only backing buffer.
mod append_only_buffer {
use super::super::*;
#[test]
fn basics() {
let mut b = AppendOnlyBuffer::new();
let bytes = "Hello World".as_bytes();
let sp = b.append(bytes);
assert_eq!(b.get(sp), bytes);
let bytes2 = "Just testing".as_bytes();
let sp2 = b.append(bytes2);
assert_eq!(b.get(sp), bytes);
assert_eq!(b.get(sp2), bytes2);
}
}
// End-to-end tests for the piece-table Text type.
mod text {
use super::super::*;
#[test]
fn insert_beginning() {
let mut t = Text::new();
assert_eq!(t.len(), 0);
t.insert(0, "World".as_bytes());
assert_eq!(t.len(), 5);
assert_eq!(t.to_utf8_string().unwrap(), "World");
t.insert(0, "Hello ".as_bytes());
assert_eq!(t.len(), 11);
assert_eq!(t.to_utf8_string().unwrap(), "Hello World");
t.insert(0, "...".as_bytes());
assert_eq!(t.len(), 14);
assert_eq!(t.to_utf8_string().unwrap(), "...Hello World");
}
#[test]
fn append() {
let mut t = Text::new();
t.insert(0, "Hello".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello");
t.insert(5, " Bene".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello Bene");
}
#[test]
fn insert_middle() {
let mut t = Text::new();
t.insert(0, "1234".as_bytes());
t.insert(2, "x".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12x34");
t.insert(3, "yz".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12xyz34");
}
#[test]
fn delete_all1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_all2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_part1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn delete_part2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn bytes1() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), bytes);
}
#[test]
fn bytes2() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
let bytes2 = vec![3, 4];
t.insert(0, &bytes2);
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
}
}
/// Set of possible commands
#[derive(Debug, Clone)]
pub enum Command {
/// Insert the given string (parsed from `i/<text>/`).
Insert(String),
}
use parser_combinators::primitives::ParseError;
use parser_combinators::{spaces, between, many, char, satisfy, Parser, ParserExt};
impl Command {
/// Parse a command of the form `i/<text>/`, optionally preceded by
/// whitespace, into `Command::Insert`. The `&str` in the result is
/// presumably the unconsumed remainder of the input (parser_combinators
/// convention) — confirm against the crate's `Parser::parse` docs.
pub fn parse(s: &str) -> Result<(Command, &str), ParseError> {
// A literal is everything between two '/' delimiters.
let literal = between(char('/'), char('/'), many(satisfy(|c| c!= '/')).map(Command::Insert));
let spaces = spaces();
spaces.with(char('i').with(literal)).parse(s)
}
}
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
info!("starting up");
//let mut text = Text::new();
// Skip argv[0] (the program name) and parse the first real argument
// as a command; panics (via unwrap) when no argument is supplied.
let mut args = std::env::args();
args.next().unwrap();
let s = args.next().unwrap();
let cmd = Command::parse(&s);
println!("{:?}", cmd);
}
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len()); | random_line_split |
main.rs | #![feature(collections)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate parser_combinators;
// TODO:
// - Benchmarks
// - Cache of last piece
// - merge consecutive insert, delete
// - snapshots
// - Allow String, &str, &[u8], and Vec<u8> as parameter to insert, append
/// A append only buffer
/// (This is unnecessary inefficient because we copy, we could
/// just allocate new separate buffers)
#[derive(Debug)]
pub struct AppendOnlyBuffer {
buf: Vec<u8>,
}
/// Half-open byte range `[off1, off2)` into the append-only buffer.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Span {
    off1: u32,
    off2: u32,
}
impl Span {
    /// Build a span; `off2` must not precede `off1`.
    pub fn new(off1: u32, off2: u32) -> Span {
        assert!(off2 >= off1);
        Span { off1, off2 }
    }
    /// The empty span.
    pub fn empty() -> Span {
        Span::new(0, 0)
    }
    /// Number of bytes covered.
    pub fn len(&self) -> u32 {
        self.off2 - self.off1
    }
    /// True when the span covers no bytes.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Split self such that the left piece has n characters.
    /// Yields `None` when either resulting side would be empty.
    pub fn split(&self, n: u32) -> Option<(Span, Span)> {
        if n == 0 || n == self.len() {
            return None;
        }
        let mid = self.off1 + n;
        Some((Span::new(self.off1, mid), Span::new(mid, self.off2)))
    }
}
impl AppendOnlyBuffer {
    /// Constructs a new, empty AppendOnlyBuffer.
    pub fn new() -> AppendOnlyBuffer {
        AppendOnlyBuffer {
            // Start with one page worth of capacity to avoid early regrowth.
            buf: Vec::with_capacity(4096),
        }
    }
    /// Append a slice of bytes and return the span covering them.
    pub fn append(&mut self, bytes: &[u8]) -> Span {
        let off1 = self.buf.len() as u32;
        // `extend_from_slice` replaces the unstable `Vec::push_all`,
        // which was removed before Rust 1.0 stabilized.
        self.buf.extend_from_slice(bytes);
        Span::new(off1, self.buf.len() as u32)
    }
    /// Borrow the bytes covered by `s`. Panics if `s` is out of bounds.
    pub fn get(&self, s: Span) -> &[u8] {
        &self.buf[s.off1 as usize..s.off2 as usize]
    }
    /// Read the byte at absolute offset `p`. Panics if out of bounds.
    pub fn get_byte(&self, p: u32) -> u8 {
        self.buf[p as usize]
    }
}
/// We represent pieces by their index in the vector that we use to allocate
/// them. That is fine because we never free a piece anyway (unlimited undo
/// for the win).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Piece(u32);
/// The actual data stored in a piece.
/// We have one sentinel piece which is always stored at index 0
/// in the vector. It's span is also empty
#[derive(Debug)]
struct PieceData {
/// Some bytes in the text's buffer
span: Span,
prev: Piece,
next: Piece,
}
/// Text is just a sequence of bytes (implemented with the PieceTable method,
/// ala Oberon). We on purpose do not require UTF-8 here. A programmers
/// editor is most useful when it can deal with any sequence of bytes.
#[derive(Debug)]
pub struct Text {
buffer: AppendOnlyBuffer,
pieces: Vec<PieceData>,
len: usize,
}
struct Pieces<'a> {
text: &'a Text,
next: Piece,
/// start position of piece in text
off: u32,
}
impl<'a> Iterator for Pieces<'a> {
type Item = (u32, Piece);
fn next(&mut self) -> Option<(u32, Piece)> {
if self.next == SENTINEL {
None
} else {
let piece = self.next;
let Piece(p) = piece;
let pd = &self.text.pieces[p as usize];
let off = self.off;
let span = &pd.span;
let next = *&pd.next;
self.off = self.off + span.len();
self.next = next;
Some ((off, piece))
}
}
}
pub struct Bytes<'a> {
pieces: Pieces<'a>,
pd: Option<&'a PieceData>,
// where we are in the current piece
off: u32
}
impl<'a> Iterator for Bytes<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
match self.pd {
None => None,
Some(pd) => {
let span = pd.span;
if self.off >= span.len() {
self.off = 0;
self.pd = self.pieces.next().map(|(_, p)| self.pieces.text.get_piece(p));
self.next()
} else {
let byte = self.pieces.text.buffer.get_byte(span.off1 + self.off);
self.off += 1;
Some(byte)
}
}
}
}
}
// The sentinel is always stored at position 0 in the pieces vector
const SENTINEL: Piece = Piece(0);
impl Text {
pub fn new() -> Text {
Text {
buffer: AppendOnlyBuffer::new(),
pieces: vec![PieceData {
span: Span::empty(),
prev: SENTINEL,
next: SENTINEL,
}],
len: 0,
}
}
fn invariant(&self) {
let mut l = 0;
let mut p = self.get_piece(SENTINEL).next;
while p!= SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).next;
}
assert_eq!(l as usize, self.len());
let mut l = 0;
let mut p = self.get_piece(SENTINEL).prev;
while p!= SENTINEL {
let len = self.get_piece(p).span.len();
assert!(len > 0);
l += len;
p = self.get_piece(p).prev;
}
assert_eq!(l as usize, self.len());
}
/// Iterator over all pieces (but never the sentinel)
fn pieces(&self) -> Pieces {
let next = self.get_piece(SENTINEL).next;
Pieces {
text: self,
next: next,
off: 0,
}
}
/// Length of Text in bytes
pub fn len(&self) -> usize {
self.len
}
/// Iterator over all bytes
pub fn bytes(&self) -> Bytes {
let mut pieces = self.pieces();
let pd = pieces.next().map(|(_, p)| self.get_piece(p));
Bytes {
pieces: pieces,
pd: pd,
off: 0
}
}
fn get_piece(&self, Piece(p): Piece) -> &PieceData {
&self.pieces[p as usize]
}
fn | (&mut self, piece1: Piece, piece2: Piece) {
let Piece(p1) = piece1;
let Piece(p2) = piece2;
self.pieces[p1 as usize].next = piece2;
self.pieces[p2 as usize].prev = piece1;
}
/// Find the piece containing offset. Return piece
/// and start position of piece in text.
/// Will return the sentinel iff off == self.len()
/// Returns the right piece if off between two
/// pieces
fn find_piece(&self, off:u32) -> (u32, Piece) {
if off == self.len() as u32 {
(off, SENTINEL)
} else {
let mut start = 0;
let mut piece = SENTINEL;
for (s, p) in self.pieces() {
if s > off {
// previous piece was the one we wanted
return (start, piece);
}
start = s;
piece = p;
}
return (start, piece);
}
}
fn add_piece(&mut self, span: Span) -> Piece {
self.pieces.push(PieceData {
span: span,
prev: SENTINEL,
next: SENTINEL,
} );
Piece((self.pieces.len() - 1) as u32)
}
/// Delete bytes between off1 (inclusive) and off2 (exclusive)
pub fn delete(&mut self, off1: u32, off2: u32) {
if off2 <= off1 {
return;
}
let (lstart, lpiece) = self.find_piece(off1);
let lspan = self.get_piece(lpiece).span;
let (rstart, rpiece) = self.find_piece(off2);
let rspan = self.get_piece(rpiece).span;
let left = {
if let Some((left_span, _right_span)) = lspan.split(off1 - lstart) {
let l = self.get_piece(lpiece).prev;
let remainder = self.add_piece(left_span);
self.link(l, remainder);
remainder
} else {
// We are deleting all of piece
assert_eq!(lstart, off1);
self.get_piece(lpiece).prev
}
};
let right = {
if let Some((_left_span, right_span)) = rspan.split(off2 - rstart) {
let r = self.get_piece(rpiece).next;
let remainder = self.add_piece(right_span);
self.link(remainder, r);
remainder
} else {
// We are at the beginning of piece and therefore
// won't delete anything of it
assert_eq!(rstart, off2);
rpiece
}
};
self.len -= (off2 - off1) as usize;
self.link(left, right);
self.invariant()
}
/// Append bytes at end.
pub fn append(&mut self, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let off = self.len() as u32;
self.insert(off, bytes);
}
/// Insert bytes at offset.
pub fn insert(&mut self, off:u32, bytes: &[u8]) {
if bytes.len() == 0 {
return;
}
let (start, piece) = self.find_piece(off);
let (span, prev, next) = {
let d = self.get_piece(piece);
(d.span, d.prev, d.next)
};
if let Some((left_span, right_span)) = span.split(off - start) {
let left = self.add_piece(left_span);
let span = self.buffer.append(bytes);
let middle = self.add_piece(span);
let right = self.add_piece(right_span);
self.link(prev, left);
self.link(left, middle);
self.link(middle, right);
self.link(right, next);
} else {
// insert at beginning aka in front of the piece
assert_eq!(start, off);
let span = self.buffer.append(bytes);
let p = self.add_piece(span);
self.link(p, piece);
self.link(prev, p);
}
self.len = self.len + bytes.len();
self.invariant();
}
pub fn to_vec(&self) -> Vec<u8> {
let mut v = Vec::new();
for (_, p) in self.pieces() {
v.push_all(self.buffer.get(self.get_piece(p).span))
}
v
}
pub fn to_utf8_string(&self) -> Result<String, std::string::FromUtf8Error> {
String::from_utf8(self.to_vec())
}
}
#[test]
fn test_pieces() {
let t = Text::new();
assert_eq!(t.pieces().collect::<Vec<_>>(), vec![]);
}
#[cfg(test)]
mod tests {
mod span {
use super::super::*;
#[test]
fn basics() {
let s = Span::new(1, 1);
assert_eq!(s.len(), 0);
assert!(s.is_empty());
let s2 = Span::new(3, 7);
assert!(s2.len() == 4);
}
#[test]
fn split() {
let s = Span::new(3, 7);
assert_eq!(s.split(0), None);
assert_eq!(s.split(4), None);
assert_eq!(s.split(3), Some((Span { off1: 3, off2: 6 }, Span { off1: 6, off2: 7 })));
}
}
mod append_only_buffer {
use super::super::*;
#[test]
fn basics() {
let mut b = AppendOnlyBuffer::new();
let bytes = "Hello World".as_bytes();
let sp = b.append(bytes);
assert_eq!(b.get(sp), bytes);
let bytes2 = "Just testing".as_bytes();
let sp2 = b.append(bytes2);
assert_eq!(b.get(sp), bytes);
assert_eq!(b.get(sp2), bytes2);
}
}
mod text {
use super::super::*;
#[test]
fn insert_beginning() {
let mut t = Text::new();
assert_eq!(t.len(), 0);
t.insert(0, "World".as_bytes());
assert_eq!(t.len(), 5);
assert_eq!(t.to_utf8_string().unwrap(), "World");
t.insert(0, "Hello ".as_bytes());
assert_eq!(t.len(), 11);
assert_eq!(t.to_utf8_string().unwrap(), "Hello World");
t.insert(0, "...".as_bytes());
assert_eq!(t.len(), 14);
assert_eq!(t.to_utf8_string().unwrap(), "...Hello World");
}
#[test]
fn append() {
let mut t = Text::new();
t.insert(0, "Hello".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello");
t.insert(5, " Bene".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "Hello Bene");
}
#[test]
fn insert_middle() {
let mut t = Text::new();
t.insert(0, "1234".as_bytes());
t.insert(2, "x".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12x34");
t.insert(3, "yz".as_bytes());
assert_eq!(t.to_utf8_string().unwrap(), "12xyz34");
}
#[test]
fn delete_all1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_all2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(0, 6);
assert_eq!(t.len(), 0);
}
#[test]
fn delete_part1() {
let mut t = Text::new();
t.insert(0, "123456".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn delete_part2() {
let mut t = Text::new();
t.insert(0, "456".as_bytes());
t.insert(0, "123".as_bytes());
t.delete(1, 5);
assert_eq!(t.len(), 2);
assert_eq!(t.to_utf8_string().unwrap(), "16");
}
#[test]
fn bytes1() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), bytes);
}
#[test]
fn bytes2() {
let mut t = Text::new();
let bytes = vec![0, 1, 2];
let bytes2 = vec![3, 4];
t.insert(0, &bytes2);
t.insert(0, &bytes);
assert_eq!(t.bytes().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
}
}
/// Set of possible commands
#[derive(Debug, Clone)]
pub enum Command {
Insert(String),
}
use parser_combinators::primitives::ParseError;
use parser_combinators::{spaces, between, many, char, satisfy, Parser, ParserExt};
impl Command {
pub fn parse(s: &str) -> Result<(Command, &str), ParseError> {
let literal = between(char('/'), char('/'), many(satisfy(|c| c!= '/')).map(Command::Insert));
let spaces = spaces();
spaces.with(char('i').with(literal)).parse(s)
}
}
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
info!("starting up");
//let mut text = Text::new();
let mut args = std::env::args();
args.next().unwrap();
let s = args.next().unwrap();
let cmd = Command::parse(&s);
println!("{:?}", cmd);
}
| link | identifier_name |
math.rs | use num_bigint::{BigInt, ToBigUint};
use num_traits::{Zero, One, Pow};
use std::collections::{BinaryHeap, HashSet, HashMap, VecDeque};
use std::cmp::{max, Ordering};
use std::hash::Hash;
use std::ops;
use std::fmt;
use crate::ast::{AST, as_int};
pub fn to_usize(n : &BigInt) -> Result<usize, String> {
match ToBigUint::to_biguint(n) {
Some(m) => Ok(m.iter_u64_digits()
.map(|d| d as usize)
.fold(0, |accum, d| accum * (std::u64::MAX as usize) + d)),
None => Err(format!("Could not convert {:?} to usize", n))
}
}
pub trait Sequence {
fn nth(&mut self, n : usize) -> Result<AST, String>;
fn increasing(&self) -> bool;
fn index_of(&mut self, v : AST) -> Option<usize>;
}
pub struct Naturals;
impl Naturals {
pub fn new() -> Naturals {
return Naturals;
}
}
impl Sequence for Naturals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(BigInt::from(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) => to_usize(&n).ok(),
_ => None
}
}
}
#[derive(Debug, Clone)]
pub struct Rat {
pub n : BigInt,
pub d : BigInt
}
impl PartialEq for Rat {
fn eq(&self, other : &Rat) -> bool {
return self.n.clone() * other.d.clone() == other.n.clone() * self.d.clone();
}
}
impl PartialOrd for Rat {
fn partial_cmp(&self, other : &Rat) -> Option<std::cmp::Ordering> {
return (self.n.clone() * other.d.clone()).partial_cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Eq for Rat {
}
impl Ord for Rat {
fn cmp(&self, other: &Rat) -> std::cmp::Ordering {
return (self.n.clone() * other.d.clone()).cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Hash for Rat {
fn hash<H>(&self, state : &mut H) where H: std::hash::Hasher {
let r = self.clone().simplify();
r.n.hash(state);
r.d.hash(state);
}
}
impl fmt::Display for Rat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.n, self.d)
}
}
pub fn gcd(a : BigInt, b : BigInt) -> BigInt {
let mut x = a;
let mut y = b;
while y!= Zero::zero() {
let temp = y.clone();
y = x % y;
x = temp;
}
return x;
}
impl Rat {
pub fn new(n : BigInt, d : BigInt) -> Rat {
let r = Rat { n, d };
return r.simplify();
}
pub fn from_usize(n : usize) -> Rat {
return Rat::new(BigInt::from(n), One::one());
}
pub fn simplify(mut self) -> Rat {
let g = gcd(self.n.clone(), self.d.clone());
self.n /= g.clone();
self.d /= g;
if self.d < Zero::zero() && self.n < Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
}
return self;
}
pub fn pow(mut self, a : &BigInt) -> Rat {
if a > &Zero::zero() {
let mut n : BigInt = One::one();
let orig_n = self.n.clone();
let orig_d = self.d.clone();
while &n < a {
self.n *= orig_n.clone();
self.d *= orig_d.clone();
n += 1;
}
return self;
} else if a < &Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
return self.pow(&-a);
} else {
return Rat { n: One::one(), d: One::one() };
}
}
}
impl ops::Add<BigInt> for Rat {
type Output = Rat;
fn add(self, b : BigInt) -> Rat {
return Rat::new(self.n + b * self.d.clone(), self.d);
}
}
impl ops::Sub<BigInt> for Rat {
type Output = Rat;
fn sub(self, b : BigInt) -> Rat {
return Rat::new(self.n - b * self.d.clone(), self.d);
}
}
impl ops::Mul<BigInt> for Rat {
type Output = Rat;
fn mul(self, b : BigInt) -> Rat {
return Rat::new(self.n * b, self.d);
}
}
impl ops::Div<BigInt> for Rat {
type Output = Rat;
fn div(self, b : BigInt) -> Rat {
return Rat::new(self.n, self.d * b);
}
}
impl ops::Add<Rat> for Rat {
type Output = Rat;
fn add(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() + b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Sub<Rat> for Rat {
type Output = Rat;
fn sub(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() - b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Mul<Rat> for Rat {
type Output = Rat;
fn mul(self, b : Rat) -> Rat {
return Rat::new(self.n * b.n, self.d * b.d);
}
}
impl ops::Div<Rat> for Rat {
type Output = Rat;
fn div(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d, self.d * b.n);
}
}
impl ops::MulAssign<Rat> for Rat {
fn mul_assign(&mut self, b : Rat) {
self.n *= b.n;
self.d *= b.d;
}
}
impl ops::Neg for Rat {
type Output = Rat;
fn neg(self) -> Rat {
return Rat { n: -self.n, d: self.d };
}
}
pub struct Integers;
impl Integers {
pub fn new() -> Integers {
return Integers;
}
}
pub fn int_nth(n : usize) -> BigInt {
if n % 2 == 0 {
return BigInt::from(n / 2);
} else {
return -BigInt::from((n + 1) / 2);
}
}
impl Sequence for Integers {
// Enumerate the integers as 0,-1,1,-2,2,...
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(int_nth(n))); | }
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) =>
if n < Zero::zero() {
match to_usize(&-n) {
Ok(m) => Some(2*m - 1),
_ => None
}
} else {
match to_usize(&n) {
Ok(m) => Some(2*m),
_ => None
}
}
_ => None
}
}
}
pub fn prime_factor(n_in : BigInt, ps : &mut PrimeSeq) -> std::collections::hash_map::IntoIter<BigInt, BigInt> {
let mut n = n_in;
let mut powers = HashMap::new();
let mut m = 0;
loop {
let p = ps.at(m);
if p.clone()*p.clone() > n {
break;
}
if n.clone() % p.clone() == Zero::zero() {
*powers.entry(p.clone()).or_insert(Zero::zero()) += 1;
n /= p;
m = 0;
} else {
m += 1;
}
}
*powers.entry(n).or_insert(Zero::zero()) += 1;
return powers.into_iter();
}
pub struct Rationals {
ps : PrimeSeq
}
impl Rationals {
pub fn new() -> Rationals {
return Rationals { ps : PrimeSeq::new() };
}
fn calc_nth(&mut self, n : usize) -> Result<Rat, String> {
let mut res = Rat::from_usize(1);
for (p,a) in prime_factor(BigInt::from(n), &mut self.ps) {
let b = int_nth(to_usize(&a)?);
let r = Rat::new(p.clone(), One::one()).pow(&b);
// println!("{}: {}^({} => {}) = {}", n, p, a, b, r);
res *= r;
}
return Ok(res);
}
}
impl Sequence for Rationals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
if n == 0 {
return Ok(AST::Rat(Rat::from_usize(0)));
}
if n % 2 == 0 {
return Ok(AST::Rat(self.calc_nth(n / 2)?));
} else {
return Ok(AST::Rat(-self.calc_nth((n + 1) / 2)?));
}
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let (mut n,d) = match v {
AST::Int(n) => (n, One::one()),
AST::Rat(Rat{n,d}) => (n,d),
_ => return None
};
let neg = n < Zero::zero();
if neg {
n = -n;
}
let mut powers : HashMap<BigInt, BigInt> = HashMap::new();
for (p,a) in prime_factor(n, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) += a;
}
for (p,a) in prime_factor(d, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) -= a;
}
let mut res = 1;
for (p,a) in powers.into_iter() {
res *= Pow::pow(to_usize(&p).ok()?, Integers.index_of(AST::Int(a))?);
}
if neg {
return Some(2*res - 1);
} else {
return Some(2*res);
}
}
}
pub struct PrimeSeq {
max : usize,
primes : Vec<BigInt>,
primes_set : HashSet<BigInt>,
sieve : Vec<bool>
}
impl PrimeSeq {
pub fn new() -> PrimeSeq {
return PrimeSeq {
max: 3,
primes: vec!(BigInt::from(2)),
primes_set : vec!(BigInt::from(2)).into_iter().collect(),
sieve : vec!(false, false, true)
};
}
fn run_sieve(&mut self, increment : usize) {
let mut i = 0;
while i < increment {
self.sieve.push(true);
i += 1;
}
println!("\nRunning sieve to {}", increment + self.max);
let mut p = 0;
while p < self.sieve.len() {
if self.sieve[p] {
let start = max(p*p, p * (self.max / p + 1));
let mut i = start;
while i < self.sieve.len() {
self.sieve[i] = false;
i += p;
}
if p >= self.max {
self.primes.push(BigInt::from(p));
self.primes_set.insert(BigInt::from(p));
}
}
p += 1;
}
self.max += increment;
}
fn at(&mut self, n : usize) -> BigInt {
if n >= self.primes.len() {
// See: https://en.wikipedia.org/wiki/Prime_number_theorem#Approximations_for_the_nth_prime_number
// This guarantees we will find the nth prime in this round of the sieve
let new_max = if n < 2 { // If n = 1, then loglog(n) is undefined, choose 100 because why not
100
} else {
// We use log2 here because it will overshoot even more than we need, and there's
// no built-in ln anyway.
n*(n.log2() + n.log2().log2())
};
self.run_sieve(new_max - self.max);
}
return self.primes[n].clone();
}
}
pub struct PrimeIt {
n : usize,
seq : PrimeSeq
}
pub fn primes() -> PrimeIt {
return PrimeIt { n : 0, seq : PrimeSeq::new() };
}
impl Iterator for PrimeIt {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
let idx = self.n;
let p = self.seq.at(idx);
self.n += 1;
return Some(p);
}
}
pub struct Factors {
n : BigInt,
m : usize,
ps : PrimeSeq
}
pub fn factor(n : BigInt) -> Factors {
return Factors { n: n, m: 0, ps: PrimeSeq::new() };
}
impl Iterator for Factors {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
if self.n <= One::one() {
return None;
}
loop {
let p = self.ps.at(self.m);
if self.n.clone() % p.clone() == Zero::zero() {
self.m = 0;
self.n /= p.clone();
return Some(p);
}
self.m += 1;
}
}
}
impl Sequence for PrimeSeq {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(self.at(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let n = as_int(v).ok()?;
// The list of primes is never empty.
if &n > self.primes.last().unwrap() {
self.run_sieve(to_usize(&n).ok()? - self.max);
}
let mut min_idx = 0;
let mut max_idx = self.primes.len() - 1;
while max_idx - min_idx > 1 {
let guess = (min_idx + max_idx) / 2;
match self.primes[guess].cmp(&n) {
Ordering::Less => min_idx = guess,
Ordering::Greater => max_idx = guess,
Ordering::Equal => return Some(guess)
}
}
return None;
}
} | }
fn increasing(&self) -> bool {
return false; | random_line_split |
math.rs | use num_bigint::{BigInt, ToBigUint};
use num_traits::{Zero, One, Pow};
use std::collections::{BinaryHeap, HashSet, HashMap, VecDeque};
use std::cmp::{max, Ordering};
use std::hash::Hash;
use std::ops;
use std::fmt;
use crate::ast::{AST, as_int};
pub fn to_usize(n : &BigInt) -> Result<usize, String> {
match ToBigUint::to_biguint(n) {
Some(m) => Ok(m.iter_u64_digits()
.map(|d| d as usize)
.fold(0, |accum, d| accum * (std::u64::MAX as usize) + d)),
None => Err(format!("Could not convert {:?} to usize", n))
}
}
pub trait Sequence {
fn nth(&mut self, n : usize) -> Result<AST, String>;
fn increasing(&self) -> bool;
fn index_of(&mut self, v : AST) -> Option<usize>;
}
pub struct Naturals;
impl Naturals {
pub fn new() -> Naturals {
return Naturals;
}
}
impl Sequence for Naturals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(BigInt::from(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) => to_usize(&n).ok(),
_ => None
}
}
}
#[derive(Debug, Clone)]
pub struct Rat {
pub n : BigInt,
pub d : BigInt
}
impl PartialEq for Rat {
fn eq(&self, other : &Rat) -> bool {
return self.n.clone() * other.d.clone() == other.n.clone() * self.d.clone();
}
}
impl PartialOrd for Rat {
fn partial_cmp(&self, other : &Rat) -> Option<std::cmp::Ordering> {
return (self.n.clone() * other.d.clone()).partial_cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Eq for Rat {
}
impl Ord for Rat {
fn cmp(&self, other: &Rat) -> std::cmp::Ordering {
return (self.n.clone() * other.d.clone()).cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Hash for Rat {
fn hash<H>(&self, state : &mut H) where H: std::hash::Hasher {
let r = self.clone().simplify();
r.n.hash(state);
r.d.hash(state);
}
}
impl fmt::Display for Rat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.n, self.d)
}
}
pub fn gcd(a : BigInt, b : BigInt) -> BigInt {
let mut x = a;
let mut y = b;
while y!= Zero::zero() {
let temp = y.clone();
y = x % y;
x = temp;
}
return x;
}
impl Rat {
pub fn new(n : BigInt, d : BigInt) -> Rat {
let r = Rat { n, d };
return r.simplify();
}
pub fn from_usize(n : usize) -> Rat {
return Rat::new(BigInt::from(n), One::one());
}
pub fn simplify(mut self) -> Rat {
let g = gcd(self.n.clone(), self.d.clone());
self.n /= g.clone();
self.d /= g;
if self.d < Zero::zero() && self.n < Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
}
return self;
}
pub fn pow(mut self, a : &BigInt) -> Rat {
if a > &Zero::zero() {
let mut n : BigInt = One::one();
let orig_n = self.n.clone();
let orig_d = self.d.clone();
while &n < a {
self.n *= orig_n.clone();
self.d *= orig_d.clone();
n += 1;
}
return self;
} else if a < &Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
return self.pow(&-a);
} else {
return Rat { n: One::one(), d: One::one() };
}
}
}
impl ops::Add<BigInt> for Rat {
type Output = Rat;
fn add(self, b : BigInt) -> Rat {
return Rat::new(self.n + b * self.d.clone(), self.d);
}
}
impl ops::Sub<BigInt> for Rat {
type Output = Rat;
fn sub(self, b : BigInt) -> Rat {
return Rat::new(self.n - b * self.d.clone(), self.d);
}
}
impl ops::Mul<BigInt> for Rat {
type Output = Rat;
fn mul(self, b : BigInt) -> Rat {
return Rat::new(self.n * b, self.d);
}
}
impl ops::Div<BigInt> for Rat {
type Output = Rat;
fn div(self, b : BigInt) -> Rat {
return Rat::new(self.n, self.d * b);
}
}
impl ops::Add<Rat> for Rat {
type Output = Rat;
fn add(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() + b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Sub<Rat> for Rat {
type Output = Rat;
fn sub(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() - b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Mul<Rat> for Rat {
type Output = Rat;
fn mul(self, b : Rat) -> Rat {
return Rat::new(self.n * b.n, self.d * b.d);
}
}
impl ops::Div<Rat> for Rat {
type Output = Rat;
fn div(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d, self.d * b.n);
}
}
impl ops::MulAssign<Rat> for Rat {
fn mul_assign(&mut self, b : Rat) {
self.n *= b.n;
self.d *= b.d;
}
}
impl ops::Neg for Rat {
type Output = Rat;
fn neg(self) -> Rat {
return Rat { n: -self.n, d: self.d };
}
}
pub struct Integers;
impl Integers {
pub fn new() -> Integers {
return Integers;
}
}
pub fn int_nth(n : usize) -> BigInt {
if n % 2 == 0 {
return BigInt::from(n / 2);
} else {
return -BigInt::from((n + 1) / 2);
}
}
impl Sequence for Integers {
// Enumerate the integers as 0,-1,1,-2,2,...
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(int_nth(n)));
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) =>
if n < Zero::zero() {
match to_usize(&-n) {
Ok(m) => Some(2*m - 1),
_ => None
}
} else {
match to_usize(&n) {
Ok(m) => Some(2*m),
_ => None
}
}
_ => None
}
}
}
pub fn prime_factor(n_in : BigInt, ps : &mut PrimeSeq) -> std::collections::hash_map::IntoIter<BigInt, BigInt> {
let mut n = n_in;
let mut powers = HashMap::new();
let mut m = 0;
loop {
let p = ps.at(m);
if p.clone()*p.clone() > n {
break;
}
if n.clone() % p.clone() == Zero::zero() {
*powers.entry(p.clone()).or_insert(Zero::zero()) += 1;
n /= p;
m = 0;
} else {
m += 1;
}
}
*powers.entry(n).or_insert(Zero::zero()) += 1;
return powers.into_iter();
}
pub struct Rationals {
ps : PrimeSeq
}
impl Rationals {
pub fn new() -> Rationals |
fn calc_nth(&mut self, n : usize) -> Result<Rat, String> {
let mut res = Rat::from_usize(1);
for (p,a) in prime_factor(BigInt::from(n), &mut self.ps) {
let b = int_nth(to_usize(&a)?);
let r = Rat::new(p.clone(), One::one()).pow(&b);
// println!("{}: {}^({} => {}) = {}", n, p, a, b, r);
res *= r;
}
return Ok(res);
}
}
impl Sequence for Rationals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
if n == 0 {
return Ok(AST::Rat(Rat::from_usize(0)));
}
if n % 2 == 0 {
return Ok(AST::Rat(self.calc_nth(n / 2)?));
} else {
return Ok(AST::Rat(-self.calc_nth((n + 1) / 2)?));
}
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let (mut n,d) = match v {
AST::Int(n) => (n, One::one()),
AST::Rat(Rat{n,d}) => (n,d),
_ => return None
};
let neg = n < Zero::zero();
if neg {
n = -n;
}
let mut powers : HashMap<BigInt, BigInt> = HashMap::new();
for (p,a) in prime_factor(n, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) += a;
}
for (p,a) in prime_factor(d, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) -= a;
}
let mut res = 1;
for (p,a) in powers.into_iter() {
res *= Pow::pow(to_usize(&p).ok()?, Integers.index_of(AST::Int(a))?);
}
if neg {
return Some(2*res - 1);
} else {
return Some(2*res);
}
}
}
pub struct PrimeSeq {
max : usize,
primes : Vec<BigInt>,
primes_set : HashSet<BigInt>,
sieve : Vec<bool>
}
impl PrimeSeq {
pub fn new() -> PrimeSeq {
return PrimeSeq {
max: 3,
primes: vec!(BigInt::from(2)),
primes_set : vec!(BigInt::from(2)).into_iter().collect(),
sieve : vec!(false, false, true)
};
}
fn run_sieve(&mut self, increment : usize) {
let mut i = 0;
while i < increment {
self.sieve.push(true);
i += 1;
}
println!("\nRunning sieve to {}", increment + self.max);
let mut p = 0;
while p < self.sieve.len() {
if self.sieve[p] {
let start = max(p*p, p * (self.max / p + 1));
let mut i = start;
while i < self.sieve.len() {
self.sieve[i] = false;
i += p;
}
if p >= self.max {
self.primes.push(BigInt::from(p));
self.primes_set.insert(BigInt::from(p));
}
}
p += 1;
}
self.max += increment;
}
fn at(&mut self, n : usize) -> BigInt {
if n >= self.primes.len() {
// See: https://en.wikipedia.org/wiki/Prime_number_theorem#Approximations_for_the_nth_prime_number
// This guarantees we will find the nth prime in this round of the sieve
let new_max = if n < 2 { // If n = 1, then loglog(n) is undefined, choose 100 because why not
100
} else {
// We use log2 here because it will overshoot even more than we need, and there's
// no built-in ln anyway.
n*(n.log2() + n.log2().log2())
};
self.run_sieve(new_max - self.max);
}
return self.primes[n].clone();
}
}
pub struct PrimeIt {
n : usize,
seq : PrimeSeq
}
pub fn primes() -> PrimeIt {
return PrimeIt { n : 0, seq : PrimeSeq::new() };
}
impl Iterator for PrimeIt {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
let idx = self.n;
let p = self.seq.at(idx);
self.n += 1;
return Some(p);
}
}
pub struct Factors {
n : BigInt,
m : usize,
ps : PrimeSeq
}
pub fn factor(n : BigInt) -> Factors {
return Factors { n: n, m: 0, ps: PrimeSeq::new() };
}
impl Iterator for Factors {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
if self.n <= One::one() {
return None;
}
loop {
let p = self.ps.at(self.m);
if self.n.clone() % p.clone() == Zero::zero() {
self.m = 0;
self.n /= p.clone();
return Some(p);
}
self.m += 1;
}
}
}
impl Sequence for PrimeSeq {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(self.at(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let n = as_int(v).ok()?;
// The list of primes is never empty.
if &n > self.primes.last().unwrap() {
self.run_sieve(to_usize(&n).ok()? - self.max);
}
let mut min_idx = 0;
let mut max_idx = self.primes.len() - 1;
while max_idx - min_idx > 1 {
let guess = (min_idx + max_idx) / 2;
match self.primes[guess].cmp(&n) {
Ordering::Less => min_idx = guess,
Ordering::Greater => max_idx = guess,
Ordering::Equal => return Some(guess)
}
}
return None;
}
}
| {
return Rationals { ps : PrimeSeq::new() };
} | identifier_body |
math.rs | use num_bigint::{BigInt, ToBigUint};
use num_traits::{Zero, One, Pow};
use std::collections::{BinaryHeap, HashSet, HashMap, VecDeque};
use std::cmp::{max, Ordering};
use std::hash::Hash;
use std::ops;
use std::fmt;
use crate::ast::{AST, as_int};
pub fn to_usize(n : &BigInt) -> Result<usize, String> {
match ToBigUint::to_biguint(n) {
Some(m) => Ok(m.iter_u64_digits()
.map(|d| d as usize)
.fold(0, |accum, d| accum * (std::u64::MAX as usize) + d)),
None => Err(format!("Could not convert {:?} to usize", n))
}
}
pub trait Sequence {
fn nth(&mut self, n : usize) -> Result<AST, String>;
fn increasing(&self) -> bool;
fn index_of(&mut self, v : AST) -> Option<usize>;
}
pub struct Naturals;
impl Naturals {
pub fn new() -> Naturals {
return Naturals;
}
}
impl Sequence for Naturals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(BigInt::from(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) => to_usize(&n).ok(),
_ => None
}
}
}
#[derive(Debug, Clone)]
pub struct Rat {
pub n : BigInt,
pub d : BigInt
}
impl PartialEq for Rat {
fn eq(&self, other : &Rat) -> bool {
return self.n.clone() * other.d.clone() == other.n.clone() * self.d.clone();
}
}
impl PartialOrd for Rat {
fn partial_cmp(&self, other : &Rat) -> Option<std::cmp::Ordering> {
return (self.n.clone() * other.d.clone()).partial_cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Eq for Rat {
}
impl Ord for Rat {
fn cmp(&self, other: &Rat) -> std::cmp::Ordering {
return (self.n.clone() * other.d.clone()).cmp(&(other.n.clone() * self.d.clone()));
}
}
impl Hash for Rat {
fn hash<H>(&self, state : &mut H) where H: std::hash::Hasher {
let r = self.clone().simplify();
r.n.hash(state);
r.d.hash(state);
}
}
impl fmt::Display for Rat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}/{}", self.n, self.d)
}
}
pub fn gcd(a : BigInt, b : BigInt) -> BigInt {
let mut x = a;
let mut y = b;
while y!= Zero::zero() {
let temp = y.clone();
y = x % y;
x = temp;
}
return x;
}
impl Rat {
pub fn new(n : BigInt, d : BigInt) -> Rat {
let r = Rat { n, d };
return r.simplify();
}
pub fn from_usize(n : usize) -> Rat {
return Rat::new(BigInt::from(n), One::one());
}
pub fn simplify(mut self) -> Rat {
let g = gcd(self.n.clone(), self.d.clone());
self.n /= g.clone();
self.d /= g;
if self.d < Zero::zero() && self.n < Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
}
return self;
}
pub fn pow(mut self, a : &BigInt) -> Rat {
if a > &Zero::zero() {
let mut n : BigInt = One::one();
let orig_n = self.n.clone();
let orig_d = self.d.clone();
while &n < a {
self.n *= orig_n.clone();
self.d *= orig_d.clone();
n += 1;
}
return self;
} else if a < &Zero::zero() {
std::mem::swap(&mut self.n, &mut self.d);
return self.pow(&-a);
} else {
return Rat { n: One::one(), d: One::one() };
}
}
}
impl ops::Add<BigInt> for Rat {
type Output = Rat;
fn add(self, b : BigInt) -> Rat {
return Rat::new(self.n + b * self.d.clone(), self.d);
}
}
impl ops::Sub<BigInt> for Rat {
type Output = Rat;
fn sub(self, b : BigInt) -> Rat {
return Rat::new(self.n - b * self.d.clone(), self.d);
}
}
impl ops::Mul<BigInt> for Rat {
type Output = Rat;
fn mul(self, b : BigInt) -> Rat {
return Rat::new(self.n * b, self.d);
}
}
impl ops::Div<BigInt> for Rat {
type Output = Rat;
fn div(self, b : BigInt) -> Rat {
return Rat::new(self.n, self.d * b);
}
}
impl ops::Add<Rat> for Rat {
type Output = Rat;
fn add(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() + b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Sub<Rat> for Rat {
type Output = Rat;
fn sub(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d.clone() - b.n * self.d.clone(), self.d * b.d);
}
}
impl ops::Mul<Rat> for Rat {
type Output = Rat;
fn mul(self, b : Rat) -> Rat {
return Rat::new(self.n * b.n, self.d * b.d);
}
}
impl ops::Div<Rat> for Rat {
type Output = Rat;
fn div(self, b : Rat) -> Rat {
return Rat::new(self.n * b.d, self.d * b.n);
}
}
impl ops::MulAssign<Rat> for Rat {
fn mul_assign(&mut self, b : Rat) {
self.n *= b.n;
self.d *= b.d;
}
}
impl ops::Neg for Rat {
type Output = Rat;
fn neg(self) -> Rat {
return Rat { n: -self.n, d: self.d };
}
}
pub struct Integers;
impl Integers {
pub fn new() -> Integers {
return Integers;
}
}
pub fn int_nth(n : usize) -> BigInt {
if n % 2 == 0 {
return BigInt::from(n / 2);
} else {
return -BigInt::from((n + 1) / 2);
}
}
impl Sequence for Integers {
// Enumerate the integers as 0,-1,1,-2,2,...
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(int_nth(n)));
}
fn increasing(&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
match v {
AST::Int(n) =>
if n < Zero::zero() {
match to_usize(&-n) {
Ok(m) => Some(2*m - 1),
_ => None
}
} else {
match to_usize(&n) {
Ok(m) => Some(2*m),
_ => None
}
}
_ => None
}
}
}
pub fn prime_factor(n_in : BigInt, ps : &mut PrimeSeq) -> std::collections::hash_map::IntoIter<BigInt, BigInt> {
let mut n = n_in;
let mut powers = HashMap::new();
let mut m = 0;
loop {
let p = ps.at(m);
if p.clone()*p.clone() > n {
break;
}
if n.clone() % p.clone() == Zero::zero() {
*powers.entry(p.clone()).or_insert(Zero::zero()) += 1;
n /= p;
m = 0;
} else {
m += 1;
}
}
*powers.entry(n).or_insert(Zero::zero()) += 1;
return powers.into_iter();
}
pub struct Rationals {
ps : PrimeSeq
}
impl Rationals {
pub fn new() -> Rationals {
return Rationals { ps : PrimeSeq::new() };
}
fn calc_nth(&mut self, n : usize) -> Result<Rat, String> {
let mut res = Rat::from_usize(1);
for (p,a) in prime_factor(BigInt::from(n), &mut self.ps) {
let b = int_nth(to_usize(&a)?);
let r = Rat::new(p.clone(), One::one()).pow(&b);
// println!("{}: {}^({} => {}) = {}", n, p, a, b, r);
res *= r;
}
return Ok(res);
}
}
impl Sequence for Rationals {
fn nth(&mut self, n : usize) -> Result<AST, String> {
if n == 0 {
return Ok(AST::Rat(Rat::from_usize(0)));
}
if n % 2 == 0 {
return Ok(AST::Rat(self.calc_nth(n / 2)?));
} else {
return Ok(AST::Rat(-self.calc_nth((n + 1) / 2)?));
}
}
fn | (&self) -> bool {
return false;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let (mut n,d) = match v {
AST::Int(n) => (n, One::one()),
AST::Rat(Rat{n,d}) => (n,d),
_ => return None
};
let neg = n < Zero::zero();
if neg {
n = -n;
}
let mut powers : HashMap<BigInt, BigInt> = HashMap::new();
for (p,a) in prime_factor(n, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) += a;
}
for (p,a) in prime_factor(d, &mut self.ps) {
*powers.entry(p).or_insert(Zero::zero()) -= a;
}
let mut res = 1;
for (p,a) in powers.into_iter() {
res *= Pow::pow(to_usize(&p).ok()?, Integers.index_of(AST::Int(a))?);
}
if neg {
return Some(2*res - 1);
} else {
return Some(2*res);
}
}
}
pub struct PrimeSeq {
max : usize,
primes : Vec<BigInt>,
primes_set : HashSet<BigInt>,
sieve : Vec<bool>
}
impl PrimeSeq {
pub fn new() -> PrimeSeq {
return PrimeSeq {
max: 3,
primes: vec!(BigInt::from(2)),
primes_set : vec!(BigInt::from(2)).into_iter().collect(),
sieve : vec!(false, false, true)
};
}
fn run_sieve(&mut self, increment : usize) {
let mut i = 0;
while i < increment {
self.sieve.push(true);
i += 1;
}
println!("\nRunning sieve to {}", increment + self.max);
let mut p = 0;
while p < self.sieve.len() {
if self.sieve[p] {
let start = max(p*p, p * (self.max / p + 1));
let mut i = start;
while i < self.sieve.len() {
self.sieve[i] = false;
i += p;
}
if p >= self.max {
self.primes.push(BigInt::from(p));
self.primes_set.insert(BigInt::from(p));
}
}
p += 1;
}
self.max += increment;
}
fn at(&mut self, n : usize) -> BigInt {
if n >= self.primes.len() {
// See: https://en.wikipedia.org/wiki/Prime_number_theorem#Approximations_for_the_nth_prime_number
// This guarantees we will find the nth prime in this round of the sieve
let new_max = if n < 2 { // If n = 1, then loglog(n) is undefined, choose 100 because why not
100
} else {
// We use log2 here because it will overshoot even more than we need, and there's
// no built-in ln anyway.
n*(n.log2() + n.log2().log2())
};
self.run_sieve(new_max - self.max);
}
return self.primes[n].clone();
}
}
pub struct PrimeIt {
n : usize,
seq : PrimeSeq
}
pub fn primes() -> PrimeIt {
return PrimeIt { n : 0, seq : PrimeSeq::new() };
}
impl Iterator for PrimeIt {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
let idx = self.n;
let p = self.seq.at(idx);
self.n += 1;
return Some(p);
}
}
pub struct Factors {
n : BigInt,
m : usize,
ps : PrimeSeq
}
pub fn factor(n : BigInt) -> Factors {
return Factors { n: n, m: 0, ps: PrimeSeq::new() };
}
impl Iterator for Factors {
type Item = BigInt;
fn next(&mut self) -> Option<Self::Item> {
if self.n <= One::one() {
return None;
}
loop {
let p = self.ps.at(self.m);
if self.n.clone() % p.clone() == Zero::zero() {
self.m = 0;
self.n /= p.clone();
return Some(p);
}
self.m += 1;
}
}
}
impl Sequence for PrimeSeq {
fn nth(&mut self, n : usize) -> Result<AST, String> {
return Ok(AST::Int(self.at(n)));
}
fn increasing(&self) -> bool {
return true;
}
fn index_of(&mut self, v : AST) -> Option<usize> {
let n = as_int(v).ok()?;
// The list of primes is never empty.
if &n > self.primes.last().unwrap() {
self.run_sieve(to_usize(&n).ok()? - self.max);
}
let mut min_idx = 0;
let mut max_idx = self.primes.len() - 1;
while max_idx - min_idx > 1 {
let guess = (min_idx + max_idx) / 2;
match self.primes[guess].cmp(&n) {
Ordering::Less => min_idx = guess,
Ordering::Greater => max_idx = guess,
Ordering::Equal => return Some(guess)
}
}
return None;
}
}
| increasing | identifier_name |
server.rs | //! A generic MIO server.
use error::{MioResult, MioError};
use handler::Handler;
use io::{Ready, IoHandle, IoReader, IoWriter, IoAcceptor};
use iobuf::{Iobuf, RWIobuf};
use reactor::Reactor;
use socket::{TcpSocket, TcpAcceptor, SockAddr};
use std::cell::RefCell;
use std::collections::{Deque, RingBuf};
use std::rc::Rc;
// TODO(cgaebel): There's currently no way to kill a server waiting on an
// `accept`.
static READBUF_SIZE: uint = 4096;
// The number of sends that we queue up before pushing back on the client.
static MAX_OUTSTANDING_SENDS: uint = 1;
pub trait PerClient<St> {
fn on_start(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
fn on_read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE {
return RWIobuf::new(capacity);
}
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn checked_tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len!= 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if!self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if!self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if!in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if!self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor, | global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
// connections.
_ => return Ok(()),
};
debug!("spawning server.");
let fd = socket.desc().fd;
let per_client = (self.on_accept)(reactor);
let handler = Connection::new(socket, self.global.clone(), per_client);
try!(reactor.register(fd, handler));
debug!("done accept.");
Ok(())
}
fn writable(&mut self, _reactor: &mut Reactor) -> MioResult<()> {
warn!("Accepting socket got a `writable` notification. How odd. Ignoring.");
Ok(())
}
}
// TODO(cgaebel): The connection factory `F` should take the reactor, but
// doesn't because I have no idea how to pass a &mut to an unboxed closure.
pub fn gen_tcp_server<St, C: PerClient<St>>(
reactor: &mut Reactor,
listen_on: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
backlog: uint,
shared_state: St,
on_accept: fn(reactor: &mut Reactor) -> C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let accept_socket: TcpSocket = try!(TcpSocket::v4());
tweak_sock_opts(&accept_socket);
let acceptor: TcpAcceptor = try!(accept_socket.bind(listen_on));
let global = Rc::new(Global::new(shared_state));
let mut on_accept = Some(on_accept);
reactor.listen(acceptor, backlog, |socket| {
AcceptHandler::new(socket, global.clone(), on_accept.take().unwrap())
})
}
pub fn gen_tcp_client<C: PerClient<()>>(
reactor: &mut Reactor,
connect_to: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
client: C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let socket: TcpSocket = try!(TcpSocket::v4());
let mut client = Some(client);
let global = Rc::new(Global::new(()));
reactor.connect(socket, connect_to, |socket| {
tweak_sock_opts(&socket);
Connection::new(socket, global.clone(), client.take().unwrap())
})
} | random_line_split |
|
server.rs | //! A generic MIO server.
use error::{MioResult, MioError};
use handler::Handler;
use io::{Ready, IoHandle, IoReader, IoWriter, IoAcceptor};
use iobuf::{Iobuf, RWIobuf};
use reactor::Reactor;
use socket::{TcpSocket, TcpAcceptor, SockAddr};
use std::cell::RefCell;
use std::collections::{Deque, RingBuf};
use std::rc::Rc;
// TODO(cgaebel): There's currently no way to kill a server waiting on an
// `accept`.
static READBUF_SIZE: uint = 4096;
// The number of sends that we queue up before pushing back on the client.
static MAX_OUTSTANDING_SENDS: uint = 1;
pub trait PerClient<St> {
fn on_start(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
fn on_read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE {
return RWIobuf::new(capacity);
}
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn checked_tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len!= 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if!self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if!self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if!in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if!self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
// connections.
_ => return Ok(()),
};
debug!("spawning server.");
let fd = socket.desc().fd;
let per_client = (self.on_accept)(reactor);
let handler = Connection::new(socket, self.global.clone(), per_client);
try!(reactor.register(fd, handler));
debug!("done accept.");
Ok(())
}
fn writable(&mut self, _reactor: &mut Reactor) -> MioResult<()> {
warn!("Accepting socket got a `writable` notification. How odd. Ignoring.");
Ok(())
}
}
// TODO(cgaebel): The connection factory `F` should take the reactor, but
// doesn't because I have no idea how to pass a &mut to an unboxed closure.
pub fn gen_tcp_server<St, C: PerClient<St>>(
reactor: &mut Reactor,
listen_on: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
backlog: uint,
shared_state: St,
on_accept: fn(reactor: &mut Reactor) -> C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let accept_socket: TcpSocket = try!(TcpSocket::v4());
tweak_sock_opts(&accept_socket);
let acceptor: TcpAcceptor = try!(accept_socket.bind(listen_on));
let global = Rc::new(Global::new(shared_state));
let mut on_accept = Some(on_accept);
reactor.listen(acceptor, backlog, |socket| {
AcceptHandler::new(socket, global.clone(), on_accept.take().unwrap())
})
}
pub fn gen_tcp_client<C: PerClient<()>>(
reactor: &mut Reactor,
connect_to: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
client: C)
-> MioResult<()> | {
// TODO(cgaebel): ipv6? udp?
let socket: TcpSocket = try!(TcpSocket::v4());
let mut client = Some(client);
let global = Rc::new(Global::new(()));
reactor.connect(socket, connect_to, |socket| {
tweak_sock_opts(&socket);
Connection::new(socket, global.clone(), client.take().unwrap())
})
} | identifier_body |
|
server.rs | //! A generic MIO server.
use error::{MioResult, MioError};
use handler::Handler;
use io::{Ready, IoHandle, IoReader, IoWriter, IoAcceptor};
use iobuf::{Iobuf, RWIobuf};
use reactor::Reactor;
use socket::{TcpSocket, TcpAcceptor, SockAddr};
use std::cell::RefCell;
use std::collections::{Deque, RingBuf};
use std::rc::Rc;
// TODO(cgaebel): There's currently no way to kill a server waiting on an
// `accept`.
static READBUF_SIZE: uint = 4096;
// The number of sends that we queue up before pushing back on the client.
static MAX_OUTSTANDING_SENDS: uint = 1;
pub trait PerClient<St> {
fn on_start(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
fn on_read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE |
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn checked_tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len!= 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if!self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if!self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if!in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if!self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
// connections.
_ => return Ok(()),
};
debug!("spawning server.");
let fd = socket.desc().fd;
let per_client = (self.on_accept)(reactor);
let handler = Connection::new(socket, self.global.clone(), per_client);
try!(reactor.register(fd, handler));
debug!("done accept.");
Ok(())
}
fn writable(&mut self, _reactor: &mut Reactor) -> MioResult<()> {
warn!("Accepting socket got a `writable` notification. How odd. Ignoring.");
Ok(())
}
}
// TODO(cgaebel): The connection factory `F` should take the reactor, but
// doesn't because I have no idea how to pass a &mut to an unboxed closure.
pub fn gen_tcp_server<St, C: PerClient<St>>(
reactor: &mut Reactor,
listen_on: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
backlog: uint,
shared_state: St,
on_accept: fn(reactor: &mut Reactor) -> C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let accept_socket: TcpSocket = try!(TcpSocket::v4());
tweak_sock_opts(&accept_socket);
let acceptor: TcpAcceptor = try!(accept_socket.bind(listen_on));
let global = Rc::new(Global::new(shared_state));
let mut on_accept = Some(on_accept);
reactor.listen(acceptor, backlog, |socket| {
AcceptHandler::new(socket, global.clone(), on_accept.take().unwrap())
})
}
pub fn gen_tcp_client<C: PerClient<()>>(
reactor: &mut Reactor,
connect_to: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
client: C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let socket: TcpSocket = try!(TcpSocket::v4());
let mut client = Some(client);
let global = Rc::new(Global::new(()));
reactor.connect(socket, connect_to, |socket| {
tweak_sock_opts(&socket);
Connection::new(socket, global.clone(), client.take().unwrap())
})
}
| {
return RWIobuf::new(capacity);
} | conditional_block |
server.rs | //! A generic MIO server.
use error::{MioResult, MioError};
use handler::Handler;
use io::{Ready, IoHandle, IoReader, IoWriter, IoAcceptor};
use iobuf::{Iobuf, RWIobuf};
use reactor::Reactor;
use socket::{TcpSocket, TcpAcceptor, SockAddr};
use std::cell::RefCell;
use std::collections::{Deque, RingBuf};
use std::rc::Rc;
// TODO(cgaebel): There's currently no way to kill a server waiting on an
// `accept`.
static READBUF_SIZE: uint = 4096;
// The number of sends that we queue up before pushing back on the client.
static MAX_OUTSTANDING_SENDS: uint = 1;
pub trait PerClient<St> {
fn on_start(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
fn on_read(&mut self, reactor: &mut Reactor, c: &mut ConnectionState<St>, buf: RWIobuf<'static>) -> MioResult<()>;
fn on_close(&mut self, _reactor: &mut Reactor, _c: &mut ConnectionState<St>) -> MioResult<()> { Ok(()) }
}
/// Global state for a server.
pub struct Global<St> {
/// This should really be a lock-free stack. Unfortunately, only a bounded
/// queue is implemented in the standard library. A vec will do for now. =(
readbuf_pool: RefCell<Vec<Vec<u8>>>,
custom_state: St,
}
impl<St> Global<St> {
/// Creates a new global state for a server.
fn new(custom_state: St) -> Global<St> {
Global {
readbuf_pool: RefCell::new(Vec::new()),
custom_state: custom_state,
}
}
/// Mints a new iobuf with the given capacity. If the requested length is
/// less than or equal to 4kb, a pool of iobufs will be used. Recieved data
/// will automatically use iobufs from this pool, and buffers `sent` will be
/// returned to it when empty.
fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> {
if capacity > READBUF_SIZE {
return RWIobuf::new(capacity);
}
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
let mut ret =
match readbuf_pool.pop() {
None => RWIobuf::new(READBUF_SIZE),
Some(v) => RWIobuf::from_vec(v),
};
debug_assert!(ret.cap() == READBUF_SIZE);
ret.set_limits_and_window((0, capacity), (0, capacity)).unwrap();
ret
}
/// Returns an iobuf to the pool, if possible. It's safe to send any iobuf
/// back to the pool, but only iobufs constructed with `make_iobuf` (or
/// luckily compatible other ones) will actually end up in the pool.
fn return_iobuf(&self, buf: RWIobuf<'static>) {
let mut readbuf_pool = self.readbuf_pool.borrow_mut();
match buf.into_vec() {
Some(v) => {
if v.len() == READBUF_SIZE {
readbuf_pool.push(v);
}
},
_ => {},
}
}
#[inline(always)]
pub fn state(&self) -> &St { &self.custom_state }
}
bitflags! {
flags Flags: u8 {
static Readable = 0x01,
static Writable = 0x02,
// Have we ever ticked?
static HaveTicked = 0x04,
// Have we seen EOF on the readng end?
static HasHitEof = 0x08,
}
}
pub struct ConnectionState<St> {
global: Rc<Global<St>>,
fd: TcpSocket,
send_queue: RingBuf<RWIobuf<'static>>,
flags: Flags,
}
impl<St> ConnectionState<St> {
pub fn new(fd: TcpSocket, global: Rc<Global<St>>) -> ConnectionState<St> {
ConnectionState {
global: global,
fd: fd,
send_queue: RingBuf::new(),
flags: Flags::empty(),
}
}
pub fn fd(&self) -> &TcpSocket { &self.fd }
pub fn global(&self) -> &Rc<Global<St>> { &self.global }
pub fn make_iobuf(&self, capacity: uint) -> RWIobuf<'static> { self.global.make_iobuf(capacity) }
pub fn return_iobuf(&self, buf: RWIobuf<'static>) { self.global.return_iobuf(buf) }
pub fn send(&mut self, buf: RWIobuf<'static>) {
self.send_queue.push(buf);
}
}
struct Connection<St, C> {
state: ConnectionState<St>,
per_client: C,
}
fn handle_eof(r: MioResult<()>, flags: &mut Flags) -> MioResult<()> {
match r {
Ok(x) => Ok(x),
Err(e) => {
if e == MioError::eof() {
flags.remove(Readable);
flags.insert(HasHitEof);
Ok(())
} else {
Err(e)
}
}
}
}
impl<St, C: PerClient<St>> Connection<St, C> {
fn new(fd: TcpSocket, global: Rc<Global<St>>, per_client: C) -> Connection<St, C> {
Connection {
state: ConnectionState::new(fd, global),
per_client: per_client,
}
}
fn | (&mut self, reactor: &mut Reactor) -> MioResult<()> {
match self.tick(reactor) {
Ok(x) => Ok(x),
Err(e) => {
// We can't really use this. We already have an error!
let _ = self.per_client.on_close(reactor, &mut self.state);
Err(e)
},
}
}
fn can_continue(&self) -> bool {
let send_queue_len = self.state.send_queue.len();
// readable, and still room on the send queue.
(self.state.flags.contains(Readable) && send_queue_len <= MAX_OUTSTANDING_SENDS)
// writable, and there's still stuff to send.
|| (self.state.flags.contains(Writable) && send_queue_len!= 0)
}
fn tick(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if!self.state.flags.contains(HaveTicked) {
try!(self.per_client.on_start(reactor, &mut self.state));
self.state.flags.insert(HaveTicked);
}
while self.can_continue() {
// Handle EOFs in the reader by flushing the send queue.
try!(handle_eof(self.fill_buf(reactor), &mut self.state.flags));
// Handle EOFs in the writer by passing it up.
try!(self.flush_buf());
}
// Only report EOF when the send queue is flushed.
if self.state.flags.contains(HasHitEof) && self.state.send_queue.is_empty() {
Err(MioError::eof())
} else {
Ok(())
}
}
fn fill_buf(&mut self, reactor: &mut Reactor) -> MioResult<()> {
if!self.state.flags.contains(Readable) {
return Ok(());
}
let mut in_buf = self.state.make_iobuf(READBUF_SIZE);
let res = try!(self.state.fd.read(&mut in_buf));
if res.would_block() {
self.state.flags.remove(Readable);
}
in_buf.flip_lo();
if!in_buf.is_empty() {
try!(self.per_client.on_read(reactor, &mut self.state, in_buf));
} else {
self.state.flags.insert(HasHitEof);
}
Ok(())
}
fn flush_buf(&mut self) -> MioResult<()> {
if!self.state.flags.contains(Writable) {
return Ok(());
}
let mut drop_head = false;
match self.state.send_queue.front_mut() {
Some(buf) => {
let res = try!(self.state.fd.write(buf));
if res.would_block() {
self.state.flags.remove(Writable);
}
if buf.is_empty() { drop_head = true; }
},
None => {}
}
if drop_head {
let mut first_elem = self.state.send_queue.pop_front().unwrap();
first_elem.flip_lo();
self.state.return_iobuf(first_elem);
}
Ok(())
}
}
impl<St, C: PerClient<St>> Handler for Connection<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Readable);
self.checked_tick(reactor)
}
fn writable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
self.state.flags.insert(Writable);
self.checked_tick(reactor)
}
}
struct AcceptHandler<St, C> {
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C,
}
impl<St, C: PerClient<St>> AcceptHandler<St, C> {
fn new(
accept_socket: TcpAcceptor,
global: Rc<Global<St>>,
on_accept: fn(reactor: &mut Reactor) -> C)
-> AcceptHandler<St, C> {
AcceptHandler {
accept_socket: accept_socket,
global: global,
on_accept: on_accept,
}
}
}
impl<St, C: PerClient<St>> Handler for AcceptHandler<St, C> {
fn readable(&mut self, reactor: &mut Reactor) -> MioResult<()> {
debug!("trying to accept!");
// If a shutdown has been requested, kill the accept thread.
let socket: TcpSocket =
match self.accept_socket.accept() {
Ok(Ready(socket)) => socket,
// It's fine if this didn't work out. We can still accept other
// connections.
_ => return Ok(()),
};
debug!("spawning server.");
let fd = socket.desc().fd;
let per_client = (self.on_accept)(reactor);
let handler = Connection::new(socket, self.global.clone(), per_client);
try!(reactor.register(fd, handler));
debug!("done accept.");
Ok(())
}
fn writable(&mut self, _reactor: &mut Reactor) -> MioResult<()> {
warn!("Accepting socket got a `writable` notification. How odd. Ignoring.");
Ok(())
}
}
// TODO(cgaebel): The connection factory `F` should take the reactor, but
// doesn't because I have no idea how to pass a &mut to an unboxed closure.
pub fn gen_tcp_server<St, C: PerClient<St>>(
reactor: &mut Reactor,
listen_on: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
backlog: uint,
shared_state: St,
on_accept: fn(reactor: &mut Reactor) -> C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let accept_socket: TcpSocket = try!(TcpSocket::v4());
tweak_sock_opts(&accept_socket);
let acceptor: TcpAcceptor = try!(accept_socket.bind(listen_on));
let global = Rc::new(Global::new(shared_state));
let mut on_accept = Some(on_accept);
reactor.listen(acceptor, backlog, |socket| {
AcceptHandler::new(socket, global.clone(), on_accept.take().unwrap())
})
}
pub fn gen_tcp_client<C: PerClient<()>>(
reactor: &mut Reactor,
connect_to: &SockAddr,
tweak_sock_opts: |&TcpSocket|,
client: C)
-> MioResult<()> {
// TODO(cgaebel): ipv6? udp?
let socket: TcpSocket = try!(TcpSocket::v4());
let mut client = Some(client);
let global = Rc::new(Global::new(()));
reactor.connect(socket, connect_to, |socket| {
tweak_sock_opts(&socket);
Connection::new(socket, global.clone(), client.take().unwrap())
})
}
| checked_tick | identifier_name |
instruments.rs | //! interfacing with the `instruments` command line tool
use std::fmt::Write;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use anyhow::{anyhow, Result};
use cargo::core::Workspace;
use semver::Version;
use crate::opt::AppConfig;
/// Holds available templates.
pub struct TemplateCatalog {
standard_templates: Vec<String>,
custom_templates: Vec<String>,
}
/// Represents the Xcode Instrument version detected.
pub enum XcodeInstruments {
XcTrace,
InstrumentsBinary,
}
impl XcodeInstruments {
/// Detects which version of Xcode Instruments is installed and if it can be launched.
pub(crate) fn detect() -> Result<XcodeInstruments> {
let cur_version = get_macos_version()?;
let macos_xctrace_version = Version::parse("10.15.0").unwrap();
if cur_version >= macos_xctrace_version {
// This is the check used by Homebrew,see
// https://github.com/Homebrew/install/blob/a1d820fc8950312c35073700d0ea88a531bc5950/install.sh#L216
let clt_git_filepath = Path::new("/Library/Developer/CommandLineTools/usr/bin/git");
if clt_git_filepath.exists() {
return Ok(XcodeInstruments::XcTrace);
}
} else {
let instruments_app_filepath = Path::new("/usr/bin/instruments");
if instruments_app_filepath.exists() {
return Ok(XcodeInstruments::InstrumentsBinary);
}
}
Err(anyhow!(
"Xcode Instruments is not installed. Please install the Xcode Command Line Tools."
))
}
/// Return a catalog of available Instruments Templates.
///
/// The custom templates only appears if you have custom templates.
pub(crate) fn available_templates(&self) -> Result<TemplateCatalog> {
match self {
XcodeInstruments::XcTrace => parse_xctrace_template_list(),
XcodeInstruments::InstrumentsBinary => parse_instruments_template_list(),
}
}
/// Prepare the Xcode Instruments profiling command
///
/// If the `xctrace` tool is used, the prepared command looks like
///
/// ```sh
/// xcrun xctrace record --template MyTemplate \
/// --time-limit 5000ms \
/// --output path/to/tracefile \
/// --launch \
/// --
/// ```
///
/// If the older `instruments` tool is used, the prepared command looks
/// like
///
/// ```sh
/// instruments -t MyTemplate \
/// -D /path/to/tracefile \
/// -l 5000ms
/// ```
fn profiling_command(
&self,
template_name: &str,
trace_filepath: &Path,
time_limit: Option<usize>,
) -> Result<Command> {
match self {
XcodeInstruments::XcTrace => {
let mut command = Command::new("xcrun");
command.args(["xctrace", "record"]);
command.args(["--template", template_name]);
if let Some(limit_millis) = time_limit {
let limit_millis_str = format!("{}ms", limit_millis);
command.args(["--time-limit", &limit_millis_str]);
}
command.args(["--output", trace_filepath.to_str().unwrap()]);
// redirect stdin & err to the user's terminal
if let Some(tty) = get_tty()? {
command.args(["--target-stdin", &tty, "--target-stdout", &tty]);
}
command.args(["--launch", "--"]);
Ok(command)
}
XcodeInstruments::InstrumentsBinary => {
let mut command = Command::new("instruments");
command.args(["-t", template_name]);
command.arg("-D").arg(trace_filepath);
if let Some(limit) = time_limit {
command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string like '11.2.3`)
/// and returns the corresponding semver struct `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
let Output { status, stdout,.. } =
Command::new("sw_vers").args(["-productVersion"]).output()?;
if!status.success() {
return Err(anyhow!("macOS version cannot be determined"));
}
semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn | () -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if!status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line|!line.starts_with('=') &&!line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout,.. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if!status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line|!line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line|!line.starts_with("~/Library/"))
.take_while(|line|!line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.custom_templates {
output.push('\n');
output.push_str(name);
}
output.push('\n');
output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if!trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
}
/// Return the template name abbreviation if available.
fn abbrev_name(template_name: &str) -> Option<&str> {
match template_name {
"Time Profiler" => Some("time"),
"Allocations" => Some("alloc"),
"File Activity" => Some("io"),
"System Trace" => Some("sys"),
_ => None,
}
}
/// Profile the target binary at `binary_filepath`, write results at
/// `trace_filepath` and returns its path.
pub(crate) fn profile_target(
target_filepath: &Path,
xctrace_tool: &XcodeInstruments,
app_config: &AppConfig,
workspace: &Workspace,
) -> Result<PathBuf> {
// 1. Get the template name from config
// This borrows a ref to the String in Option<String>. The value can be
// unwrapped because in this version the template was checked earlier to
// be a `Some(x)`.
let template_name = resolve_template_name(app_config.template_name.as_deref().unwrap());
// 2. Compute the trace filepath and create its parent directory
let workspace_root = workspace.root().to_path_buf();
let trace_filepath = prepare_trace_filepath(
target_filepath,
template_name,
app_config,
workspace_root.as_path(),
)?;
// 3. Print current activity `Profiling target/debug/tries`
{
let target_shortpath = target_filepath
.strip_prefix(workspace_root)
.unwrap_or(target_filepath)
.to_string_lossy();
let status_detail = format!("{} with template '{}'", target_shortpath, template_name);
workspace.config().shell().status("Profiling", status_detail)?;
}
let mut command =
xctrace_tool.profiling_command(template_name, &trace_filepath, app_config.time_limit)?;
command.arg(target_filepath);
if!app_config.target_args.is_empty() {
command.args(app_config.target_args.as_slice());
}
let output = command.output()?;
if!output.status.success() {
let stderr =
String::from_utf8(output.stderr).unwrap_or_else(|_| "failed to capture stderr".into());
let stdout =
String::from_utf8(output.stdout).unwrap_or_else(|_| "failed to capture stdout".into());
return Err(anyhow!("instruments errored: {} {}", stderr, stdout));
}
Ok(trace_filepath)
}
/// get the tty of th current terminal session
fn get_tty() -> Result<Option<String>> {
let mut command = Command::new("ps");
command.arg("otty=").arg(std::process::id().to_string());
Ok(String::from_utf8(command.output()?.stdout)?
.split_whitespace()
.next()
.map(|tty| format!("/dev/{}", tty)))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn semvers_can_be_parsed() {
assert_eq!(semver_from_utf8(b"2.3.4").unwrap(), Version::parse("2.3.4").unwrap());
assert_eq!(semver_from_utf8(b"11.1").unwrap(), Version::parse("11.1.0").unwrap());
assert_eq!(semver_from_utf8(b"11").unwrap(), Version::parse("11.0.0").unwrap());
}
}
| parse_xctrace_template_list | identifier_name |
instruments.rs | //! interfacing with the `instruments` command line tool
use std::fmt::Write;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use anyhow::{anyhow, Result};
use cargo::core::Workspace;
use semver::Version;
use crate::opt::AppConfig;
/// Holds available templates.
pub struct TemplateCatalog {
standard_templates: Vec<String>,
custom_templates: Vec<String>,
}
/// Represents the Xcode Instrument version detected.
pub enum XcodeInstruments {
XcTrace,
InstrumentsBinary,
}
impl XcodeInstruments {
/// Detects which version of Xcode Instruments is installed and if it can be launched.
pub(crate) fn detect() -> Result<XcodeInstruments> {
let cur_version = get_macos_version()?;
let macos_xctrace_version = Version::parse("10.15.0").unwrap();
if cur_version >= macos_xctrace_version {
// This is the check used by Homebrew,see
// https://github.com/Homebrew/install/blob/a1d820fc8950312c35073700d0ea88a531bc5950/install.sh#L216
let clt_git_filepath = Path::new("/Library/Developer/CommandLineTools/usr/bin/git");
if clt_git_filepath.exists() {
return Ok(XcodeInstruments::XcTrace);
}
} else {
let instruments_app_filepath = Path::new("/usr/bin/instruments");
if instruments_app_filepath.exists() {
return Ok(XcodeInstruments::InstrumentsBinary);
}
}
Err(anyhow!(
"Xcode Instruments is not installed. Please install the Xcode Command Line Tools."
))
}
/// Return a catalog of available Instruments Templates.
///
/// The custom templates only appears if you have custom templates.
pub(crate) fn available_templates(&self) -> Result<TemplateCatalog> {
match self {
XcodeInstruments::XcTrace => parse_xctrace_template_list(),
XcodeInstruments::InstrumentsBinary => parse_instruments_template_list(),
}
}
/// Prepare the Xcode Instruments profiling command
///
/// If the `xctrace` tool is used, the prepared command looks like
///
/// ```sh
/// xcrun xctrace record --template MyTemplate \
/// --time-limit 5000ms \
/// --output path/to/tracefile \
/// --launch \
/// --
/// ```
///
/// If the older `instruments` tool is used, the prepared command looks
/// like
///
/// ```sh
/// instruments -t MyTemplate \
/// -D /path/to/tracefile \
/// -l 5000ms
/// ```
fn profiling_command(
&self,
template_name: &str,
trace_filepath: &Path,
time_limit: Option<usize>,
) -> Result<Command> {
match self {
XcodeInstruments::XcTrace => {
let mut command = Command::new("xcrun");
command.args(["xctrace", "record"]);
command.args(["--template", template_name]);
if let Some(limit_millis) = time_limit {
let limit_millis_str = format!("{}ms", limit_millis);
command.args(["--time-limit", &limit_millis_str]);
}
command.args(["--output", trace_filepath.to_str().unwrap()]);
// redirect stdin & err to the user's terminal
if let Some(tty) = get_tty()? {
command.args(["--target-stdin", &tty, "--target-stdout", &tty]);
}
command.args(["--launch", "--"]);
Ok(command)
}
XcodeInstruments::InstrumentsBinary => {
let mut command = Command::new("instruments");
command.args(["-t", template_name]);
command.arg("-D").arg(trace_filepath);
if let Some(limit) = time_limit {
command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string like '11.2.3`)
/// and returns the corresponding semver struct `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
let Output { status, stdout,.. } =
Command::new("sw_vers").args(["-productVersion"]).output()?;
if!status.success() {
return Err(anyhow!("macOS version cannot be determined"));
}
semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn parse_xctrace_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if!status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line|!line.starts_with('=') &&!line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout,.. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if!status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line|!line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line|!line.starts_with("~/Library/"))
.take_while(|line|!line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.custom_templates {
output.push('\n');
output.push_str(name);
}
output.push('\n');
output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if!trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
}
/// Return the template name abbreviation if available.
fn abbrev_name(template_name: &str) -> Option<&str> {
match template_name {
"Time Profiler" => Some("time"),
"Allocations" => Some("alloc"),
"File Activity" => Some("io"),
"System Trace" => Some("sys"),
_ => None,
}
}
/// Profile the target binary at `binary_filepath`, write results at
/// `trace_filepath` and returns its path.
pub(crate) fn profile_target(
target_filepath: &Path,
xctrace_tool: &XcodeInstruments,
app_config: &AppConfig,
workspace: &Workspace,
) -> Result<PathBuf> {
// 1. Get the template name from config
// This borrows a ref to the String in Option<String>. The value can be
// unwrapped because in this version the template was checked earlier to
// be a `Some(x)`.
let template_name = resolve_template_name(app_config.template_name.as_deref().unwrap());
// 2. Compute the trace filepath and create its parent directory
let workspace_root = workspace.root().to_path_buf();
let trace_filepath = prepare_trace_filepath(
target_filepath,
template_name,
app_config,
workspace_root.as_path(),
)?;
// 3. Print current activity `Profiling target/debug/tries`
{
let target_shortpath = target_filepath
.strip_prefix(workspace_root)
.unwrap_or(target_filepath)
.to_string_lossy();
let status_detail = format!("{} with template '{}'", target_shortpath, template_name);
workspace.config().shell().status("Profiling", status_detail)?;
}
let mut command =
xctrace_tool.profiling_command(template_name, &trace_filepath, app_config.time_limit)?;
command.arg(target_filepath);
if!app_config.target_args.is_empty() {
command.args(app_config.target_args.as_slice());
}
let output = command.output()?;
if!output.status.success() |
Ok(trace_filepath)
}
/// get the tty of th current terminal session
fn get_tty() -> Result<Option<String>> {
let mut command = Command::new("ps");
command.arg("otty=").arg(std::process::id().to_string());
Ok(String::from_utf8(command.output()?.stdout)?
.split_whitespace()
.next()
.map(|tty| format!("/dev/{}", tty)))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn semvers_can_be_parsed() {
assert_eq!(semver_from_utf8(b"2.3.4").unwrap(), Version::parse("2.3.4").unwrap());
assert_eq!(semver_from_utf8(b"11.1").unwrap(), Version::parse("11.1.0").unwrap());
assert_eq!(semver_from_utf8(b"11").unwrap(), Version::parse("11.0.0").unwrap());
}
}
| {
let stderr =
String::from_utf8(output.stderr).unwrap_or_else(|_| "failed to capture stderr".into());
let stdout =
String::from_utf8(output.stdout).unwrap_or_else(|_| "failed to capture stdout".into());
return Err(anyhow!("instruments errored: {} {}", stderr, stdout));
} | conditional_block |
instruments.rs | //! interfacing with the `instruments` command line tool
use std::fmt::Write;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use anyhow::{anyhow, Result};
use cargo::core::Workspace;
use semver::Version;
use crate::opt::AppConfig;
/// Holds available templates.
pub struct TemplateCatalog {
standard_templates: Vec<String>,
custom_templates: Vec<String>,
}
/// Represents the Xcode Instrument version detected.
pub enum XcodeInstruments {
XcTrace,
InstrumentsBinary,
}
impl XcodeInstruments {
    /// Detects which version of Xcode Instruments is installed and if it can be launched.
    ///
    /// On macOS 10.15+ the modern `xctrace` tool is preferred (its presence is
    /// inferred from a Command Line Tools install); older systems fall back to
    /// the legacy `/usr/bin/instruments` binary.
    pub(crate) fn detect() -> Result<XcodeInstruments> {
        let cur_version = get_macos_version()?;
        let macos_xctrace_version = Version::parse("10.15.0").unwrap();
        if cur_version >= macos_xctrace_version {
            // This is the check used by Homebrew, see
            // https://github.com/Homebrew/install/blob/a1d820fc8950312c35073700d0ea88a531bc5950/install.sh#L216
            let clt_git_filepath = Path::new("/Library/Developer/CommandLineTools/usr/bin/git");
            if clt_git_filepath.exists() {
                return Ok(XcodeInstruments::XcTrace);
            }
        } else {
            let instruments_app_filepath = Path::new("/usr/bin/instruments");
            if instruments_app_filepath.exists() {
                return Ok(XcodeInstruments::InstrumentsBinary);
            }
        }
        Err(anyhow!(
            "Xcode Instruments is not installed. Please install the Xcode Command Line Tools."
        ))
    }
    /// Return a catalog of available Instruments Templates.
    ///
    /// The custom-templates section only appears if you have custom templates
    /// installed.
    pub(crate) fn available_templates(&self) -> Result<TemplateCatalog> {
        match self {
            XcodeInstruments::XcTrace => parse_xctrace_template_list(),
            XcodeInstruments::InstrumentsBinary => parse_instruments_template_list(),
        }
    }
    /// Prepare the Xcode Instruments profiling command
    ///
    /// If the `xctrace` tool is used, the prepared command looks like
    ///
    /// ```sh
    /// xcrun xctrace record --template MyTemplate \
    ///                      --time-limit 5000ms \
    ///                      --output path/to/tracefile \
    ///                      --launch \
    ///                      --
    /// ```
    ///
    /// If the older `instruments` tool is used, the prepared command looks
    /// like
    ///
    /// ```sh
    /// instruments -t MyTemplate \
    ///             -D /path/to/tracefile \
    ///             -l 5000ms
    /// ```
    fn profiling_command(
        &self,
        template_name: &str,
        trace_filepath: &Path,
        time_limit: Option<usize>,
    ) -> Result<Command> {
        match self {
            XcodeInstruments::XcTrace => {
                let mut command = Command::new("xcrun");
                command.args(["xctrace", "record"]);
                command.args(["--template", template_name]);
                // `xctrace` expects the limit with an explicit unit, e.g. `5000ms`.
                if let Some(limit_millis) = time_limit {
                    let limit_millis_str = format!("{}ms", limit_millis);
                    command.args(["--time-limit", &limit_millis_str]);
                }
                // NOTE(review): `to_str().unwrap()` panics on a non-UTF-8 trace
                // path — TODO confirm callers can never produce one.
                command.args(["--output", trace_filepath.to_str().unwrap()]);
                // redirect stdin & stdout of the profiled target to the user's
                // terminal so interactive programs remain usable
                if let Some(tty) = get_tty()? {
                    command.args(["--target-stdin", &tty, "--target-stdout", &tty]);
                }
                // the target binary and its arguments are appended by the
                // caller after `--launch --`
                command.args(["--launch", "--"]);
                Ok(command)
            }
            XcodeInstruments::InstrumentsBinary => {
                let mut command = Command::new("instruments");
                command.args(["-t", template_name]);
                command.arg("-D").arg(trace_filepath);
                if let Some(limit) = time_limit {
                    command.args(["-l", &limit.to_string()]);
                }
                Ok(command)
            }
        }
    }
}
/// Return the macOS version.
///
/// Runs `sw_vers -productVersion` and converts its output (a string such as
/// `11.2.3`) into the corresponding semver `Version`.
fn get_macos_version() -> Result<Version> {
    let output = Command::new("sw_vers").arg("-productVersion").output()?;
    if output.status.success() {
        semver_from_utf8(&output.stdout)
    } else {
        Err(anyhow!("macOS version cannot be determined"))
    }
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn parse_xctrace_template_list() -> Result<TemplateCatalog> {
    let Output { status, stdout, stderr } =
        Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
    if !status.success() {
        return Err(anyhow!(
            "Could not list templates. Please check your Xcode Instruments installation."
        ));
    }
    // Some older versions of xctrace print results on stderr,
    // newer versions print results on stdout.
    let output = if stdout.is_empty() { stderr } else { stdout };
    let templates_str = std::str::from_utf8(&output)?;
    let mut templates_iter = templates_str.lines();
    // Skip the "== Standard Templates ==" header line, then collect entries
    // until the blank line that terminates the standard section. `by_ref`
    // lets the same iterator continue below from where this chain stopped.
    let standard_templates = templates_iter
        .by_ref()
        .skip(1)
        .map(|line| line.trim())
        .take_while(|line| !line.starts_with('=') && !line.is_empty())
        .map(|line| line.into())
        .collect::<Vec<_>>();
    if standard_templates.is_empty() {
        return Err(anyhow!(
            "No available templates. Please check your Xcode Instruments installation."
        ));
    }
    // The iterator resumes after the standard section; skip the
    // "== Custom Templates ==" header and any surrounding blank lines.
    let custom_templates = templates_iter
        .map(|line| line.trim())
        .skip_while(|line| line.starts_with('=') || line.is_empty())
        .map(|line| line.into())
        .collect::<Vec<_>>();
    Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
    let Output { status, stdout, .. } =
        Command::new("instruments").args(["-s", "templates"]).output()?;
    if !status.success() {
        return Err(anyhow!(
            "Could not list templates. Please check your Xcode Instruments installation."
        ));
    }
    let templates_str = std::str::from_utf8(&stdout)?;
    // Skip the "Known Templates:" header; built-in entries are quoted names,
    // and the built-in section ends where the "~/Library/..." paths begin.
    let standard_templates = templates_str
        .lines()
        .skip(1)
        .map(|line| line.trim().trim_matches('"'))
        .take_while(|line| !line.starts_with("~/Library/"))
        .map(|line| line.into())
        .collect::<Vec<_>>();
    if standard_templates.is_empty() {
        return Err(anyhow!(
            "No available templates. Please check your Xcode Instruments installation."
        ));
    }
    // Custom templates are listed as full paths; keep only the file stem.
    // NOTE(review): `file_stem().unwrap()` would panic on a path without a
    // file name — presumably `instruments` never emits one; worth confirming.
    let custom_templates = templates_str
        .lines()
        .map(|line| line.trim().trim_matches('"'))
        .skip_while(|line| !line.starts_with("~/Library/"))
        .take_while(|line| !line.is_empty())
        .map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
        .map(|line| line.into())
        .collect::<Vec<_>>();
    Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
/// Render the template catalog content as a two-column listing: the template
/// name plus its abbreviation, when one exists (see module-level docs above
/// for an example of the output).
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
    let mut output: String = "Xcode Instruments templates:\n".into();
    // Column width is driven by the longest template name. Default to 0 for
    // an empty catalog so we render bare headers instead of panicking on
    // `max()` returning `None`.
    let max_width = catalog
        .standard_templates
        .iter()
        .chain(catalog.custom_templates.iter())
        .map(|name| name.len())
        .max()
        .unwrap_or(0);
    // column headers for the built-in section
    write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
    write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
    for name in &catalog.standard_templates {
        output.push('\n');
        if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
            write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
                .unwrap();
        } else {
            output.push_str(name);
        }
    }
    output.push('\n');
    // column headers for the custom section (custom templates never have
    // abbreviations)
    write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
    write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
    for name in &catalog.custom_templates {
        output.push('\n');
        output.push_str(name);
    }
    output.push('\n');
    output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
    target_filepath: &Path,
    template_name: &str,
    app_config: &AppConfig,
    workspace_root: &Path,
) -> Result<PathBuf> {
    // An explicit output path from the configuration wins; no directory is
    // created in that case.
    if let Some(ref path) = app_config.trace_filepath {
        return Ok(path.to_path_buf());
    }
    let trace_dir = workspace_root.join("target").join("instruments");
    if !trace_dir.exists() {
        fs::create_dir_all(&trace_dir)
            .map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
    }
    // Name the trace `<binary>_<template>_<timestamp>.trace`; the millisecond
    // timestamp keeps repeated runs from colliding.
    let trace_filename = {
        let target_shortname = target_filepath
            .file_stem()
            .and_then(|s| s.to_str())
            .ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
        let template_name = template_name.replace(' ', "-");
        let now = chrono::Local::now();
        format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
    };
    let trace_filepath = trace_dir.join(trace_filename);
    Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
fn resolve_template_name(template_name: &str) -> &str |
/// Return the template name abbreviation if available.
///
/// Only a handful of built-in templates have short aliases; all other
/// names yield `None`.
fn abbrev_name(template_name: &str) -> Option<&str> {
    const ABBREVIATIONS: [(&str, &str); 4] = [
        ("Time Profiler", "time"),
        ("Allocations", "alloc"),
        ("File Activity", "io"),
        ("System Trace", "sys"),
    ];
    ABBREVIATIONS
        .iter()
        .find(|&&(full, _)| full == template_name)
        .map(|&(_, short)| short)
}
/// Profile the target binary at `target_filepath`, write results to a trace
/// file and return that file's path.
pub(crate) fn profile_target(
    target_filepath: &Path,
    xctrace_tool: &XcodeInstruments,
    app_config: &AppConfig,
    workspace: &Workspace,
) -> Result<PathBuf> {
    // 1. Get the template name from config
    // This borrows a ref to the String in Option<String>. The value can be
    // unwrapped because in this version the template was checked earlier to
    // be a `Some(x)`.
    let template_name = resolve_template_name(app_config.template_name.as_deref().unwrap());
    // 2. Compute the trace filepath and create its parent directory
    let workspace_root = workspace.root().to_path_buf();
    let trace_filepath = prepare_trace_filepath(
        target_filepath,
        template_name,
        app_config,
        workspace_root.as_path(),
    )?;
    // 3. Print current activity, e.g. `Profiling target/debug/tries`
    {
        let target_shortpath = target_filepath
            .strip_prefix(workspace_root)
            .unwrap_or(target_filepath)
            .to_string_lossy();
        let status_detail = format!("{} with template '{}'", target_shortpath, template_name);
        workspace.config().shell().status("Profiling", status_detail)?;
    }
    // 4. Build and run the profiler command, forwarding any extra arguments
    // to the profiled target.
    let mut command =
        xctrace_tool.profiling_command(template_name, &trace_filepath, app_config.time_limit)?;
    command.arg(target_filepath);
    if !app_config.target_args.is_empty() {
        command.args(app_config.target_args.as_slice());
    }
    let output = command.output()?;
    // On failure, bundle both captured streams into the error message.
    if !output.status.success() {
        let stderr =
            String::from_utf8(output.stderr).unwrap_or_else(|_| "failed to capture stderr".into());
        let stdout =
            String::from_utf8(output.stdout).unwrap_or_else(|_| "failed to capture stdout".into());
        return Err(anyhow!("instruments errored: {} {}", stderr, stdout));
    }
    Ok(trace_filepath)
}
/// Get the tty of the current terminal session, e.g. `/dev/ttys001`.
///
/// Returns `Ok(None)` when the process is not attached to a terminal.
fn get_tty() -> Result<Option<String>> {
    let ps_output = Command::new("ps")
        .arg("otty=")
        .arg(std::process::id().to_string())
        .output()?;
    let tty = String::from_utf8(ps_output.stdout)?
        .split_whitespace()
        .next()
        .map(|name| format!("/dev/{}", name));
    Ok(tty)
}
#[cfg(test)]
mod test {
    use super::*;
    // `semver_from_utf8` must pad missing minor/patch components with zeroes.
    #[test]
    fn semvers_can_be_parsed() {
        assert_eq!(semver_from_utf8(b"2.3.4").unwrap(), Version::parse("2.3.4").unwrap());
        assert_eq!(semver_from_utf8(b"11.1").unwrap(), Version::parse("11.1.0").unwrap());
        assert_eq!(semver_from_utf8(b"11").unwrap(), Version::parse("11.0.0").unwrap());
    }
}
| {
match template_name {
"time" => "Time Profiler",
"alloc" => "Allocations",
"io" => "File Activity",
"sys" => "System Trace",
other => other,
}
} | identifier_body |
instruments.rs | //! interfacing with the `instruments` command line tool
use std::fmt::Write;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use anyhow::{anyhow, Result};
use cargo::core::Workspace;
use semver::Version;
use crate::opt::AppConfig;
/// Holds available templates.
pub struct TemplateCatalog {
standard_templates: Vec<String>,
custom_templates: Vec<String>,
}
/// Represents the Xcode Instrument version detected.
pub enum XcodeInstruments {
XcTrace,
InstrumentsBinary,
}
impl XcodeInstruments {
/// Detects which version of Xcode Instruments is installed and if it can be launched.
pub(crate) fn detect() -> Result<XcodeInstruments> {
let cur_version = get_macos_version()?;
let macos_xctrace_version = Version::parse("10.15.0").unwrap();
if cur_version >= macos_xctrace_version {
// This is the check used by Homebrew, see
// https://github.com/Homebrew/install/blob/a1d820fc8950312c35073700d0ea88a531bc5950/install.sh#L216
let clt_git_filepath = Path::new("/Library/Developer/CommandLineTools/usr/bin/git");
if clt_git_filepath.exists() {
return Ok(XcodeInstruments::XcTrace);
}
} else {
let instruments_app_filepath = Path::new("/usr/bin/instruments");
if instruments_app_filepath.exists() {
return Ok(XcodeInstruments::InstrumentsBinary);
}
}
Err(anyhow!(
"Xcode Instruments is not installed. Please install the Xcode Command Line Tools."
))
}
/// Return a catalog of available Instruments Templates.
///
/// The custom templates only appears if you have custom templates.
pub(crate) fn available_templates(&self) -> Result<TemplateCatalog> {
match self {
XcodeInstruments::XcTrace => parse_xctrace_template_list(),
XcodeInstruments::InstrumentsBinary => parse_instruments_template_list(),
}
}
/// Prepare the Xcode Instruments profiling command
///
/// If the `xctrace` tool is used, the prepared command looks like
///
/// ```sh
/// xcrun xctrace record --template MyTemplate \
/// --time-limit 5000ms \
/// --output path/to/tracefile \
/// --launch \
/// --
/// ```
///
/// If the older `instruments` tool is used, the prepared command looks
/// like
///
/// ```sh
/// instruments -t MyTemplate \
/// -D /path/to/tracefile \
/// -l 5000ms
/// ```
fn profiling_command(
&self,
template_name: &str,
trace_filepath: &Path,
time_limit: Option<usize>,
) -> Result<Command> {
match self {
XcodeInstruments::XcTrace => {
let mut command = Command::new("xcrun");
command.args(["xctrace", "record"]);
command.args(["--template", template_name]);
if let Some(limit_millis) = time_limit {
let limit_millis_str = format!("{}ms", limit_millis);
command.args(["--time-limit", &limit_millis_str]);
}
command.args(["--output", trace_filepath.to_str().unwrap()]);
// redirect stdin & err to the user's terminal
if let Some(tty) = get_tty()? {
command.args(["--target-stdin", &tty, "--target-stdout", &tty]);
}
command.args(["--launch", "--"]);
Ok(command)
}
XcodeInstruments::InstrumentsBinary => {
let mut command = Command::new("instruments");
command.args(["-t", template_name]);
command.arg("-D").arg(trace_filepath);
if let Some(limit) = time_limit {
command.args(["-l", &limit.to_string()]);
}
Ok(command)
}
}
}
}
/// Return the macOS version.
///
/// This function parses the output of `sw_vers -productVersion` (a string like `11.2.3`)
/// and returns the corresponding semver struct `Version{major: 11, minor: 2, patch: 3}`.
fn get_macos_version() -> Result<Version> {
let Output { status, stdout,.. } =
Command::new("sw_vers").args(["-productVersion"]).output()?;
if!status.success() {
return Err(anyhow!("macOS version cannot be determined"));
}
semver_from_utf8(&stdout)
}
/// Returns a semver given a slice of bytes
///
/// This function tries to construct a semver struct given a raw utf8 byte array
/// that may not contain a patch number, `"11.1"` is parsed as `"11.1.0"`.
fn semver_from_utf8(version: &[u8]) -> Result<Version> {
let to_semver = |version_string: &str| {
Version::parse(version_string).map_err(|error| {
anyhow!("cannot parse version: `{}`, because of {}", version_string, error)
})
};
let version_string = std::str::from_utf8(version)?;
match version_string.split('.').count() {
1 => to_semver(&format!("{}.0.0", version_string.trim())),
2 => to_semver(&format!("{}.0", version_string.trim())),
3 => to_semver(version_string.trim()),
_ => Err(anyhow!("invalid version: {}", version_string)),
}
}
/// Parse xctrace template listing.
///
/// Xctrace prints the list on either stderr (older versions) or stdout (recent).
/// In either case, the expected output is:
///
/// ```
/// == Standard Templates ==
/// Activity Monitor
/// Allocations
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace
/// Time Profiler
/// Zombies
///
/// == Custom Templates ==
/// MyTemplate
/// ```
fn parse_xctrace_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout, stderr } =
Command::new("xcrun").args(["xctrace", "list", "templates"]).output()?;
if!status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
// Some older versions of xctrace print results on stderr,
// newer version print results on stdout.
let output = if stdout.is_empty() { stderr } else { stdout };
let templates_str = std::str::from_utf8(&output)?;
let mut templates_iter = templates_str.lines();
let standard_templates = templates_iter
.by_ref()
.skip(1)
.map(|line| line.trim())
.take_while(|line|!line.starts_with('=') &&!line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_iter
.map(|line| line.trim())
.skip_while(|line| line.starts_with('=') || line.is_empty())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Parse /usr/bin/instruments template list.
///
/// The expected output on stdout is:
///
/// ```
/// Known Templates:
/// "Activity Monitor"
/// "Allocations"
/// "Animation Hitches"
/// "App Launch"
/// "Blank"
/// "Core Data"
/// "Counters"
/// "Energy Log"
/// "File Activity"
/// "Game Performance"
/// "Leaks"
/// "Logging"
/// "Metal System Trace"
/// "Network"
/// "SceneKit"
/// "SwiftUI"
/// "System Trace"
/// "Time Profiler"
/// "Zombies"
/// "~/Library/Application Support/Instruments/Templates/MyTemplate.tracetemplate"
/// ```
fn parse_instruments_template_list() -> Result<TemplateCatalog> {
let Output { status, stdout,.. } =
Command::new("instruments").args(["-s", "templates"]).output()?;
if!status.success() {
return Err(anyhow!(
"Could not list templates. Please check your Xcode Instruments installation."
));
}
let templates_str = std::str::from_utf8(&stdout)?;
let standard_templates = templates_str
.lines()
.skip(1)
.map(|line| line.trim().trim_matches('"'))
.take_while(|line|!line.starts_with("~/Library/"))
.map(|line| line.into())
.collect::<Vec<_>>();
if standard_templates.is_empty() {
return Err(anyhow!(
"No available templates. Please check your Xcode Instruments installation."
));
}
let custom_templates = templates_str
.lines()
.map(|line| line.trim().trim_matches('"'))
.skip_while(|line|!line.starts_with("~/Library/"))
.take_while(|line|!line.is_empty())
.map(|line| Path::new(line).file_stem().unwrap().to_string_lossy())
.map(|line| line.into())
.collect::<Vec<_>>();
Ok(TemplateCatalog { standard_templates, custom_templates })
}
/// Render the template catalog content as a string.
///
/// The returned string is similar to
///
/// ```text
/// Xcode Instruments templates:
///
/// built-in abbrev
/// --------------------------
/// Activity Monitor
/// Allocations (alloc)
/// Animation Hitches
/// App Launch
/// Core Data
/// Counters
/// Energy Log
/// File Activity (io)
/// Game Performance
/// Leaks
/// Logging
/// Metal System Trace
/// Network
/// SceneKit
/// SwiftUI
/// System Trace (sys)
/// Time Profiler (time)
/// Zombies
///
/// custom
/// --------------------------
/// MyTemplate
/// ```
pub fn render_template_catalog(catalog: &TemplateCatalog) -> String {
let mut output: String = "Xcode Instruments templates:\n".into();
let max_width = catalog
.standard_templates
.iter()
.chain(catalog.custom_templates.iter())
.map(|name| name.len())
.max()
.unwrap();
// column headers
write!(&mut output, "\n{:width$}abbrev", "built-in", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.standard_templates {
output.push('\n');
if let Some(abbrv) = abbrev_name(name.trim_matches('"')) {
write!(&mut output, "{:width$}({abbrev})", name, width = max_width + 2, abbrev = abbrv)
.unwrap();
} else {
output.push_str(name);
}
}
output.push('\n');
// column headers
write!(&mut output, "\n{:width$}", "custom", width = max_width + 2).unwrap();
write!(&mut output, "\n{:-<width$}", "", width = max_width + 8).unwrap();
for name in &catalog.custom_templates {
output.push('\n');
output.push_str(name);
}
output.push('\n');
output
}
/// Compute the tracefile output path, creating the directory structure
/// in `target/instruments` if needed.
fn prepare_trace_filepath(
target_filepath: &Path,
template_name: &str,
app_config: &AppConfig,
workspace_root: &Path,
) -> Result<PathBuf> {
if let Some(ref path) = app_config.trace_filepath {
return Ok(path.to_path_buf());
}
let trace_dir = workspace_root.join("target").join("instruments");
if!trace_dir.exists() {
fs::create_dir_all(&trace_dir)
.map_err(|e| anyhow!("failed to create {:?}: {}", &trace_dir, e))?;
}
let trace_filename = {
let target_shortname = target_filepath
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| anyhow!("invalid target path {:?}", target_filepath))?;
let template_name = template_name.replace(' ', "-");
let now = chrono::Local::now();
format!("{}_{}_{}.trace", target_shortname, template_name, now.format("%F_%H%M%S-%3f"))
};
let trace_filepath = trace_dir.join(trace_filename);
Ok(trace_filepath)
}
/// Return the complete template name, replacing abbreviation if provided.
///
/// Unrecognised names are passed through unchanged, so full template names
/// and custom templates keep working.
fn resolve_template_name(template_name: &str) -> &str {
    const EXPANSIONS: [(&str, &str); 4] = [
        ("time", "Time Profiler"),
        ("alloc", "Allocations"),
        ("io", "File Activity"),
        ("sys", "System Trace"),
    ];
    EXPANSIONS
        .iter()
        .find(|&&(abbrev, _)| abbrev == template_name)
        .map_or(template_name, |&(_, full)| full)
}
| "Time Profiler" => Some("time"),
"Allocations" => Some("alloc"),
"File Activity" => Some("io"),
"System Trace" => Some("sys"),
_ => None,
}
}
/// Profile the target binary at `binary_filepath`, write results at
/// `trace_filepath` and returns its path.
pub(crate) fn profile_target(
target_filepath: &Path,
xctrace_tool: &XcodeInstruments,
app_config: &AppConfig,
workspace: &Workspace,
) -> Result<PathBuf> {
// 1. Get the template name from config
// This borrows a ref to the String in Option<String>. The value can be
// unwrapped because in this version the template was checked earlier to
// be a `Some(x)`.
let template_name = resolve_template_name(app_config.template_name.as_deref().unwrap());
// 2. Compute the trace filepath and create its parent directory
let workspace_root = workspace.root().to_path_buf();
let trace_filepath = prepare_trace_filepath(
target_filepath,
template_name,
app_config,
workspace_root.as_path(),
)?;
// 3. Print current activity `Profiling target/debug/tries`
{
let target_shortpath = target_filepath
.strip_prefix(workspace_root)
.unwrap_or(target_filepath)
.to_string_lossy();
let status_detail = format!("{} with template '{}'", target_shortpath, template_name);
workspace.config().shell().status("Profiling", status_detail)?;
}
let mut command =
xctrace_tool.profiling_command(template_name, &trace_filepath, app_config.time_limit)?;
command.arg(target_filepath);
if!app_config.target_args.is_empty() {
command.args(app_config.target_args.as_slice());
}
let output = command.output()?;
if!output.status.success() {
let stderr =
String::from_utf8(output.stderr).unwrap_or_else(|_| "failed to capture stderr".into());
let stdout =
String::from_utf8(output.stdout).unwrap_or_else(|_| "failed to capture stdout".into());
return Err(anyhow!("instruments errored: {} {}", stderr, stdout));
}
Ok(trace_filepath)
}
/// Get the tty of the current terminal session, e.g. `/dev/ttys001`.
///
/// Returns `Ok(None)` when the process is not attached to a terminal.
fn get_tty() -> Result<Option<String>> {
    let ps_output = Command::new("ps")
        .arg("otty=")
        .arg(std::process::id().to_string())
        .output()?;
    let tty = String::from_utf8(ps_output.stdout)?
        .split_whitespace()
        .next()
        .map(|name| format!("/dev/{}", name));
    Ok(tty)
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn semvers_can_be_parsed() {
assert_eq!(semver_from_utf8(b"2.3.4").unwrap(), Version::parse("2.3.4").unwrap());
assert_eq!(semver_from_utf8(b"11.1").unwrap(), Version::parse("11.1.0").unwrap());
assert_eq!(semver_from_utf8(b"11").unwrap(), Version::parse("11.0.0").unwrap());
}
} | /// Return the template name abbreviation if available.
fn abbrev_name(template_name: &str) -> Option<&str> {
match template_name { | random_line_split |
a_fullscreen_wm.rs | //! Fullscreen Window Manager
//!
//! Implement the [`WindowManager`] trait by writing a simple window manager
//! that displays every window fullscreen. When a new window is added, the
//! last window that was visible will become invisible.
//!
//! [`WindowManager`]:../../cplwm_api/wm/trait.WindowManager.html
//!
//! Now have a look at the source code of this file, it contains a tutorial to
//! help you write the fullscreen window manager.
//!
//! You are free to remove the documentation in this file that is only part of
//! the tutorial or no longer matches the code after your changes.
//!
//! # Status
//!
//! **TODO**: Replace the question mark below with YES, NO, or PARTIAL to
//! indicate the status of this assignment. If you want to tell something
//! about this assignment to the grader, e.g., you have a bug you can't fix,
//! or you want to explain your approach, write it down after the comments
//! section.
//!
//! COMPLETED: YES
//!
//! COMMENTS: Because at the start of the assignment my Rust skills
//! were poor, the implementation could be better. If I knew then what I know
//! now I would do it totally different. I would make 2 vec's and a focus variable,
//! all of type WindowWithInfo, 1 vec for the windows above the focus variable
//! and one vec for the windows under the focus variable (maybe in reverse order).
//!
//! I used a different file for the tests.
// Because not all methods are implemented yet, some arguments are unused,
// which generates warnings. The annotation below disables this warning.
// Remove this annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]:../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
    /// A vector of windows, the first one is on the bottom, the last one is
    /// on top, and also the only visible window.
    pub windows: VecDeque<Window>,
    /// The screen size; every window is resized to fill it.
    pub screen: Screen,
    /// The window that currently has the focus, or `None` when no window
    /// is focused.
    pub focused_window: Option<Window>,
}
/// The errors that this window manager can return.
///
/// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]:../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
    /// This window is not known by the window manager.
    UnknownWindow(Window),
    /// This window is already managed by the window manager, so it cannot
    /// be added a second time.
    WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
    /// Render the error as `"<description>: <window>"`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let (description, window) = match *self {
            FullscreenWMError::UnknownWindow(ref w) => ("Unknown window", w),
            FullscreenWMError::WindowAlreadyManaged(ref w) => ("Window already managed", w),
        };
        write!(f, "{}: {}", description, window)
    }
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
    /// Short, static description of the error kind (the window id is not
    /// included here; see the `Display` impl for the full message).
    fn description(&self) -> &'static str {
        match *self {
            FullscreenWMError::UnknownWindow(_) => "Unknown window",
            FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
        }
    }
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen, start with an empty window collection and no
/// focused window.
fn new(screen: Screen) -> FullscreenWM {
    FullscreenWM {
        windows: VecDeque::new(),
        focused_window: None,
        screen: screen,
    }
}
/// Return all managed windows, bottom-most first.
///
/// A fresh `Vec` of clones is returned so callers cannot mutate the
/// manager's internal state.
fn get_windows(&self) -> Vec<Window> {
    // Clone each element while collecting; cloning the whole `VecDeque`
    // first and then converting it would copy the data twice.
    self.windows.iter().cloned().collect()
}
/// Return the focused window, if any.
///
/// Focus is tracked in the dedicated `focused_window` field rather than
/// derived from the position in `windows`.
fn get_focused_window(&self) -> Option<Window> {
    self.focused_window
}
/// Add the window on top of the stack and give it the focus.
///
/// Returns a `WindowAlreadyManaged` error when the window is already
/// tracked by this window manager. The geometry information carried by
/// `window_with_info` is ignored: every window is displayed fullscreen.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
    if self.is_managed(window_with_info.window) {
        return Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window));
    }
    self.windows.push_back(window_with_info.window);
    self.focus_window(Some(window_with_info.window))
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
// The last window is focused...
focused_window: self.get_focused_window(),
//... and should fill the screen. The other windows are
// simply hidden.
windows: vec![(*w, fullscreen_geometry)],
}
}
// Otherwise, return an empty WindowLayout
None => WindowLayout::new(),
}
}
// Before you continue any further, first have a look at the bottom of
// this file, where we show you how to write unit tests.
/// Try this yourself
///
/// Don't forget that when the argument is `None`, i.e. no window should
/// be focused, `get_focused_window()` must return `None` afterwards. The
/// `focused_window` field of the `WindowLayout` must also be `None`.
///
/// You will probably have to change the code above (method
/// implementations as well as the `FullscreenWM` struct) to achieve this.
fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> {
// self.focused_window = window;
match window {
Some(i_window) => {
match self.windows.iter().position(|w| *w == i_window) {
None => Err(FullscreenWMError::UnknownWindow(i_window)),
Some(i) => {
// Set window to front
self.windows.remove(i);
self.windows.push_back(i_window);
self.focused_window = Some(i_window);
Ok(())
}
}
}
None => {
self.focused_window = None;
Ok(())
}
}
}
/// Try this yourself
fn cycle_focus(&mut self, dir: PrevOrNext) {
// You will probably notice here that a `Vec` is not the ideal data
// structure to implement this function. Feel free to replace the
// `Vec` with another data structure.
// Do nothing when there are no windows.
if self.windows.is_empty() {
return ();
}
// if self.get_focused_window() == None {
//
match dir {
PrevOrNext::Prev => {
let last_window = self.windows.pop_back().unwrap();
self.windows.push_front(last_window);
}
PrevOrNext::Next => {
let first_window = self.windows.pop_front().unwrap();
self.windows.push_back(first_window);
}
}
// When there is only one window,
// focus it if currently no window is focused, otherwise do nothing.
// When no window is focused, any window may become focused.
let window = self.windows.back().map(|w| *w);
match self.focus_window(window) {
Ok(_) => {}
Err(e) => println!("Error focus_window {}", e),
}
return ();
}
/// Try this yourself
// It should reflect the current state (location/size, floating or tiled,
// fullscreen or not) of the window.
fn get_window_info(&self, window: Window) -> Result<WindowWithInfo, Self::Error> {
let fullscreen_geometry = self.screen.to_geometry();
if self.is_managed(window) {
Ok(WindowWithInfo {
window: window,
geometry: fullscreen_geometry,
float_or_tile: FloatOrTile::Tile,
fullscreen: true,
})
} else |
}
/// Try this yourself
fn get_screen(&self) -> Screen {
self.screen
}
/// Try this yourself
fn resize_screen(&mut self, screen: Screen) {
self.screen = screen
}
}
#[cfg(test)]
mod a_fullscreen_wm_tests {
include!("a_fullscreen_wm_tests.rs");
}
| {
Err(FullscreenWMError::UnknownWindow(window))
} | conditional_block |
a_fullscreen_wm.rs | //! Fullscreen Window Manager
//!
//! Implement the [`WindowManager`] trait by writing a simple window manager
//! that displays every window fullscreen. When a new window is added, the
//! last window that was visible will become invisible.
//!
//! [`WindowManager`]:../../cplwm_api/wm/trait.WindowManager.html
//!
//! Now have a look at the source code of this file, it contains a tutorial to
//! help you write the fullscreen window manager.
//!
//! You are free to remove the documentation in this file that is only part of
//! the tutorial or no longer matches the code after your changes.
//!
//! # Status
//!
//! **TODO**: Replace the question mark below with YES, NO, or PARTIAL to
//! indicate the status of this assignment. If you want to tell something
//! about this assignment to the grader, e.g., you have a bug you can't fix,
//! or you want to explain your approach, write it down after the comments
//! section.
//!
//! COMPLETED: YES
//!
//! COMMENTS: Because at the start of the assignment my Rust skills
//! were poor, the implementation could be better. If I knew then what I know
//! now I would do it totally different. I would make 2 vec's and a focus variable,
//! all of type WindowWithInfo, 1 vec for the windows above the focus variable
//! and one vec for the windows under the focus variable (maybe in reverse order).
//!
//! I used a different file for the tests.
// Because not all methods are implemented yet, some arguments are unused,
// which generates warnings. The annotation below disables this warning.
// Remove this annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]:../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
/// A vector of windows, the first one is on the bottom, the last one is
/// on top, and also the only visible window.
pub windows: VecDeque<Window>,
/// We need to know which size the fullscreen window must be.
pub screen: Screen,
/// Window that is focused
pub focused_window: Option<Window>,
}
/// The errors that this window manager can return.
///
/// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]:../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
/// This window is not known by the window manager.
UnknownWindow(Window),
/// Window Already Managed
WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
FullscreenWMError::WindowAlreadyManaged(ref window) => {
write!(f, "Window already managed: {}", window)
}
}
}
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn description(&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen and make a new empty `Vec`.
fn new(screen: Screen) -> FullscreenWM {
FullscreenWM {
windows: VecDeque::new(),
screen: screen,
focused_window: None,
}
}
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
/// The last window in the list is the focused one.
///
/// Note that the `last` method of `Vec` returns an `Option`.
fn get_focused_window(&self) -> Option<Window> {
self.focused_window
}
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if!self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
// The last window is focused...
focused_window: self.get_focused_window(),
//... and should fill the screen. The other windows are
// simply hidden.
windows: vec![(*w, fullscreen_geometry)],
}
}
// Otherwise, return an empty WindowLayout
None => WindowLayout::new(),
}
}
// Before you continue any further, first have a look at the bottom of
// this file, where we show you how to write unit tests.
/// Try this yourself
///
/// Don't forget that when the argument is `None`, i.e. no window should
/// be focused, `get_focused_window()` must return `None` afterwards. The
/// `focused_window` field of the `WindowLayout` must also be `None`.
///
/// You will probably have to change the code above (method
/// implementations as well as the `FullscreenWM` struct) to achieve this.
fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> | }
/// Try this yourself
fn cycle_focus(&mut self, dir: PrevOrNext) {
// You will probably notice here that a `Vec` is not the ideal data
// structure to implement this function. Feel free to replace the
// `Vec` with another data structure.
// Do nothing when there are no windows.
if self.windows.is_empty() {
return ();
}
// if self.get_focused_window() == None {
//
match dir {
PrevOrNext::Prev => {
let last_window = self.windows.pop_back().unwrap();
self.windows.push_front(last_window);
}
PrevOrNext::Next => {
let first_window = self.windows.pop_front().unwrap();
self.windows.push_back(first_window);
}
}
// When there is only one window,
// focus it if currently no window is focused, otherwise do nothing.
// When no window is focused, any window may become focused.
let window = self.windows.back().map(|w| *w);
match self.focus_window(window) {
Ok(_) => {}
Err(e) => println!("Error focus_window {}", e),
}
return ();
}
/// Try this yourself
// It should reflect the current state (location/size, floating or tiled,
// fullscreen or not) of the window.
fn get_window_info(&self, window: Window) -> Result<WindowWithInfo, Self::Error> {
let fullscreen_geometry = self.screen.to_geometry();
if self.is_managed(window) {
Ok(WindowWithInfo {
window: window,
geometry: fullscreen_geometry,
float_or_tile: FloatOrTile::Tile,
fullscreen: true,
})
} else {
Err(FullscreenWMError::UnknownWindow(window))
}
}
/// Try this yourself
fn get_screen(&self) -> Screen {
self.screen
}
/// Try this yourself
fn resize_screen(&mut self, screen: Screen) {
self.screen = screen
}
}
#[cfg(test)]
mod a_fullscreen_wm_tests {
include!("a_fullscreen_wm_tests.rs");
}
| {
// self.focused_window = window;
match window {
Some(i_window) => {
match self.windows.iter().position(|w| *w == i_window) {
None => Err(FullscreenWMError::UnknownWindow(i_window)),
Some(i) => {
// Set window to front
self.windows.remove(i);
self.windows.push_back(i_window);
self.focused_window = Some(i_window);
Ok(())
}
}
}
None => {
self.focused_window = None;
Ok(())
}
} | identifier_body |
a_fullscreen_wm.rs | //! Fullscreen Window Manager
//!
//! Implement the [`WindowManager`] trait by writing a simple window manager
//! that displays every window fullscreen. When a new window is added, the
//! last window that was visible will become invisible.
//!
//! [`WindowManager`]:../../cplwm_api/wm/trait.WindowManager.html
//!
//! Now have a look at the source code of this file, it contains a tutorial to
//! help you write the fullscreen window manager.
//!
//! You are free to remove the documentation in this file that is only part of
//! the tutorial or no longer matches the code after your changes.
//!
//! # Status
//!
//! **TODO**: Replace the question mark below with YES, NO, or PARTIAL to
//! indicate the status of this assignment. If you want to tell something
//! about this assignment to the grader, e.g., you have a bug you can't fix,
//! or you want to explain your approach, write it down after the comments
//! section.
//!
//! COMPLETED: YES
//!
//! COMMENTS: Because at the start of the assignment my Rust skills
//! were poor, the implementation could be better. If I knew then what I know
//! now I would do it totally different. I would make 2 vec's and a focus variable,
//! all of type WindowWithInfo, 1 vec for the windows above the focus variable
//! and one vec for the windows under the focus variable (maybe in reverse order).
//!
//! I used a different file for the tests.
// Because not all methods are implemented yet, some arguments are unused,
// which generates warnings. The annotation below disables this warning.
// Remove this annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]:../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
/// A vector of windows, the first one is on the bottom, the last one is
/// on top, and also the only visible window.
pub windows: VecDeque<Window>,
/// We need to know which size the fullscreen window must be.
pub screen: Screen,
/// Window that is focused
pub focused_window: Option<Window>, | /// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]:../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
/// This window is not known by the window manager.
UnknownWindow(Window),
/// Window Already Managed
WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
FullscreenWMError::UnknownWindow(ref window) => write!(f, "Unknown window: {}", window),
FullscreenWMError::WindowAlreadyManaged(ref window) => {
write!(f, "Window already managed: {}", window)
}
}
}
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn description(&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
/// We use `FullscreenWMError` as our `Error` type.
type Error = FullscreenWMError;
/// The constructor is straightforward.
///
/// Track the given screen and make a new empty `Vec`.
fn new(screen: Screen) -> FullscreenWM {
FullscreenWM {
windows: VecDeque::new(),
screen: screen,
focused_window: None,
}
}
/// The `windows` field contains all the windows we manage.
///
/// Why do we need a `clone` here?
fn get_windows(&self) -> Vec<Window> {
self.windows.clone().into_iter().collect()
}
/// The last window in the list is the focused one.
///
/// Note that the `last` method of `Vec` returns an `Option`.
fn get_focused_window(&self) -> Option<Window> {
self.focused_window
}
/// To add a window, just push it onto the end the `windows` `Vec`.
///
/// We could choose to return an error when the window is already managed
/// by the window manager, but in this case we just do nothing. You are
/// free to define another error to handle this case.
///
/// Note that we completely ignore the information that comes along with
/// the info, this *could* lead to issues in later assignments.
fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
if!self.is_managed(window_with_info.window) {
self.windows.push_back(window_with_info.window);
self.focus_window(Some(window_with_info.window))
} else {
Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
}
}
/// To remove a window, just remove it from the `windows` `Vec`.
///
/// First we look up the position (or index) of the window in `windows`,
/// and then remove it unless the window does not occur in the `Vec`, in
/// which case we return an error.
fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
match self.windows.iter().position(|w| *w == window) {
None => Err(FullscreenWMError::UnknownWindow(window)),
Some(i) => {
self.windows.remove(i);
let last_window = self.windows.back().map(|w| *w);
self.focus_window(last_window)
// if self.get_focused_window() == Some(window) {
// self.focus_window(None);
}
}
}
/// Now the most important part: calculating the `WindowLayout`.
///
/// First we build a `Geometry` for a fullscreen window using the
/// `to_geometry` method: it has the same width and height as the screen.
///
/// Then we look at the last window, remember that the `last()` method of
/// `Vec` returns an `Option`.
///
/// * When the `Option` contains `Some(w)`, we know that there was at
/// least one window, and `w`, being the last window in the `Vec` should
/// be focused. As the other windows will not be visible, the `windows`
/// field of `WindowLayout` can just be a `Vec` with one element: the
/// one window along with the fullscreen `Geometry`.
///
/// * When the `Option` is `None`, we know that there are no windows, so
/// we can just return an empty `WindowLayout`.
///
fn get_window_layout(&self) -> WindowLayout {
let fullscreen_geometry = self.screen.to_geometry();
match self.windows.back() {
// If there is at least one window.
Some(w) => {
WindowLayout {
// The last window is focused...
focused_window: self.get_focused_window(),
//... and should fill the screen. The other windows are
// simply hidden.
windows: vec![(*w, fullscreen_geometry)],
}
}
// Otherwise, return an empty WindowLayout
None => WindowLayout::new(),
}
}
// Before you continue any further, first have a look at the bottom of
// this file, where we show you how to write unit tests.
/// Try this yourself
///
/// Don't forget that when the argument is `None`, i.e. no window should
/// be focused, `get_focused_window()` must return `None` afterwards. The
/// `focused_window` field of the `WindowLayout` must also be `None`.
///
/// You will probably have to change the code above (method
/// implementations as well as the `FullscreenWM` struct) to achieve this.
fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> {
// self.focused_window = window;
match window {
Some(i_window) => {
match self.windows.iter().position(|w| *w == i_window) {
None => Err(FullscreenWMError::UnknownWindow(i_window)),
Some(i) => {
// Set window to front
self.windows.remove(i);
self.windows.push_back(i_window);
self.focused_window = Some(i_window);
Ok(())
}
}
}
None => {
self.focused_window = None;
Ok(())
}
}
}
/// Try this yourself
fn cycle_focus(&mut self, dir: PrevOrNext) {
// You will probably notice here that a `Vec` is not the ideal data
// structure to implement this function. Feel free to replace the
// `Vec` with another data structure.
// Do nothing when there are no windows.
if self.windows.is_empty() {
return ();
}
// if self.get_focused_window() == None {
//
match dir {
PrevOrNext::Prev => {
let last_window = self.windows.pop_back().unwrap();
self.windows.push_front(last_window);
}
PrevOrNext::Next => {
let first_window = self.windows.pop_front().unwrap();
self.windows.push_back(first_window);
}
}
// When there is only one window,
// focus it if currently no window is focused, otherwise do nothing.
// When no window is focused, any window may become focused.
let window = self.windows.back().map(|w| *w);
match self.focus_window(window) {
Ok(_) => {}
Err(e) => println!("Error focus_window {}", e),
}
return ();
}
/// Try this yourself
// It should reflect the current state (location/size, floating or tiled,
// fullscreen or not) of the window.
fn get_window_info(&self, window: Window) -> Result<WindowWithInfo, Self::Error> {
let fullscreen_geometry = self.screen.to_geometry();
if self.is_managed(window) {
Ok(WindowWithInfo {
window: window,
geometry: fullscreen_geometry,
float_or_tile: FloatOrTile::Tile,
fullscreen: true,
})
} else {
Err(FullscreenWMError::UnknownWindow(window))
}
}
/// Try this yourself
fn get_screen(&self) -> Screen {
self.screen
}
/// Try this yourself
fn resize_screen(&mut self, screen: Screen) {
self.screen = screen
}
}
#[cfg(test)]
mod a_fullscreen_wm_tests {
include!("a_fullscreen_wm_tests.rs");
} | }
/// The errors that this window manager can return.
/// | random_line_split |
a_fullscreen_wm.rs | //! Fullscreen Window Manager
//!
//! Implement the [`WindowManager`] trait by writing a simple window manager
//! that displays every window fullscreen. When a new window is added, the
//! last window that was visible will become invisible.
//!
//! [`WindowManager`]:../../cplwm_api/wm/trait.WindowManager.html
//!
//! Now have a look at the source code of this file, it contains a tutorial to
//! help you write the fullscreen window manager.
//!
//! You are free to remove the documentation in this file that is only part of
//! the tutorial or no longer matches the code after your changes.
//!
//! # Status
//!
//! **TODO**: Replace the question mark below with YES, NO, or PARTIAL to
//! indicate the status of this assignment. If you want to tell something
//! about this assignment to the grader, e.g., you have a bug you can't fix,
//! or you want to explain your approach, write it down after the comments
//! section.
//!
//! COMPLETED: YES
//!
//! COMMENTS: Because at the start of the assignment my Rust skills
//! were poor, the implementation could be better. If I knew then what I know
//! now I would do it totally different. I would make 2 vec's and a focus variable,
//! all of type WindowWithInfo, 1 vec for the windows above the focus variable
//! and one vec for the windows under the focus variable (maybe in reverse order).
//!
//! I used a different file for the tests.
// Because not all methods are implemented yet, some arguments are unused,
// which generates warnings. The annotation below disables this warning.
// Remove this annotation when you have implemented all methods, so you get
// warned about variables that you did not use by mistake.
// We import std::error and std::format so we can say error::Error instead of
// std::error::Error, etc.
use std::error;
use std::fmt;
use std::collections::VecDeque;
// Import some types and the WindowManager trait from the cplwm_api crate
// (defined in the api folder).
use cplwm_api::types::{FloatOrTile, PrevOrNext, Screen, Window, WindowLayout, WindowWithInfo};
use cplwm_api::wm::WindowManager;
/// You are free to choose the name for your window manager. As we will use
/// automated tests when grading your assignment, indicate here the name of
/// your window manager data type so we can just use `WMName` instead of
/// having to manually figure out your window manager name.
pub type WMName = FullscreenWM;
/// The FullscreenWM struct
///
/// The first thing to do when writing a window manager, is to define a struct
/// (or enum) that will contain the state of the window manager, e.g. the
/// managed windows along with their geometries, the focused window, etc.
///
/// Depending on the layout and the functionality the window manager provides,
/// this can vary from simple `Vec`s to trees, hashmaps, etc. You can have a
/// look at the [collections](https://doc.rust-lang.org/std/collections/) Rust
/// provides.
///
/// Remember that you are free to add additional dependencies to your project,
/// e.g., for another type of data structure. But this is certainly not
/// required. For more information, see the Hints & Tricks section of the
/// assignment.
///
/// # Example Representation
///
/// The fullscreen window manager that we are implementing is very simple: it
/// just needs to keep track of all the windows that were added and remember
/// which one is focused. It is not even necessary to remember the geometries
/// of the windows, as they will all be resized to the size of the screen.
///
/// A possible data structure to keep track of the windows is a simple `Vec`:
/// the last element in the vector is the window on top, which is also the
/// only window to display. Why not the first element? Because it is easier to
/// add an element to the end of a vector. This is convenient, as adding a new
/// window should also put it on top of the other windows.
///
/// Another thing we need to keep track of is the `Screen`, because we must
/// resize the windows the size of the screen. A `Screen` is passed via the
/// `new` method of the trait and the `resize_screen` method of the trait
/// updates the screen with a new one.
///
/// These two fields are enough to get started, which does not mean that they
/// are enough to correctly implement this window manager. As you will notice
/// in a short while, there is a problem with this representation. Feel free
/// to add/replace/remove fields.
///
/// To understand the `#derive[(..)]` line before the struct, read the
/// [Supertraits] section of the `WindowManager` trait.
///
/// [Supertraits]: ../../cplwm_api/wm/trait.WindowManager.html#supertraits
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
pub struct FullscreenWM {
    /// The stack of managed windows: the first one is on the bottom, the
    /// last one is on top, and the top one is the only visible window.
    pub windows: VecDeque<Window>,
    /// The screen every window is resized to (all windows are fullscreen).
    pub screen: Screen,
    /// The window that currently has the focus, or `None` when no window
    /// is focused.
    pub focused_window: Option<Window>,
}
/// The errors that this window manager can return.
///
/// For more information about why you need this, read the documentation of
/// the associated [Error] type of the `WindowManager` trait.
///
/// In the code below, we would like to return an error when we are asked to
/// do something with a window that we do not manage, so we define an enum
/// `FullscreenWMError` with one variant: `UnknownWindow`.
///
/// Feel free to add or remove variants from this enum. You may also replace
/// it with a type or struct if you wish to do so.
///
/// [Error]:../../cplwm_api/wm/trait.WindowManager.html#associatedtype.Error
#[derive(Debug)]
pub enum FullscreenWMError {
    /// This window is not known by the window manager.
    UnknownWindow(Window),
    /// This window is already managed by the window manager, so it cannot
    /// be added a second time.
    WindowAlreadyManaged(Window),
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl fmt::Display for FullscreenWMError {
    /// Render the error as a human-readable `"<label>: <window>"` message.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let (label, window) = match self {
            FullscreenWMError::UnknownWindow(w) => ("Unknown window", w),
            FullscreenWMError::WindowAlreadyManaged(w) => ("Window already managed", w),
        };
        write!(f, "{}: {}", label, window)
    }
}
// This code is explained in the documentation of the associated [Error] type
// of the `WindowManager` trait.
impl error::Error for FullscreenWMError {
fn | (&self) -> &'static str {
match *self {
FullscreenWMError::UnknownWindow(_) => "Unknown window",
FullscreenWMError::WindowAlreadyManaged(_) => "Window Already Managed",
}
}
}
// Now we start implementing our window manager
// Now we start implementing our window manager
impl WindowManager for FullscreenWM {
    /// We use `FullscreenWMError` as our `Error` type.
    type Error = FullscreenWMError;

    /// Track the given screen and start with no windows and no focus.
    fn new(screen: Screen) -> FullscreenWM {
        FullscreenWM {
            windows: VecDeque::new(),
            screen: screen,
            focused_window: None,
        }
    }

    /// Return all managed windows, bottom-most first.
    ///
    /// The deque is cloned so the caller gets an owned `Vec` without
    /// borrowing the window manager's internal state.
    fn get_windows(&self) -> Vec<Window> {
        self.windows.clone().into_iter().collect()
    }

    /// Return the currently focused window, or `None` when nothing is focused.
    fn get_focused_window(&self) -> Option<Window> {
        self.focused_window
    }

    /// Add a window on top of the stack and give it focus.
    ///
    /// Returns a `WindowAlreadyManaged` error when the window is already
    /// managed. The geometry and flags inside `window_with_info` are
    /// ignored: every window is displayed fullscreen anyway (this *could*
    /// matter in later assignments).
    fn add_window(&mut self, window_with_info: WindowWithInfo) -> Result<(), Self::Error> {
        if !self.is_managed(window_with_info.window) {
            self.windows.push_back(window_with_info.window);
            self.focus_window(Some(window_with_info.window))
        } else {
            Err(FullscreenWMError::WindowAlreadyManaged(window_with_info.window))
        }
    }

    /// Remove a window, then focus the window that is now on top (if any).
    ///
    /// Returns an `UnknownWindow` error when the window is not managed.
    fn remove_window(&mut self, window: Window) -> Result<(), Self::Error> {
        match self.windows.iter().position(|w| *w == window) {
            None => Err(FullscreenWMError::UnknownWindow(window)),
            Some(i) => {
                self.windows.remove(i);
                // `back()` is `None` when the last window was just removed;
                // passing `None` to `focus_window` then clears the focus.
                let last_window = self.windows.back().map(|w| *w);
                self.focus_window(last_window)
            }
        }
    }

    /// Compute the `WindowLayout`: the top-most window fills the screen.
    ///
    /// All other windows are hidden, so the layout lists at most one
    /// window, paired with the fullscreen geometry. With no windows an
    /// empty `WindowLayout` is returned.
    fn get_window_layout(&self) -> WindowLayout {
        let fullscreen_geometry = self.screen.to_geometry();
        match self.windows.back() {
            // If there is at least one window.
            Some(w) => {
                WindowLayout {
                    // The focus is tracked in its own field (it can be `None`
                    // even while windows exist)...
                    focused_window: self.get_focused_window(),
                    // ... and the top window should fill the screen. The
                    // other windows are simply hidden.
                    windows: vec![(*w, fullscreen_geometry)],
                }
            }
            // Otherwise, return an empty WindowLayout
            None => WindowLayout::new(),
        }
    }

    /// Focus the given window, or clear the focus when `window` is `None`.
    ///
    /// Focusing a managed window also raises it to the top of the stack,
    /// so it becomes the visible window. Returns an `UnknownWindow` error
    /// for a window that is not managed.
    fn focus_window(&mut self, window: Option<Window>) -> Result<(), Self::Error> {
        match window {
            Some(i_window) => {
                match self.windows.iter().position(|w| *w == i_window) {
                    None => Err(FullscreenWMError::UnknownWindow(i_window)),
                    Some(i) => {
                        // Move the window to the back (= top) of the stack.
                        self.windows.remove(i);
                        self.windows.push_back(i_window);
                        self.focused_window = Some(i_window);
                        Ok(())
                    }
                }
            }
            None => {
                self.focused_window = None;
                Ok(())
            }
        }
    }

    /// Cycle the focus through the managed windows.
    ///
    /// The deque is rotated (`Prev` moves the top window to the bottom,
    /// `Next` moves the bottom window to the top) and whichever window
    /// ends up on top is focused. Does nothing when there are no windows.
    fn cycle_focus(&mut self, dir: PrevOrNext) {
        // Do nothing when there are no windows.
        if self.windows.is_empty() {
            return ();
        }
        match dir {
            PrevOrNext::Prev => {
                let last_window = self.windows.pop_back().unwrap();
                self.windows.push_front(last_window);
            }
            PrevOrNext::Next => {
                let first_window = self.windows.pop_front().unwrap();
                self.windows.push_back(first_window);
            }
        }
        // Focus the window now on top. `focus_window` should not fail here
        // (the window was just taken from the deque), but since this method
        // cannot return an error, a failure is only logged.
        let window = self.windows.back().map(|w| *w);
        match self.focus_window(window) {
            Ok(_) => {}
            Err(e) => println!("Error focus_window {}", e),
        }
        return ();
    }

    /// Return the current state of the given window.
    ///
    /// In this window manager every managed window is reported as tiled and
    /// fullscreen, with the full screen as its geometry. Returns an
    /// `UnknownWindow` error for an unmanaged window.
    fn get_window_info(&self, window: Window) -> Result<WindowWithInfo, Self::Error> {
        let fullscreen_geometry = self.screen.to_geometry();
        if self.is_managed(window) {
            Ok(WindowWithInfo {
                window: window,
                geometry: fullscreen_geometry,
                float_or_tile: FloatOrTile::Tile,
                fullscreen: true,
            })
        } else {
            Err(FullscreenWMError::UnknownWindow(window))
        }
    }

    /// Return the screen currently tracked by the window manager.
    fn get_screen(&self) -> Screen {
        self.screen
    }

    /// Replace the tracked screen; the layout will use the new size.
    fn resize_screen(&mut self, screen: Screen) {
        self.screen = screen
    }
}
#[cfg(test)]
mod a_fullscreen_wm_tests {
include!("a_fullscreen_wm_tests.rs");
}
| description | identifier_name |
lib.rs | #[macro_use]
extern crate include_dir;
pub mod cosmogony;
mod country_finder;
pub mod file_format;
mod hierarchy_builder;
mod mutable_slice;
pub mod zone;
pub mod zone_typer;
pub use crate::cosmogony::{Cosmogony, CosmogonyMetadata, CosmogonyStats};
use crate::country_finder::CountryFinder;
use crate::file_format::OutputFormat;
use crate::hierarchy_builder::{build_hierarchy, find_inclusions};
use crate::mutable_slice::MutableSlice;
use failure::Error;
use failure::ResultExt;
use log::{debug, info};
use osmpbfreader::{OsmObj, OsmPbfReader};
use std::collections::BTreeMap;
use std::fs::File;
use std::path::Path;
pub use crate::zone::{Zone, ZoneIndex, ZoneType};
/// Keep only OSM relations tagged as administrative boundaries with an
/// explicit `admin_level`.
pub fn is_admin(obj: &OsmObj) -> bool {
    if let OsmObj::Relation(rel) = obj {
        let is_boundary = rel
            .tags
            .get("boundary")
            .map_or(false, |v| v == "administrative");
        is_boundary && rel.tags.get("admin_level").is_some()
    } else {
        false
    }
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if!is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
/// Read all administrative zones from the pbf without their geometries.
///
/// Faster than [`get_zones_and_stats`]: relations are streamed and no
/// dependency objects are collected, so no boundary can be built.
pub fn get_zones_and_stats_without_geom(
    pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
    info!("Reading pbf without geometries...");
    let stats = CosmogonyStats::default();
    let mut zones = Vec::new();
    for obj in pbf.par_iter().map(Result::unwrap) {
        match obj {
            // Only administrative relations become zones; an empty
            // dependency map is passed since geometries are not built.
            OsmObj::Relation(ref relation) if is_admin(&obj) => {
                let next_index = ZoneIndex { index: zones.len() };
                if let Some(zone) =
                    zone::Zone::from_osm(relation, &BTreeMap::default(), next_index)
                {
                    zones.push(zone);
                }
            }
            _ => {}
        }
    }
    Ok((zones, stats))
}
/// Resolve the country code for a zone.
///
/// An explicitly provided `country_code` wins (uppercased); otherwise the
/// country is looked up via the `CountryFinder` using the zone's inclusions.
fn get_country_code<'a>(
    country_finder: &'a CountryFinder,
    zone: &zone::Zone,
    country_code: &'a Option<String>,
    inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
    match *country_code {
        Some(ref c) => Some(c.to_uppercase()),
        None => country_finder.find_zone_country(&zone, &inclusions),
    }
}
/// Assign a `ZoneType` to every zone, using libpostal's per-country rules.
///
/// Each zone first needs a country: either the `country_code` override or a
/// lookup through the `CountryFinder`. Zones whose country cannot be found,
/// or whose country/admin level has no typing rule, are left untyped and
/// counted in `stats`.
fn type_zones(
    zones: &mut [zone::Zone],
    stats: &mut CosmogonyStats,
    country_code: Option<String>,
    inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
    use rayon::prelude::*;
    info!("reading libpostal's rules");
    let zone_typer = zone_typer::ZoneTyper::new()?;
    info!("creating a countrys rtree");
    let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
    // Without any country at all there is no rule set to apply: bail out.
    if country_code.is_none() && country_finder.is_empty() {
        return Err(failure::err_msg(
            "no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
        ));
    }
    info!("typing zones");
    // We type all the zones in parallel.
    // To not mutate the zones while doing it
    // (the borrow checker would not be happy since we also need access to the
    // zone's vector to be able to transform a ZoneIndex into a zone),
    // we collect all the types in a Vector, and assign each zone's zone_type
    // as a post process.
    let zones_type: Vec<_> = zones
        .par_iter()
        .map(|z| {
            get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
                .map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
        })
        .collect();
    // Second pass: apply the computed types and record failures in `stats`.
    zones
        .iter_mut()
        .zip(zones_type.into_iter())
        .for_each(|(z, zone_type)| match zone_type {
            None => {
                info!(
                    "impossible to find a country for {} ({}), skipping",
                    z.osm_id, z.name
                );
                stats.zone_without_country += 1;
            }
            Some(Ok(t)) => z.zone_type = Some(t),
            Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
                info!("impossible to find rules for country {}", c);
                *stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
            }
            Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
                debug!(
                    "impossible to find a rule for level {:?} for country {}",
                    lvl, country
                );
                *stats
                    .unhandled_admin_level
                    .entry(country)
                    .or_insert_with(BTreeMap::new)
                    .entry(lvl.unwrap_or(0))
                    .or_insert(0) += 1;
            }
        });
    Ok(())
}
/// Compute the label of every zone.
///
/// A zone's label presumably depends on the other zones in its hierarchy
/// (TODO confirm against `Zone::compute_labels`), so `MutableSlice` is used
/// to split the borrow: the zone at `i` is borrowed mutably while the rest
/// of the slice stays readable.
fn compute_labels(zones: &mut [Zone]) {
    info!("computing all zones's label");
    let nb_zones = zones.len();
    for i in 0..nb_zones {
        let (mslice, z) = MutableSlice::init(zones, i);
        z.compute_labels(&mslice);
    }
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
/// Drop zones that received no `zone_type` at all.
///
/// Note that `ZoneType::NonAdministrative` is a real type and is kept; only
/// zones with `zone_type == None` are removed.
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
    info!("cleaning untagged zones");
    let before = zones.len();
    zones.retain(|zone| zone.zone_type.is_some());
    let removed = before - zones.len();
    info!("{} zones cleaned", removed);
}
fn create_ontology(
zones: &mut Vec<zone::Zone>,
stats: &mut CosmogonyStats,
country_code: Option<String>,
) -> Result<(), Error> |
/// Build a full `Cosmogony` from an OSM pbf file.
///
/// Reads the administrative zones (with or without geometries depending on
/// `with_geom`), builds the zone ontology, and fills in the metadata.
///
/// # Errors
///
/// Fails when the file cannot be opened, the pbf is invalid, or the
/// ontology cannot be built (e.g. no country found and none provided).
pub fn build_cosmogony(
    pbf_path: String,
    with_geom: bool,
    country_code: Option<String>,
) -> Result<Cosmogony, Error> {
    let path = Path::new(&pbf_path);
    let file = File::open(path).context("no pbf file")?;
    let mut parsed_pbf = OsmPbfReader::new(file);
    let (mut zones, mut stats) = if with_geom {
        get_zones_and_stats(&mut parsed_pbf)?
    } else {
        get_zones_and_stats_without_geom(&mut parsed_pbf)?
    };
    create_ontology(&mut zones, &mut stats, country_code)?;
    stats.compute(&zones);
    let osm_filename = path
        .file_name()
        .and_then(|f| f.to_str())
        .map(|f| f.to_string())
        // `unwrap_or_else` so the fallback String is only allocated when the
        // file name is actually missing/non-UTF-8 (the old `unwrap_or(...)`
        // allocated it unconditionally).
        .unwrap_or_else(|| "invalid file name".to_string());
    Ok(Cosmogony {
        zones,
        meta: CosmogonyMetadata {
            osm_filename,
            stats,
        },
    })
}
/// Stream Cosmogony's Zone from a Reader
/// Stream Cosmogony's Zone from a Reader
///
/// Each line of the reader is expected to hold one JSON-encoded `Zone`;
/// read errors and JSON errors are both surfaced as `failure::Error`.
pub fn read_zones(
    reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
    reader.lines().map(|line| {
        let line = line.map_err(|e| failure::err_msg(e.to_string()))?;
        serde_json::from_str(&line).map_err(|e| failure::err_msg(e.to_string()))
    })
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
/// Load a cosmogony from a file
///
/// The on-disk format is inferred from the file name's extension.
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
    let format = OutputFormat::from_filename(input)?;
    let file = std::fs::File::open(input)?;
    load_cosmogony(std::io::BufReader::new(file), format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the whole cosmogony is loaded
/// Return an iterator on the zones
///
/// For stream formats (`JsonStream`, `JsonStreamGz`) the zones are truly
/// streamed; for document formats (`Json`, `JsonGz`) the whole cosmogony is
/// loaded first and its zones are then iterated.
pub fn read_zones_from_file(
    input: &str,
) -> Result<Box<dyn std::iter::Iterator<Item = Result<Zone, Error>>>, Error> {
    let format = OutputFormat::from_filename(input)?;
    let reader = std::io::BufReader::new(std::fs::File::open(input)?);
    match format {
        OutputFormat::Json | OutputFormat::JsonGz => {
            let cosmo = load_cosmogony(reader, format)?;
            Ok(Box::new(cosmo.zones.into_iter().map(|z| Ok(z))))
        }
        OutputFormat::JsonStream => Ok(Box::new(read_zones(reader))),
        OutputFormat::JsonStreamGz => {
            // GzDecoder over the buffered file, re-buffered so it implements
            // BufRead for line iteration.
            let gz = flate2::bufread::GzDecoder::new(reader);
            Ok(Box::new(read_zones(std::io::BufReader::new(gz))))
        }
    }
}
/// Load a cosmogony from a reader and a file_format
pub fn load_cosmogony(
    reader: impl std::io::BufRead,
    format: OutputFormat,
) -> Result<Cosmogony, Error> {
    match format {
        OutputFormat::JsonGz => {
            // Whole-document JSON, gzip-compressed: decompress, then
            // deserialize in one go.
            let r = flate2::read::GzDecoder::new(reader);
            serde_json::from_reader(r).map_err(|e| failure::err_msg(e.to_string()))
        }
        OutputFormat::Json => {
            serde_json::from_reader(reader).map_err(|e| failure::err_msg(e.to_string()))
        }
        OutputFormat::JsonStream => from_json_stream(reader),
        OutputFormat::JsonStreamGz => {
            // One JSON document per line, gzip-compressed: decompress and
            // re-buffer (the decoder alone does not implement BufRead).
            let r = flate2::bufread::GzDecoder::new(reader);
            let r = std::io::BufReader::new(r);
            from_json_stream(r)
        }
    }
}
| {
info!("creating ontology for {} zones", zones.len());
let inclusions = find_inclusions(zones);
type_zones(zones, stats, country_code, &inclusions)?;
build_hierarchy(zones, inclusions);
zones.iter_mut().for_each(|z| z.compute_names());
compute_labels(zones);
// we remove the useless zones from cosmogony
// WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
// this should be removed later on (and switch to a map by osm_id ?) as it's not elegant,
// but for the moment it'll do
clean_untagged_zones(zones);
Ok(())
} | identifier_body |
lib.rs | #[macro_use]
extern crate include_dir;
pub mod cosmogony;
mod country_finder;
pub mod file_format;
mod hierarchy_builder;
mod mutable_slice;
pub mod zone;
pub mod zone_typer;
pub use crate::cosmogony::{Cosmogony, CosmogonyMetadata, CosmogonyStats};
use crate::country_finder::CountryFinder;
use crate::file_format::OutputFormat;
use crate::hierarchy_builder::{build_hierarchy, find_inclusions};
use crate::mutable_slice::MutableSlice;
use failure::Error;
use failure::ResultExt;
use log::{debug, info};
use osmpbfreader::{OsmObj, OsmPbfReader};
use std::collections::BTreeMap;
use std::fs::File;
use std::path::Path;
pub use crate::zone::{Zone, ZoneIndex, ZoneType};
#[rustfmt::skip]
pub fn is_admin(obj: &OsmObj) -> bool {
match *obj {
OsmObj::Relation(ref rel) => {
rel.tags
.get("boundary")
.map_or(false, |v| v == "administrative")
&&
rel.tags.get("admin_level").is_some()
}
_ => false,
}
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if!is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if!is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
}
}
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
info!("cleaning untagged zones");
let nb_zones = zones.len();
zones.retain(|z| z.zone_type.is_some());
info!("{} zones cleaned", (nb_zones - zones.len()));
}
/// Build the zone ontology: inclusions, types, hierarchy, names and labels.
///
/// Runs the full post-processing pipeline over the raw zones and finally
/// drops the zones that could not be typed.
fn create_ontology(
    zones: &mut Vec<zone::Zone>,
    stats: &mut CosmogonyStats,
    country_code: Option<String>,
) -> Result<(), Error> {
    info!("creating ontology for {} zones", zones.len());
    let inclusions = find_inclusions(zones);
    type_zones(zones, stats, country_code, &inclusions)?;
    build_hierarchy(zones, inclusions);
    zones.iter_mut().for_each(|z| z.compute_names());
    compute_labels(zones);
    // we remove the useless zones from cosmogony
    // WARNING: this invalidates the indexes (we can no longer look up a Zone
    // by its id in the zones vector).
    // This should be removed later on (and switched to a map by osm_id?) as
    // it's not elegant, but for the moment it'll do.
    clean_untagged_zones(zones);
    Ok(())
}
pub fn build_cosmogony(
pbf_path: String,
with_geom: bool,
country_code: Option<String>,
) -> Result<Cosmogony, Error> {
let path = Path::new(&pbf_path);
let file = File::open(&path).context("no pbf file")?;
let mut parsed_pbf = OsmPbfReader::new(file);
let (mut zones, mut stats) = if with_geom {
get_zones_and_stats(&mut parsed_pbf)?
} else {
get_zones_and_stats_without_geom(&mut parsed_pbf)?
};
create_ontology(&mut zones, &mut stats, country_code)?;
stats.compute(&zones);
let cosmogony = Cosmogony {
zones: zones,
meta: CosmogonyMetadata {
osm_filename: path
.file_name()
.and_then(|f| f.to_str())
.map(|f| f.to_string())
.unwrap_or("invalid file name".into()),
stats: stats,
},
};
Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
reader
.lines()
.map(|l| l.map_err(|e| failure::err_msg(e.to_string())))
.map(|l| {
l.and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
})
}
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
let zones = read_zones(reader).collect::<Result<_, _>>()?;
Ok(Cosmogony {
zones,
..Default::default()
})
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
load_cosmogony(f, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the whole cosmogony is loaded
pub fn read_zones_from_file(
input: &str,
) -> Result<Box<dyn std::iter::Iterator<Item = Result<Zone, Error>>>, Error> {
let format = OutputFormat::from_filename(input)?;
let f = std::fs::File::open(&input)?;
let f = std::io::BufReader::new(f);
match format {
OutputFormat::JsonGz | OutputFormat::Json => {
let cosmo = load_cosmogony(f, format)?;
Ok(Box::new(cosmo.zones.into_iter().map(|z| Ok(z))))
}
OutputFormat::JsonStream => Ok(Box::new(read_zones(f))),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(f);
let r = std::io::BufReader::new(r);
Ok(Box::new(read_zones(r)))
}
}
}
/// Load a cosmogony from a reader and a file_format
pub fn | (
reader: impl std::io::BufRead,
format: OutputFormat,
) -> Result<Cosmogony, Error> {
match format {
OutputFormat::JsonGz => {
let r = flate2::read::GzDecoder::new(reader);
serde_json::from_reader(r).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::Json => {
serde_json::from_reader(reader).map_err(|e| failure::err_msg(e.to_string()))
}
OutputFormat::JsonStream => from_json_stream(reader),
OutputFormat::JsonStreamGz => {
let r = flate2::bufread::GzDecoder::new(reader);
let r = std::io::BufReader::new(r);
from_json_stream(r)
}
}
}
| load_cosmogony | identifier_name |
lib.rs | #[macro_use]
extern crate include_dir;
pub mod cosmogony;
mod country_finder;
pub mod file_format;
mod hierarchy_builder;
mod mutable_slice;
pub mod zone;
pub mod zone_typer;
pub use crate::cosmogony::{Cosmogony, CosmogonyMetadata, CosmogonyStats};
use crate::country_finder::CountryFinder;
use crate::file_format::OutputFormat;
use crate::hierarchy_builder::{build_hierarchy, find_inclusions};
use crate::mutable_slice::MutableSlice;
use failure::Error;
use failure::ResultExt;
use log::{debug, info};
use osmpbfreader::{OsmObj, OsmPbfReader};
use std::collections::BTreeMap;
use std::fs::File;
use std::path::Path;
pub use crate::zone::{Zone, ZoneIndex, ZoneType};
#[rustfmt::skip]
pub fn is_admin(obj: &OsmObj) -> bool {
match *obj {
OsmObj::Relation(ref rel) => |
_ => false,
}
}
pub fn get_zones_and_stats(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf with geometries...");
let objects = pbf
.get_objs_and_deps(|o| is_admin(o))
.context("invalid osm file")?;
info!("reading pbf done.");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in objects.values() {
if!is_admin(obj) {
continue;
}
if let OsmObj::Relation(ref relation) = *obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
// Ignore zone without boundary polygon for the moment
if zone.boundary.is_some() {
zones.push(zone);
}
};
}
}
return Ok((zones, stats));
}
pub fn get_zones_and_stats_without_geom(
pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
info!("Reading pbf without geometries...");
let mut zones = vec![];
let stats = CosmogonyStats::default();
for obj in pbf.par_iter().map(Result::unwrap) {
if!is_admin(&obj) {
continue;
}
if let OsmObj::Relation(ref relation) = obj {
let next_index = ZoneIndex { index: zones.len() };
if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
zones.push(zone);
}
}
}
Ok((zones, stats))
}
fn get_country_code<'a>(
country_finder: &'a CountryFinder,
zone: &zone::Zone,
country_code: &'a Option<String>,
inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
if let Some(ref c) = *country_code {
Some(c.to_uppercase())
} else {
country_finder.find_zone_country(&zone, &inclusions)
}
}
fn type_zones(
zones: &mut [zone::Zone],
stats: &mut CosmogonyStats,
country_code: Option<String>,
inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
use rayon::prelude::*;
info!("reading libpostal's rules");
let zone_typer = zone_typer::ZoneTyper::new()?;
info!("creating a countrys rtree");
let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
if country_code.is_none() && country_finder.is_empty() {
return Err(failure::err_msg(
"no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
));
}
info!("typing zones");
// We type all the zones in parallele
// To not mutate the zones while doing it
// (the borrow checker would not be happy since we also need to access to the zone's vector
// to be able to transform the ZoneIndex to a zone)
// we collect all the types in a Vector, and assign the zone's zone_type as a post process
let zones_type: Vec<_> = zones
.par_iter()
.map(|z| {
get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
.map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
})
.collect();
zones
.iter_mut()
.zip(zones_type.into_iter())
.for_each(|(z, zone_type)| match zone_type {
None => {
info!(
"impossible to find a country for {} ({}), skipping",
z.osm_id, z.name
);
stats.zone_without_country += 1;
}
Some(Ok(t)) => z.zone_type = Some(t),
Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
info!("impossible to find rules for country {}", c);
*stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
}
Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
debug!(
"impossible to find a rule for level {:?} for country {}",
lvl, country
);
*stats
.unhandled_admin_level
.entry(country)
.or_insert_with(BTreeMap::new)
.entry(lvl.unwrap_or(0))
.or_insert(0) += 1;
}
});
Ok(())
}
fn compute_labels(zones: &mut [Zone]) {
info!("computing all zones's label");
let nb_zones = zones.len();
for i in 0..nb_zones {
let (mslice, z) = MutableSlice::init(zones, i);
z.compute_labels(&mslice);
}
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
    info!("cleaning untagged zones");
    let count_before = zones.len();
    zones.retain(|zone| zone.zone_type.is_some());
    info!("{} zones cleaned", (count_before - zones.len()));
}
/// Builds the full zone ontology: inclusions, types, hierarchy, names and labels.
///
/// NOTE: `clean_untagged_zones` removes entries at the end, so any `ZoneIndex`
/// held before this call is invalidated afterwards (see WARNING below).
fn create_ontology(
    zones: &mut Vec<zone::Zone>,
    stats: &mut CosmogonyStats,
    country_code: Option<String>,
) -> Result<(), Error> {
    info!("creating ontology for {} zones", zones.len());
    // Geometric inclusion links are needed both for typing and for the hierarchy.
    let inclusions = find_inclusions(zones);
    type_zones(zones, stats, country_code, &inclusions)?;
    build_hierarchy(zones, inclusions);
    zones.iter_mut().for_each(|z| z.compute_names());
    compute_labels(zones);
    // we remove the useless zones from cosmogony
    // WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
    // this should be removed later on (and switch to a map by osm_id?) as it's not elegant,
    // but for the moment it'll do
    clean_untagged_zones(zones);
    Ok(())
}
/// Builds a `Cosmogony` from an OSM pbf file.
///
/// When `with_geom` is false the zone boundaries are not read, which is
/// faster but produces zones without geometry.
///
/// # Errors
/// Fails if the pbf file cannot be opened or parsed, or if the ontology
/// cannot be created (e.g. no country could be determined).
pub fn build_cosmogony(
    pbf_path: String,
    with_geom: bool,
    country_code: Option<String>,
) -> Result<Cosmogony, Error> {
    let path = Path::new(&pbf_path);
    let file = File::open(&path).context("no pbf file")?;
    let mut parsed_pbf = OsmPbfReader::new(file);
    let (mut zones, mut stats) = if with_geom {
        get_zones_and_stats(&mut parsed_pbf)?
    } else {
        get_zones_and_stats_without_geom(&mut parsed_pbf)?
    };
    create_ontology(&mut zones, &mut stats, country_code)?;
    stats.compute(&zones);
    let cosmogony = Cosmogony {
        zones,
        meta: CosmogonyMetadata {
            osm_filename: path
                .file_name()
                .and_then(|f| f.to_str())
                .map(|f| f.to_string())
                // `unwrap_or_else` avoids allocating the fallback on the happy path
                .unwrap_or_else(|| "invalid file name".into()),
            stats,
        },
    };
    Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
    reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
    reader.lines().map(|line| {
        // First surface any I/O error, then try to deserialize the line.
        line.map_err(|e| failure::err_msg(e.to_string()))
            .and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
    })
}
/// Reads a whole json-lines stream into a `Cosmogony`
/// (the non-zone fields get their default values).
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
    // Collecting into `Result` short-circuits on the first malformed line.
    let zones = read_zones(reader).collect::<Result<_, _>>()?;
    Ok(Cosmogony {
        zones,
        ..Default::default()
    })
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
    // The file extension decides how the content is decoded.
    let format = OutputFormat::from_filename(input)?;
    let file = std::fs::File::open(&input)?;
    let buffered = std::io::BufReader::new(file);
    load_cosmogony(buffered, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the whole cosmogony is loaded
pub fn read_zones_from_file(
    input: &str,
) -> Result<Box<dyn std::iter::Iterator<Item = Result<Zone, Error>>>, Error> {
    let format = OutputFormat::from_filename(input)?;
    let f = std::fs::File::open(&input)?;
    let f = std::io::BufReader::new(f);
    match format {
        // Non-streamed formats: deserialize everything, then iterate the zones.
        OutputFormat::JsonGz | OutputFormat::Json => {
            let cosmo = load_cosmogony(f, format)?;
            Ok(Box::new(cosmo.zones.into_iter().map(Ok)))
        }
        OutputFormat::JsonStream => Ok(Box::new(read_zones(f))),
        OutputFormat::JsonStreamGz => {
            // Re-buffer on top of the decompressor so `read_zones` can split lines cheaply.
            let r = flate2::bufread::GzDecoder::new(f);
            let r = std::io::BufReader::new(r);
            Ok(Box::new(read_zones(r)))
        }
    }
}
/// Load a cosmogony from a reader and a file_format
///
/// Plain/gzipped json is fully deserialized in one go; the *Stream variants
/// are read line by line (one zone per line).
pub fn load_cosmogony(
    reader: impl std::io::BufRead,
    format: OutputFormat,
) -> Result<Cosmogony, Error> {
    match format {
        OutputFormat::JsonGz => {
            let r = flate2::read::GzDecoder::new(reader);
            serde_json::from_reader(r).map_err(|e| failure::err_msg(e.to_string()))
        }
        OutputFormat::Json => {
            serde_json::from_reader(reader).map_err(|e| failure::err_msg(e.to_string()))
        }
        OutputFormat::JsonStream => from_json_stream(reader),
        OutputFormat::JsonStreamGz => {
            // Decompress, then re-buffer so the json-lines reader can split lines.
            let r = flate2::bufread::GzDecoder::new(reader);
            let r = std::io::BufReader::new(r);
            from_json_stream(r)
        }
    }
}
| {
rel.tags
.get("boundary")
.map_or(false, |v| v == "administrative")
&&
rel.tags.get("admin_level").is_some()
} | conditional_block |
lib.rs | #[macro_use]
extern crate include_dir;
pub mod cosmogony;
mod country_finder;
pub mod file_format;
mod hierarchy_builder;
mod mutable_slice;
pub mod zone;
pub mod zone_typer;
pub use crate::cosmogony::{Cosmogony, CosmogonyMetadata, CosmogonyStats};
use crate::country_finder::CountryFinder;
use crate::file_format::OutputFormat;
use crate::hierarchy_builder::{build_hierarchy, find_inclusions};
use crate::mutable_slice::MutableSlice;
use failure::Error;
use failure::ResultExt;
use log::{debug, info};
use osmpbfreader::{OsmObj, OsmPbfReader};
use std::collections::BTreeMap;
use std::fs::File;
use std::path::Path;
pub use crate::zone::{Zone, ZoneIndex, ZoneType};
#[rustfmt::skip]
/// Returns true when the OSM object is an administrative-boundary relation
/// carrying an `admin_level` tag.
pub fn is_admin(obj: &OsmObj) -> bool {
    match obj {
        OsmObj::Relation(rel) => {
            rel.tags.get("boundary").map_or(false, |v| v == "administrative")
                && rel.tags.get("admin_level").is_some()
        }
        _ => false,
    }
}
/// Reads the pbf and extracts all administrative zones together with their geometries.
///
/// Zones without a boundary polygon are skipped for now.
///
/// # Errors
/// Fails if the osm file cannot be parsed.
pub fn get_zones_and_stats(
    pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
    info!("Reading pbf with geometries...");
    // Also fetch the dependencies (ways/nodes) so the boundaries can be rebuilt.
    let objects = pbf
        .get_objs_and_deps(|o| is_admin(o))
        .context("invalid osm file")?;
    info!("reading pbf done.")
;
    let mut zones = vec![];
    let stats = CosmogonyStats::default();
    for obj in objects.values() {
        if !is_admin(obj) {
            continue;
        }
        if let OsmObj::Relation(ref relation) = *obj {
            let next_index = ZoneIndex { index: zones.len() };
            if let Some(zone) = zone::Zone::from_osm_with_geom(relation, &objects, next_index) {
                // Ignore zone without boundary polygon for the moment
                if zone.boundary.is_some() {
                    zones.push(zone);
                }
            }
        }
    }
    Ok((zones, stats))
}
/// Reads the pbf and extracts all administrative zones, skipping the (costly)
/// geometry extraction: the zones get no boundary.
///
/// # Errors
/// Currently never returns an error (each pbf block read is unwrapped).
pub fn get_zones_and_stats_without_geom(
    pbf: &mut OsmPbfReader<File>,
) -> Result<(Vec<zone::Zone>, CosmogonyStats), Error> {
    info!("Reading pbf without geometries...");
    let mut zones = vec![];
    let stats = CosmogonyStats::default();
    for obj in pbf.par_iter().map(Result::unwrap) {
        if !is_admin(&obj) {
            continue;
        }
        if let OsmObj::Relation(ref relation) = obj {
            let next_index = ZoneIndex { index: zones.len() };
            // No dependency map: the zone is built without geometry.
            if let Some(zone) = zone::Zone::from_osm(relation, &BTreeMap::default(), next_index) {
                zones.push(zone);
            }
        }
    }
    Ok((zones, stats))
}
/// Returns the (uppercased) country code for `zone`: the forced
/// `country_code` when one was provided, otherwise the country found
/// geographically from the zones containing this one.
fn get_country_code<'a>(
    country_finder: &'a CountryFinder,
    zone: &zone::Zone,
    country_code: &'a Option<String>,
    inclusions: &Vec<ZoneIndex>,
) -> Option<String> {
    match country_code {
        Some(c) => Some(c.to_uppercase()),
        None => country_finder.find_zone_country(&zone, &inclusions),
    }
}
/// Assigns a `zone_type` to every zone, using libpostal's per-country rules.
///
/// # Errors
/// Fails when no `country_code` is provided and no country zone exists in the data.
fn type_zones(
    zones: &mut [zone::Zone],
    stats: &mut CosmogonyStats,
    country_code: Option<String>,
    inclusions: &Vec<Vec<ZoneIndex>>,
) -> Result<(), Error> {
    use rayon::prelude::*;
    info!("reading libpostal's rules");
    let zone_typer = zone_typer::ZoneTyper::new()?;
    info!("creating a countrys rtree");
    let country_finder: CountryFinder = CountryFinder::init(&zones, &zone_typer);
    if country_code.is_none() && country_finder.is_empty() {
        return Err(failure::err_msg(
            "no country_code has been provided and no country have been found, we won't be able to make a cosmogony",
        ));
    }
    info!("typing zones");
    // We type all the zones in parallele
    // To not mutate the zones while doing it
    // (the borrow checker would not be happy since we also need to access to the zone's vector
    // to be able to transform the ZoneIndex to a zone)
    // we collect all the types in a Vector, and assign the zone's zone_type as a post process
    let zones_type: Vec<_> = zones
        .par_iter()
        .map(|z| {
            get_country_code(&country_finder, &z, &country_code, &inclusions[z.id.index])
                .map(|c| zone_typer.get_zone_type(&z, &c, &inclusions[z.id.index], zones))
        })
        .collect();
    zones
        .iter_mut()
        .zip(zones_type.into_iter())
        .for_each(|(z, zone_type)| match zone_type {
            // No country could be determined: leave the zone untyped.
            None => {
                info!(
                    "impossible to find a country for {} ({}), skipping",
                    z.osm_id, z.name
                );
                stats.zone_without_country += 1;
            }
            Some(Ok(t)) => z.zone_type = Some(t),
            // A country was found but libpostal has no rules for it.
            Some(Err(zone_typer::ZoneTyperError::InvalidCountry(c))) => {
                info!("impossible to find rules for country {}", c);
                *stats.zone_with_unkwown_country_rules.entry(c).or_insert(0) += 1;
            }
            // The country has rules, but this admin_level is not mapped.
            Some(Err(zone_typer::ZoneTyperError::UnkownLevel(lvl, country))) => {
                debug!(
                    "impossible to find a rule for level {:?} for country {}",
                    lvl, country
                );
                *stats
                    .unhandled_admin_level
                    .entry(country)
                    .or_insert_with(BTreeMap::new)
                    .entry(lvl.unwrap_or(0))
                    .or_insert(0) += 1;
            }
        });
    Ok(())
}
/// Computes the label of every zone, giving each zone read access to all the others.
fn compute_labels(zones: &mut [Zone]) {
    info!("computing all zones's label");
    for idx in 0..zones.len() {
        // Split the slice so the current zone can be mutated while the rest stays readable.
        let (others, current) = MutableSlice::init(zones, idx);
        current.compute_labels(&others);
    }
}
// we don't want to keep zone's without zone_type (but the zone_type could be ZoneType::NonAdministrative)
fn clean_untagged_zones(zones: &mut Vec<zone::Zone>) {
    info!("cleaning untagged zones");
    let count_before = zones.len();
    zones.retain(|zone| zone.zone_type.is_some());
    info!("{} zones cleaned", (count_before - zones.len()));
}
/// Builds the full zone ontology: inclusions, types, hierarchy, names and labels.
///
/// NOTE: `clean_untagged_zones` removes entries at the end, so any `ZoneIndex`
/// held before this call is invalidated afterwards (see WARNING below).
fn create_ontology(
    zones: &mut Vec<zone::Zone>,
    stats: &mut CosmogonyStats,
    country_code: Option<String>,
) -> Result<(), Error> {
    info!("creating ontology for {} zones", zones.len());
    // Geometric inclusion links are needed both for typing and for the hierarchy.
    let inclusions = find_inclusions(zones);
    type_zones(zones, stats, country_code, &inclusions)?;
    build_hierarchy(zones, inclusions);
    zones.iter_mut().for_each(|z| z.compute_names());
    compute_labels(zones);
    // we remove the useless zones from cosmogony
    // WARNING: this invalidate the different indexes (we can no longer lookup a Zone by it's id in the zones's vector)
    // this should be removed later on (and switch to a map by osm_id?) as it's not elegant,
    // but for the moment it'll do
    clean_untagged_zones(zones);
    Ok(())
}
/// Builds a `Cosmogony` from an OSM pbf file.
///
/// When `with_geom` is false the zone boundaries are not read, which is
/// faster but produces zones without geometry.
///
/// # Errors
/// Fails if the pbf file cannot be opened or parsed, or if the ontology
/// cannot be created (e.g. no country could be determined).
pub fn build_cosmogony(
    pbf_path: String,
    with_geom: bool,
    country_code: Option<String>,
) -> Result<Cosmogony, Error> {
    let path = Path::new(&pbf_path);
    let file = File::open(&path).context("no pbf file")?;
    let mut parsed_pbf = OsmPbfReader::new(file);
    let (mut zones, mut stats) = if with_geom {
        get_zones_and_stats(&mut parsed_pbf)?
    } else {
        get_zones_and_stats_without_geom(&mut parsed_pbf)?
    };
    create_ontology(&mut zones, &mut stats, country_code)?;
    stats.compute(&zones);
    let cosmogony = Cosmogony {
        zones,
        meta: CosmogonyMetadata {
            osm_filename: path
                .file_name()
                .and_then(|f| f.to_str())
                .map(|f| f.to_string())
                // `unwrap_or_else` avoids allocating the fallback on the happy path
                .unwrap_or_else(|| "invalid file name".into()),
            stats,
        },
    };
    Ok(cosmogony)
}
/// Stream Cosmogony's Zone from a Reader
pub fn read_zones(
    reader: impl std::io::BufRead,
) -> impl std::iter::Iterator<Item = Result<Zone, Error>> {
    reader.lines().map(|line| {
        // First surface any I/O error, then try to deserialize the line.
        line.map_err(|e| failure::err_msg(e.to_string()))
            .and_then(|l| serde_json::from_str(&l).map_err(|e| failure::err_msg(e.to_string())))
    })
}
/// Reads a whole json-lines stream into a `Cosmogony`
/// (the non-zone fields get their default values).
fn from_json_stream(reader: impl std::io::BufRead) -> Result<Cosmogony, Error> {
    // Collecting into `Result` short-circuits on the first malformed line.
    let zones = read_zones(reader).collect::<Result<_, _>>()?;
    Ok(Cosmogony {
        zones,
        ..Default::default()
    })
}
/// Load a cosmogony from a file
pub fn load_cosmogony_from_file(input: &str) -> Result<Cosmogony, Error> {
    // The file extension decides how the content is decoded.
    let format = OutputFormat::from_filename(input)?;
    let file = std::fs::File::open(&input)?;
    let buffered = std::io::BufReader::new(file);
    load_cosmogony(buffered, format)
}
/// Return an iterator on the zones
/// if the input file is a jsonstream, the zones are streamed
/// if the input file is a json, the whole cosmogony is loaded
pub fn read_zones_from_file(
    input: &str,
) -> Result<Box<dyn std::iter::Iterator<Item = Result<Zone, Error>>>, Error> {
    let format = OutputFormat::from_filename(input)?;
    let f = std::fs::File::open(&input)?;
    let f = std::io::BufReader::new(f);
    match format {
        // Non-streamed formats: deserialize everything, then iterate the zones.
        OutputFormat::JsonGz | OutputFormat::Json => {
            let cosmo = load_cosmogony(f, format)?;
            Ok(Box::new(cosmo.zones.into_iter().map(Ok)))
        }
        OutputFormat::JsonStream => Ok(Box::new(read_zones(f))),
        OutputFormat::JsonStreamGz => {
            // Re-buffer on top of the decompressor so `read_zones` can split lines cheaply.
            let r = flate2::bufread::GzDecoder::new(f);
            let r = std::io::BufReader::new(r);
            Ok(Box::new(read_zones(r)))
        }
    }
}
/// Load a cosmogony from a reader and a file_format
///
/// Plain/gzipped json is fully deserialized in one go; the *Stream variants
/// are read line by line (one zone per line).
pub fn load_cosmogony(
    reader: impl std::io::BufRead,
    format: OutputFormat,
) -> Result<Cosmogony, Error> {
    match format {
        OutputFormat::JsonGz => {
            let r = flate2::read::GzDecoder::new(reader);
            serde_json::from_reader(r).map_err(|e| failure::err_msg(e.to_string()))
        }
        OutputFormat::Json => {
            serde_json::from_reader(reader).map_err(|e| failure::err_msg(e.to_string()))
        }
        OutputFormat::JsonStream => from_json_stream(reader),
        OutputFormat::JsonStreamGz => {
            // Decompress, then re-buffer so the json-lines reader can split lines.
            let r = flate2::bufread::GzDecoder::new(reader);
            let r = std::io::BufReader::new(r);
            from_json_stream(r)
        }
    }
}
|
main.rs | /*
--- Day 12: Subterranean Sustainability ---
The year 518 is significantly more underground than your history books implied. Either that, or
you've arrived in a vast cavern network under the North Pole.
After exploring a little, you discover a long tunnel that contains a row of small pots as far as
you can see to your left and right. A few of them contain plants - someone is trying to grow things
in these geothermally-heated caves.
The pots are numbered, with 0 in front of you. To the left, the pots are numbered -1, -2, -3, and
so on; to the right, 1, 2, 3.... Your puzzle input contains a list of pots from 0 to the right and
whether they do (#) or do not (.) currently contain a plant, the initial state. (No other pots
currently contain plants.) For example, an initial state of #..##.... indicates that pots 0, 3, and
4 currently contain plants.
Your puzzle input also contains some notes you find on a nearby table: someone has been trying to
figure out how these plants spread to nearby pots. Based on the notes, for each generation of
plants, a given pot has or does not have a plant based on whether that pot (and the two pots on
either side of it) had a plant in the last generation. These are written as LLCRR => N, where L are
pots to the left, C is the current pot being considered, R are the pots to the right, and N is
whether the current pot will have a plant in the next generation. For example:
A note like ..#.. => . means that a pot that contains a plant but with no plants within two pots of
it will not have a plant in it during the next generation.
A note like ##.## => . means that an empty pot with two plants on each side of it will remain empty
in the next generation.
A note like .##.# => # means that a pot has a plant in a given generation if, in the previous
generation, there were plants in that pot, the one immediately to the left, and the one two pots to
the right, but not in the ones immediately to the right and two to the left.
It's not clear what these plants are for, but you're sure it's important, so you'd like to make
sure the current configuration of plants is sustainable by determining what will happen after 20
generations.
For example, given the following input:
initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
For brevity, in this example, only the combinations which do produce a plant are listed. (Your
input includes all possible combinations.) Then, the next 20 generations will look like this:
1 2 3
0 0 0 0
0:...#..#.#..##......###...###...........
1:...#...#....#.....#..#..#..#...........
2:...##..##...##....#..#..#..##..........
3:..#.#...#..#.#....#..#..#...#..........
4:...#.#..#...#.#...#..#..##..##.........
5:....#...##...#.#..#..#...#...#.........
6:....##.#.#....#...#..##..##..##........
7:...#..###.#...##..#...#...#...#........
8:...#....##.#.#.#..##..##..##..##.......
9:...##..#..#####....#...#...#...#.......
10:..#.#..#...#.##....##..##..##..##......
11:...#...##...#.#...#.#...#...#...#......
12:...##.#.#....#.#...#.#..##..##..##.....
13:..#..###.#....#.#...#....#...#...#.....
14:..#....##.#....#.#..##...##..##..##....
15:..##..#..#.#....#....#..#.#...#...#....
16:.#.#..#...#.#...##...#...#.#..##..##...
17:..#...##...#.#.#.#...##...#....#...#...
18:..##.#.#....#####.#.#.#...##...##..##..
19:.#..###.#..#.#.#######.#.#.#..#.#...#..
20:.#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for .#.#., pot 9 matched .##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
/// Identifier of a node in the rules decision tree.
type CombinationId = usize;
/// Whether a pot contains a plant.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
    HasPlant,
    Empty,
}
/// Inner tree node: which child to follow depending on the next pot's state.
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
    // child followed when the next pot has a plant ('#')
    has_plant: Option<CombinationId>,
    // child followed when the next pot is empty ('.')
    empty: Option<CombinationId>,
}
/// A node of the rules tree: either a branch (more pots to inspect)
/// or a leaf holding the resulting pot state.
#[derive(Debug, Clone)]
enum Combination {
    Branch(CombinationBranch),
    Node(PotState),
}
/// The whole rules tree, keyed by node id (id 0 is the root).
type CombinationsMap = HashMap<CombinationId, Combination>;
/// The state of the pot row: `true` = pot contains a plant.
type PlantsState = Vec<bool>;
/// Padding added on both sides of the initial state so the plants can
/// spread left/right without indices going negative.
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
/// Parses a pot-row string ('#' = plant) into a `PlantsState`, padding it
/// with `OFFSET` empty pots on each side.
/// (Restores the function name that was lost in this copy of the file; it is
/// the identifier referenced by `main` and the unit tests.)
fn convert_state_str_to_vec(state: &str) -> PlantsState {
    // Build the padded vector directly instead of repeatedly inserting at
    // index 0, which is O(n) per insertion.
    let mut result = vec![false; OFFSET];
    result.extend(state.chars().map(|x| x == '#'));
    result.extend(std::iter::repeat(false).take(OFFSET));
    result
}
/// Follows one edge of the rules tree: from node `id`, returns the child
/// reached by reading `ch` ('#' or '.'), if that edge exists.
fn get_id_for_combinations_map_item(
    combinations_map: &mut CombinationsMap,
    id: CombinationId,
    ch: char,
) -> Option<CombinationId> {
    match combinations_map.get(&id) {
        Some(Combination::Branch(branch)) => {
            if ch == '#' {
                branch.has_plant
            } else {
                branch.empty
            }
        }
        _ => None,
    }
}
/// Builds the rules decision tree from strings like "..#.. => #".
///
/// Each rule's five pattern characters are walked from the root (id 0),
/// creating branch nodes as needed; the character after "=> " becomes a leaf.
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
    let mut combinations_map: CombinationsMap = HashMap::new();
    let mut current_combination_id = 1;
    // Root node of the tree.
    combinations_map.insert(
        0,
        Combination::Branch(CombinationBranch {
            has_plant: None,
            empty: None,
        }),
    );
    for combination_str in combinations_strs {
        let mut prev_combination_id: Option<CombinationId> = None;
        // Links the parent branch to the newly created child on the edge `ch`.
        fn update_prev_combination(
            combinations_map: &mut CombinationsMap,
            prev_id_raw: CombinationId,
            ch: char,
            combination_id: CombinationId,
        ) {
            let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
            // `CombinationBranch` is `Copy`: mutate a copy, then write it back.
            if let Combination::Branch(mut existing_combination_branch) = existing_combination {
                if ch == '#' {
                    existing_combination_branch.has_plant = Some(combination_id);
                } else {
                    existing_combination_branch.empty = Some(combination_id);
                }
                combinations_map.insert(
                    prev_id_raw,
                    Combination::Branch(existing_combination_branch),
                );
            }
        }
        for (idx, ch) in combination_str.chars().take(5).enumerate() {
            let mut combination_id = current_combination_id;
            let prev_id_raw = prev_combination_id.unwrap_or(0);
            // Reuse an existing child if this pattern prefix was already seen.
            combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
                .unwrap_or(combination_id);
            // entry does not exist yet
            if current_combination_id == combination_id {
                if idx!= 4 {
                    combinations_map.insert(
                        current_combination_id,
                        Combination::Branch(CombinationBranch {
                            has_plant: None,
                            empty: None,
                        }),
                    );
                }
                update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
            }
            prev_combination_id = Some(combination_id);
            current_combination_id += 1;
        }
        // Character at index 9 is the rule's result (the char after "=> ").
        let ch = combination_str.chars().nth(9).unwrap();
        let node_content = if ch == '#' {
            PotState::HasPlant
        } else {
            PotState::Empty
        };
        combinations_map.insert(
            prev_combination_id.unwrap(),
            Combination::Node(node_content),
        );
    }
    combinations_map
}
/// Walks the rules tree with the five pot states in `combination_vec`
/// and returns the resulting pot state, or `None` when no rule matches.
fn get_result_for_combination_vec(
    combinations_map: &mut CombinationsMap,
    combination_vec: &mut PlantsState,
) -> Option<PotState> {
    let mut result: Option<PotState> = None;
    let mut prev_id: Option<CombinationId> = None;
    for item in combination_vec {
        // Start from the root (id 0) on the first iteration.
        let combination_id = prev_id.unwrap_or(0);
        if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
        {
            prev_id = if *item {
                combination_branch.has_plant
            } else {
                combination_branch.empty
            };
            // Dead end: no rule covers this prefix.
            if prev_id.is_none() {
                break;
            }
        }
    }
    if prev_id.is_some() {
        // A full five-step match ends on a leaf holding the resulting state.
        if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
            result = Some(*pot_state);
        }
    }
    result
}
/// Reads the puzzle rules from `src/input.txt`, one rule per line.
///
/// # Panics
/// Panics if the file cannot be opened or read (acceptable for this puzzle binary).
fn get_input_combinations() -> Vec<String> {
    let mut file = File::open("src/input.txt").expect("Unable to open the file");
    let mut contents = String::new();
    file
        .read_to_string(&mut contents)
        .expect("Unable to read the file");
    // `lines()` already yields fresh `&str`s; the iterator `.clone()` the
    // original code did here was redundant.
    contents.lines().map(str::to_string).collect()
}
/// Computes the next generation: each pot's new state is determined by the
/// rules applied to its five-pot window (two pots on each side).
fn get_new_state_after_one_generation(
    orig_state: &mut PlantsState,
    mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
    let mut new_state: PlantsState = vec![];
    let len = orig_state.len();
    for idx in 0..len {
        // The two pots at each edge have no full window; keep them unchanged.
        if idx < 2 || idx >= len - 2 {
            new_state.push(orig_state[idx]);
            continue;
        }
        let mut combination_vec: PlantsState = vec![
            orig_state[idx - 2],
            orig_state[idx - 1],
            orig_state[idx],
            orig_state[idx + 1],
            orig_state[idx + 2],
        ];
        // A window matched by no rule defaults to an empty pot.
        let new_state_item =
            match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
                .unwrap_or(PotState::Empty)
            {
                PotState::HasPlant => true,
                PotState::Empty => false,
            };
        new_state.push(new_state_item);
    }
    new_state
}
/// Applies `get_new_state_after_one_generation` `n_generations` times.
fn get_new_state_after_n_generations(
    orig_state: &mut PlantsState,
    mut combinations_map: &mut CombinationsMap,
    n_generations: usize,
) -> PlantsState {
    let mut state = orig_state.clone();
    for _ in 0..n_generations {
        state = get_new_state_after_one_generation(&mut state, &mut combinations_map);
    }
    state
}
/// Sums the pot numbers (vector index minus `OFFSET`) of all pots holding a plant.
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
    plants_state
        .iter()
        .enumerate()
        .filter(|(_, has_plant)| **has_plant)
        .map(|(idx, _)| idx as i64 - OFFSET as i64)
        .sum()
}
/// Extrapolates the sum for a huge number of generations.
///
/// After enough generations the plant pattern stabilizes and the sum grows
/// by a constant amount each generation; once three consecutive deltas are
/// equal, the remaining generations are extrapolated linearly.
fn get_pots_with_plant_sum_using_pattern(
    orig_state: &mut PlantsState,
    mut combinations_map: &mut CombinationsMap,
    n_generations: usize,
) -> i64 {
    let mut sum: i64;
    let mut new_state: PlantsState = orig_state.clone();
    let mut last_idx: i64 = 100;
    let mut diff_a = 0;
    let mut diff_b = 0;
    let mut diff_c;
    // the number 100 is a random high-enough number found empirically
    new_state =
        get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
    sum = get_pots_with_plant_sum(&mut new_state) as i64;
    for _ in 0..100 {
        // Keep the three most recent generation-to-generation deltas.
        diff_c = diff_b;
        diff_b = diff_a;
        let prev_sum = sum;
        new_state = get_new_state_after_n_generations(&mut new_state, &mut combinations_map, 1);
        sum = get_pots_with_plant_sum(&mut new_state) as i64;
        last_idx += 1;
        diff_a = sum - prev_sum;
        // Three identical non-zero deltas: the growth has become linear.
        if diff_a!= 0 && diff_a == diff_b && diff_b == diff_c {
            break;
        }
    }
    // Linear extrapolation for the generations that were not simulated.
    sum + diff_a * (n_generations as i64 - last_idx as i64)
}
/// Entry point: solves both parts of the day-12 puzzle.
fn main() {
    let mut input_combinations = get_input_combinations();
    let mut combinations_map = convert_strs_to_combinations_map(&mut input_combinations);
    let mut state_vector = convert_state_str_to_vec(INITIAL_STATE);
    // Part 1: simulate 20 generations directly.
    let mut final_state_20 =
        get_new_state_after_n_generations(&mut state_vector, &mut combinations_map, 20);
    let sum_20 = get_pots_with_plant_sum(&mut final_state_20);
    // Part 2: 50 billion generations, extrapolated from the stabilized pattern.
    let sum_5b =
        get_pots_with_plant_sum_using_pattern(&mut state_vector, &mut combinations_map, 50_000_000_000);
    println!("Results:");
    println!("- (1) sum of pots with plant for 20: {}", sum_20);
    println!("- (2) sum of pots with plant for 5b: {}", sum_5b);
}
#[cfg(test)]
mod tests {
    use super::*;
    // Test-only helper: walks the rules tree with a raw "#/." string
    // (mirrors `get_result_for_combination_vec`).
    fn get_result_for_combination_str(
        combinations_map: &mut CombinationsMap,
        combination_str: &str,
    ) -> Option<PotState> {
        let mut result: Option<PotState> = None;
        let mut prev_id: Option<CombinationId> = None;
        for ch in combination_str.chars() {
            let combination_id = prev_id.unwrap_or(0);
            if let Combination::Branch(combination_branch) =
                combinations_map.get(&combination_id).unwrap()
            {
                let field = if ch == '#' {
                    combination_branch.has_plant
                } else {
                    combination_branch.empty
                };
                if field.is_some() {
                    prev_id = field;
                } else {
                    prev_id = None;
                    break;
                }
            }
        }
        if prev_id.is_some() {
            if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
                result = Some(*pot_state);
            }
        }
        result
    }
    // Rules of the puzzle's worked example, plus an explicit "all empty" rule.
    fn get_example_combinations() -> Vec<String> {
        vec![
            "...## => #",
            "..#.. => #",
            ".#... => #",
            ".#.#. => #",
            ".#.## => #",
            ".##.. => #",
            ".#### => #",
            "#.#.# => #",
            "#.### => #",
            "##.#. => #",
            "##.## => #",
            "###.. => #",
            "###.# => #",
            "####. => #",
            // Must follow the "LLCRR => N" layout: the result char has to sit
            // at index 9, otherwise the parser's `chars().nth(9).unwrap()`
            // panics (the previous "..... =>." form was malformed).
            "..... => .",
        ]
        .iter()
        .map(|x| x.to_string())
        .collect()
    }
    #[test]
    fn test_convert_state_str_to_vec() {
        let result = convert_state_str_to_vec("#..##");
        let mut expected = vec![true, false, false, true, true];
        for _ in 0..OFFSET {
            expected.insert(0, false);
        }
        for _ in 0..OFFSET {
            expected.push(false);
        }
        assert_eq!(result, expected)
    }
    #[test]
    fn test_convert_strs_to_combinations_map() {
        let mut combinations_strs = get_example_combinations();
        let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
        assert_eq!(
            get_result_for_combination_str(&mut combinations_map, "...##"),
            Some(PotState::HasPlant)
        );
        assert_eq!(
            get_result_for_combination_str(&mut combinations_map, "#####"),
            None,
        );
        assert_eq!(
            get_result_for_combination_str(&mut combinations_map, "....."),
            Some(PotState::Empty),
        );
    }
    #[test]
    fn test_get_new_state_after_one_generation() {
        let mut combinations_strs = get_example_combinations();
        let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
        let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
        let expected_final_state = convert_state_str_to_vec("#...#....#.....#..#..#..#");
        let new_state = get_new_state_after_one_generation(&mut orig_state, &mut combinations_map);
        assert_eq!(new_state, expected_final_state);
    }
    #[test]
    fn test_get_new_state_after_n_generations() {
        let mut combinations_strs = get_example_combinations();
        let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
        let mut orig_state = convert_state_str_to_vec("...#..#.#..##......###...###...........");
        let expected_final_state = convert_state_str_to_vec(".#....##....#####...#######....#.#..##.");
        let new_state = get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
        assert_eq!(new_state, expected_final_state);
    }
    #[test]
    fn test_get_pots_with_plant_sum() {
        let mut combinations_strs = get_example_combinations();
        let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
        let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
        let mut new_state =
            get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
        let sum = get_pots_with_plant_sum(&mut new_state);
        assert_eq!(sum, 325);
    }
}
| convert_state_str_to_vec | identifier_name |
main.rs | /*
--- Day 12: Subterranean Sustainability ---
The year 518 is significantly more underground than your history books implied. Either that, or
you've arrived in a vast cavern network under the North Pole.
After exploring a little, you discover a long tunnel that contains a row of small pots as far as
you can see to your left and right. A few of them contain plants - someone is trying to grow things
in these geothermally-heated caves.
The pots are numbered, with 0 in front of you. To the left, the pots are numbered -1, -2, -3, and
so on; to the right, 1, 2, 3.... Your puzzle input contains a list of pots from 0 to the right and
whether they do (#) or do not (.) currently contain a plant, the initial state. (No other pots
currently contain plants.) For example, an initial state of #..##.... indicates that pots 0, 3, and
4 currently contain plants.
Your puzzle input also contains some notes you find on a nearby table: someone has been trying to
figure out how these plants spread to nearby pots. Based on the notes, for each generation of
plants, a given pot has or does not have a plant based on whether that pot (and the two pots on
either side of it) had a plant in the last generation. These are written as LLCRR => N, where L are
pots to the left, C is the current pot being considered, R are the pots to the right, and N is
whether the current pot will have a plant in the next generation. For example:
A note like..#.. =>. means that a pot that contains a plant but with no plants within two pots of
it will not have a plant in it during the next generation.
A note like ##.## =>. means that an empty pot with two plants on each side of it will remain empty
in the next generation.
A note like.##.# => # means that a pot has a plant in a given generation if, in the previous
generation, there were plants in that pot, the one immediately to the left, and the one two pots to
the right, but not in the ones immediately to the right and two to the left.
It's not clear what these plants are for, but you're sure it's important, so you'd like to make
sure the current configuration of plants is sustainable by determining what will happen after 20
generations.
For example, given the following input:
initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
For brevity, in this example, only the combinations which do produce a plant are listed. (Your
input includes all possible combinations.) Then, the next 20 generations will look like this:
1 2 3
0 0 0 0
0:...#..#.#..##......###...###...........
1:...#...#....#.....#..#..#..#...........
2:...##..##...##....#..#..#..##..........
3:..#.#...#..#.#....#..#..#...#..........
4:...#.#..#...#.#...#..#..##..##.........
5:....#...##...#.#..#..#...#...#.........
6:....##.#.#....#...#..##..##..##........
7:...#..###.#...##..#...#...#...#........
8:...#....##.#.#.#..##..##..##..##.......
9:...##..#..#####....#...#...#...#.......
10:..#.#..#...#.##....##..##..##..##......
11:...#...##...#.#...#.#...#...#...#......
12:...##.#.#....#.#...#.#..##..##..##.....
13:..#..###.#....#.#...#....#...#...#.....
14:..#....##.#....#.#..##...##..##..##....
15:..##..#..#.#....#....#..#.#...#...#....
16:.#.#..#...#.#...##...#...#.#..##..##...
17:..#...##...#.#.#.#...##...#....#...#...
18:..##.#.#....#####.#.#.#...##...##..##..
19:.#..###.#..#.#.#######.#.#.#..#.#...#..
20:.#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for.#.#., pot 9 matched.##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
/// Identifier of a node in the rules decision tree.
type CombinationId = usize;
/// Whether a pot contains a plant.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
    HasPlant,
    Empty,
}
/// Inner tree node: which child to follow depending on the next pot's state.
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
    // child followed when the next pot has a plant ('#')
    has_plant: Option<CombinationId>,
    // child followed when the next pot is empty ('.')
    empty: Option<CombinationId>,
}
/// A node of the rules tree: either a branch (more pots to inspect)
/// or a leaf holding the resulting pot state.
#[derive(Debug, Clone)]
enum Combination {
    Branch(CombinationBranch),
    Node(PotState),
}
/// The whole rules tree, keyed by node id (id 0 is the root).
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn convert_state_str_to_vec(state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx!= 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' {
PotState::HasPlant
} else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> PlantsState {
let mut new_state: PlantsState = orig_state.clone();
for _ in 0..n_generations {
new_state = get_new_state_after_one_generation(&mut new_state, &mut combinations_map);
}
new_state
}
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
let mut sum: i64 = 0;
for (idx, state_item) in plants_state.iter().enumerate() {
if *state_item {
sum += idx as i64 - OFFSET as i64;
}
}
sum
}
fn get_pots_with_plant_sum_using_pattern(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> i64 {
let mut sum: i64;
let mut new_state: PlantsState = orig_state.clone();
let mut last_idx: i64 = 100;
let mut diff_a = 0;
let mut diff_b = 0;
let mut diff_c;
// the number 100 is a random high-enough number found empirically
new_state =
get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
for _ in 0..100 {
diff_c = diff_b;
diff_b = diff_a;
let prev_sum = sum;
new_state = get_new_state_after_n_generations(&mut new_state, &mut combinations_map, 1);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
last_idx += 1;
diff_a = sum - prev_sum;
if diff_a!= 0 && diff_a == diff_b && diff_b == diff_c {
break;
}
}
sum + diff_a * (n_generations as i64 - last_idx as i64)
}
fn main() {
let mut input_combinations = get_input_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut input_combinations);
let mut state_vector = convert_state_str_to_vec(INITIAL_STATE);
let mut final_state_20 =
get_new_state_after_n_generations(&mut state_vector, &mut combinations_map, 20);
let sum_20 = get_pots_with_plant_sum(&mut final_state_20);
let sum_5b =
get_pots_with_plant_sum_using_pattern(&mut state_vector, &mut combinations_map, 50_000_000_000);
println!("Results:");
println!("- (1) sum of pots with plant for 20: {}", sum_20);
println!("- (2) sum of pots with plant for 5b: {}", sum_5b);
}
#[cfg(test)]
mod tests {
use super::*;
fn get_result_for_combination_str(
combinations_map: &mut CombinationsMap,
combination_str: &str,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for ch in combination_str.chars() {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) =
combinations_map.get(&combination_id).unwrap()
{
let field = if ch == '#' {
combination_branch.has_plant
} else {
combination_branch.empty
};
if field.is_some() {
prev_id = field;
} else {
prev_id = None;
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_example_combinations() -> Vec<String> {
vec![
"...## => #",
"..#.. => #",
".#... => #",
".#.#. => #",
".#.## => #",
".##.. => #",
".#### => #",
"#.#.# => #",
"#.### => #",
"##.#. => #",
"##.## => #",
"###.. => #",
"###.# => #",
"####. => #",
"..... =>.",
]
.iter()
.map(|x| x.to_string()) | let result = convert_state_str_to_vec("#..##");
let mut expected = vec![true, false, false, true, true];
for _ in 0..OFFSET {
expected.insert(0, false);
}
for _ in 0..OFFSET {
expected.push(false);
}
assert_eq!(result, expected)
}
#[test]
fn test_convert_strs_to_combinations_map() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "...##"),
Some(PotState::HasPlant)
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "#####"),
None,
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "....."),
Some(PotState::Empty),
);
}
#[test]
fn test_get_new_state_after_one_generation() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let expected_final_state = convert_state_str_to_vec("#...#....#.....#..#..#..#");
let new_state = get_new_state_after_one_generation(&mut orig_state, &mut combinations_map);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_new_state_after_n_generations() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("...#..#.#..##......###...###...........");
let expected_final_state = convert_state_str_to_vec(".#....##....#####...#######....#.#..##.");
let new_state = get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_pots_with_plant_sum() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let mut new_state =
get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
let sum = get_pots_with_plant_sum(&mut new_state);
assert_eq!(sum, 325);
}
} | .collect()
}
#[test]
fn test_convert_state_str_to_vec() { | random_line_split |
main.rs | /*
--- Day 12: Subterranean Sustainability ---
The year 518 is significantly more underground than your history books implied. Either that, or
you've arrived in a vast cavern network under the North Pole.
After exploring a little, you discover a long tunnel that contains a row of small pots as far as
you can see to your left and right. A few of them contain plants - someone is trying to grow things
in these geothermally-heated caves.
The pots are numbered, with 0 in front of you. To the left, the pots are numbered -1, -2, -3, and
so on; to the right, 1, 2, 3.... Your puzzle input contains a list of pots from 0 to the right and
whether they do (#) or do not (.) currently contain a plant, the initial state. (No other pots
currently contain plants.) For example, an initial state of #..##.... indicates that pots 0, 3, and
4 currently contain plants.
Your puzzle input also contains some notes you find on a nearby table: someone has been trying to
figure out how these plants spread to nearby pots. Based on the notes, for each generation of
plants, a given pot has or does not have a plant based on whether that pot (and the two pots on
either side of it) had a plant in the last generation. These are written as LLCRR => N, where L are
pots to the left, C is the current pot being considered, R are the pots to the right, and N is
whether the current pot will have a plant in the next generation. For example:
A note like..#.. =>. means that a pot that contains a plant but with no plants within two pots of
it will not have a plant in it during the next generation.
A note like ##.## =>. means that an empty pot with two plants on each side of it will remain empty
in the next generation.
A note like.##.# => # means that a pot has a plant in a given generation if, in the previous
generation, there were plants in that pot, the one immediately to the left, and the one two pots to
the right, but not in the ones immediately to the right and two to the left.
It's not clear what these plants are for, but you're sure it's important, so you'd like to make
sure the current configuration of plants is sustainable by determining what will happen after 20
generations.
For example, given the following input:
initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
For brevity, in this example, only the combinations which do produce a plant are listed. (Your
input includes all possible combinations.) Then, the next 20 generations will look like this:
1 2 3
0 0 0 0
0:...#..#.#..##......###...###...........
1:...#...#....#.....#..#..#..#...........
2:...##..##...##....#..#..#..##..........
3:..#.#...#..#.#....#..#..#...#..........
4:...#.#..#...#.#...#..#..##..##.........
5:....#...##...#.#..#..#...#...#.........
6:....##.#.#....#...#..##..##..##........
7:...#..###.#...##..#...#...#...#........
8:...#....##.#.#.#..##..##..##..##.......
9:...##..#..#####....#...#...#...#.......
10:..#.#..#...#.##....##..##..##..##......
11:...#...##...#.#...#.#...#...#...#......
12:...##.#.#....#.#...#.#..##..##..##.....
13:..#..###.#....#.#...#....#...#...#.....
14:..#....##.#....#.#..##...##..##..##....
15:..##..#..#.#....#....#..#.#...#...#....
16:.#.#..#...#.#...##...#...#.#..##..##...
17:..#...##...#.#.#.#...##...#....#...#...
18:..##.#.#....#####.#.#.#...##...##..##..
19:.#..###.#..#.#.#######.#.#.#..#.#...#..
20:.#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for.#.#., pot 9 matched.##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
type CombinationId = usize;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
HasPlant,
Empty,
}
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
has_plant: Option<CombinationId>,
empty: Option<CombinationId>,
}
#[derive(Debug, Clone)]
enum Combination {
Branch(CombinationBranch),
Node(PotState),
}
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn convert_state_str_to_vec(state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx!= 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' {
PotState::HasPlant
} else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> PlantsState {
let mut new_state: PlantsState = orig_state.clone();
for _ in 0..n_generations {
new_state = get_new_state_after_one_generation(&mut new_state, &mut combinations_map);
}
new_state
}
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
let mut sum: i64 = 0;
for (idx, state_item) in plants_state.iter().enumerate() {
if *state_item {
sum += idx as i64 - OFFSET as i64;
}
}
sum
}
fn get_pots_with_plant_sum_using_pattern(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> i64 | new_state = get_new_state_after_n_generations(&mut new_state, &mut combinations_map, 1);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
last_idx += 1;
diff_a = sum - prev_sum;
if diff_a!= 0 && diff_a == diff_b && diff_b == diff_c {
break;
}
}
sum + diff_a * (n_generations as i64 - last_idx as i64)
}
fn main() {
let mut input_combinations = get_input_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut input_combinations);
let mut state_vector = convert_state_str_to_vec(INITIAL_STATE);
let mut final_state_20 =
get_new_state_after_n_generations(&mut state_vector, &mut combinations_map, 20);
let sum_20 = get_pots_with_plant_sum(&mut final_state_20);
let sum_5b =
get_pots_with_plant_sum_using_pattern(&mut state_vector, &mut combinations_map, 50_000_000_000);
println!("Results:");
println!("- (1) sum of pots with plant for 20: {}", sum_20);
println!("- (2) sum of pots with plant for 5b: {}", sum_5b);
}
#[cfg(test)]
mod tests {
use super::*;
fn get_result_for_combination_str(
combinations_map: &mut CombinationsMap,
combination_str: &str,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for ch in combination_str.chars() {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) =
combinations_map.get(&combination_id).unwrap()
{
let field = if ch == '#' {
combination_branch.has_plant
} else {
combination_branch.empty
};
if field.is_some() {
prev_id = field;
} else {
prev_id = None;
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_example_combinations() -> Vec<String> {
vec![
"...## => #",
"..#.. => #",
".#... => #",
".#.#. => #",
".#.## => #",
".##.. => #",
".#### => #",
"#.#.# => #",
"#.### => #",
"##.#. => #",
"##.## => #",
"###.. => #",
"###.# => #",
"####. => #",
"..... =>.",
]
.iter()
.map(|x| x.to_string())
.collect()
}
#[test]
fn test_convert_state_str_to_vec() {
let result = convert_state_str_to_vec("#..##");
let mut expected = vec![true, false, false, true, true];
for _ in 0..OFFSET {
expected.insert(0, false);
}
for _ in 0..OFFSET {
expected.push(false);
}
assert_eq!(result, expected)
}
#[test]
fn test_convert_strs_to_combinations_map() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "...##"),
Some(PotState::HasPlant)
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "#####"),
None,
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "....."),
Some(PotState::Empty),
);
}
#[test]
fn test_get_new_state_after_one_generation() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let expected_final_state = convert_state_str_to_vec("#...#....#.....#..#..#..#");
let new_state = get_new_state_after_one_generation(&mut orig_state, &mut combinations_map);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_new_state_after_n_generations() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("...#..#.#..##......###...###...........");
let expected_final_state = convert_state_str_to_vec(".#....##....#####...#######....#.#..##.");
let new_state = get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_pots_with_plant_sum() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let mut new_state =
get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
let sum = get_pots_with_plant_sum(&mut new_state);
assert_eq!(sum, 325);
}
}
| {
let mut sum: i64;
let mut new_state: PlantsState = orig_state.clone();
let mut last_idx: i64 = 100;
let mut diff_a = 0;
let mut diff_b = 0;
let mut diff_c;
// the number 100 is a random high-enough number found empirically
new_state =
get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
for _ in 0..100 {
diff_c = diff_b;
diff_b = diff_a;
let prev_sum = sum; | identifier_body |
main.rs | /*
--- Day 12: Subterranean Sustainability ---
The year 518 is significantly more underground than your history books implied. Either that, or
you've arrived in a vast cavern network under the North Pole.
After exploring a little, you discover a long tunnel that contains a row of small pots as far as
you can see to your left and right. A few of them contain plants - someone is trying to grow things
in these geothermally-heated caves.
The pots are numbered, with 0 in front of you. To the left, the pots are numbered -1, -2, -3, and
so on; to the right, 1, 2, 3.... Your puzzle input contains a list of pots from 0 to the right and
whether they do (#) or do not (.) currently contain a plant, the initial state. (No other pots
currently contain plants.) For example, an initial state of #..##.... indicates that pots 0, 3, and
4 currently contain plants.
Your puzzle input also contains some notes you find on a nearby table: someone has been trying to
figure out how these plants spread to nearby pots. Based on the notes, for each generation of
plants, a given pot has or does not have a plant based on whether that pot (and the two pots on
either side of it) had a plant in the last generation. These are written as LLCRR => N, where L are
pots to the left, C is the current pot being considered, R are the pots to the right, and N is
whether the current pot will have a plant in the next generation. For example:
A note like..#.. =>. means that a pot that contains a plant but with no plants within two pots of
it will not have a plant in it during the next generation.
A note like ##.## =>. means that an empty pot with two plants on each side of it will remain empty
in the next generation.
A note like.##.# => # means that a pot has a plant in a given generation if, in the previous
generation, there were plants in that pot, the one immediately to the left, and the one two pots to
the right, but not in the ones immediately to the right and two to the left.
It's not clear what these plants are for, but you're sure it's important, so you'd like to make
sure the current configuration of plants is sustainable by determining what will happen after 20
generations.
For example, given the following input:
initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #
For brevity, in this example, only the combinations which do produce a plant are listed. (Your
input includes all possible combinations.) Then, the next 20 generations will look like this:
1 2 3
0 0 0 0
0:...#..#.#..##......###...###...........
1:...#...#....#.....#..#..#..#...........
2:...##..##...##....#..#..#..##..........
3:..#.#...#..#.#....#..#..#...#..........
4:...#.#..#...#.#...#..#..##..##.........
5:....#...##...#.#..#..#...#...#.........
6:....##.#.#....#...#..##..##..##........
7:...#..###.#...##..#...#...#...#........
8:...#....##.#.#.#..##..##..##..##.......
9:...##..#..#####....#...#...#...#.......
10:..#.#..#...#.##....##..##..##..##......
11:...#...##...#.#...#.#...#...#...#......
12:...##.#.#....#.#...#.#..##..##..##.....
13:..#..###.#....#.#...#....#...#...#.....
14:..#....##.#....#.#..##...##..##..##....
15:..##..#..#.#....#....#..#.#...#...#....
16:.#.#..#...#.#...##...#...#.#..##..##...
17:..#...##...#.#.#.#...##...#....#...#...
18:..##.#.#....#####.#.#.#...##...##..##..
19:.#..###.#..#.#.#######.#.#.#..#.#...#..
20:.#....##....#####...#######....#.#..##.
The generation is shown along the left, where 0 is the initial state. The pot numbers are shown
along the top, where 0 labels the center pot, negative-numbered pots extend to the left, and
positive pots extend toward the right. Remember, the initial state begins at pot 0, which is not
the leftmost pot used in this example.
After one generation, only seven plants remain. The one in pot 0 matched the rule looking for
..#.., the one in pot 4 matched the rule looking for.#.#., pot 9 matched.##.., and so on.
In this example, after 20 generations, the pots shown as # contain plants, the furthest left of
which is pot -2, and the furthest right of which is pot 34. Adding up all the numbers of
plant-containing pots after the 20th generation produces 325.
After 20 generations, what is the sum of the numbers of all pots which contain a plant?
--- Part Two ---
You realize that 20 generations aren't enough. After all, these plants will need to last another
1500 years to even reach your timeline, not to mention your future.
After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which
contain a plant?
*/
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
type CombinationId = usize;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum PotState {
HasPlant,
Empty,
}
#[derive(Debug, Copy, Clone)]
struct CombinationBranch {
has_plant: Option<CombinationId>,
empty: Option<CombinationId>,
}
#[derive(Debug, Clone)]
enum Combination {
Branch(CombinationBranch),
Node(PotState),
}
type CombinationsMap = HashMap<CombinationId, Combination>;
type PlantsState = Vec<bool>;
const OFFSET: usize = 1000;
const INITIAL_STATE: &str = "#.####...##..#....#####.##.......##.#..###.#####.###.##.###.###.#...#...##.#.##.#...#..#.##..##.#.##";
fn convert_state_str_to_vec(state: &str) -> PlantsState {
let mut result: PlantsState = state.chars().map(|x| x == '#').collect();
for _ in 0..OFFSET {
result.insert(0, false);
result.push(false);
}
result
}
fn get_id_for_combinations_map_item(
combinations_map: &mut CombinationsMap,
id: CombinationId,
ch: char,
) -> Option<CombinationId> {
if let Some(v) = combinations_map.get(&id) {
if let Combination::Branch(w) = v {
return if ch == '#' { w.has_plant } else { w.empty };
}
}
None
}
fn convert_strs_to_combinations_map(combinations_strs: &mut Vec<String>) -> CombinationsMap {
let mut combinations_map: CombinationsMap = HashMap::new();
let mut current_combination_id = 1;
combinations_map.insert(
0,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
for combination_str in combinations_strs {
let mut prev_combination_id: Option<CombinationId> = None;
fn update_prev_combination(
combinations_map: &mut CombinationsMap,
prev_id_raw: CombinationId,
ch: char,
combination_id: CombinationId,
) {
let existing_combination = combinations_map.get(&prev_id_raw).unwrap();
if let Combination::Branch(mut existing_combination_branch) = existing_combination {
if ch == '#' {
existing_combination_branch.has_plant = Some(combination_id);
} else {
existing_combination_branch.empty = Some(combination_id);
}
combinations_map.insert(
prev_id_raw,
Combination::Branch(existing_combination_branch),
);
}
}
for (idx, ch) in combination_str.chars().take(5).enumerate() {
let mut combination_id = current_combination_id;
let prev_id_raw = prev_combination_id.unwrap_or(0);
combination_id = get_id_for_combinations_map_item(&mut combinations_map, prev_id_raw, ch)
.unwrap_or(combination_id);
// entry does not exist yet
if current_combination_id == combination_id {
if idx!= 4 {
combinations_map.insert(
current_combination_id,
Combination::Branch(CombinationBranch {
has_plant: None,
empty: None,
}),
);
}
update_prev_combination(&mut combinations_map, prev_id_raw, ch, combination_id);
}
prev_combination_id = Some(combination_id);
current_combination_id += 1;
}
let ch = combination_str.chars().nth(9).unwrap();
let node_content = if ch == '#' | else {
PotState::Empty
};
combinations_map.insert(
prev_combination_id.unwrap(),
Combination::Node(node_content),
);
}
combinations_map
}
fn get_result_for_combination_vec(
combinations_map: &mut CombinationsMap,
combination_vec: &mut PlantsState,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for item in combination_vec {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) = combinations_map.get(&combination_id).unwrap()
{
prev_id = if *item {
combination_branch.has_plant
} else {
combination_branch.empty
};
if prev_id.is_none() {
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_input_combinations() -> Vec<String> {
let mut file = File::open("src/input.txt").expect("Unable to open the file");
let mut contents = String::new();
file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let descriptions: Vec<String> = contents.lines().clone().map(|x| x.to_string()).collect();
descriptions
}
fn get_new_state_after_one_generation(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
) -> PlantsState {
let mut new_state: PlantsState = vec![];
let len = orig_state.len();
for idx in 0..len {
if idx < 2 || idx >= len - 2 {
new_state.push(orig_state[idx]);
continue;
}
let mut combination_vec: PlantsState = vec![
orig_state[idx - 2],
orig_state[idx - 1],
orig_state[idx],
orig_state[idx + 1],
orig_state[idx + 2],
];
let new_state_item =
match get_result_for_combination_vec(&mut combinations_map, &mut combination_vec)
.unwrap_or(PotState::Empty)
{
PotState::HasPlant => true,
PotState::Empty => false,
};
new_state.push(new_state_item);
}
new_state
}
fn get_new_state_after_n_generations(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> PlantsState {
let mut new_state: PlantsState = orig_state.clone();
for _ in 0..n_generations {
new_state = get_new_state_after_one_generation(&mut new_state, &mut combinations_map);
}
new_state
}
fn get_pots_with_plant_sum(plants_state: &mut PlantsState) -> i64 {
let mut sum: i64 = 0;
for (idx, state_item) in plants_state.iter().enumerate() {
if *state_item {
sum += idx as i64 - OFFSET as i64;
}
}
sum
}
fn get_pots_with_plant_sum_using_pattern(
orig_state: &mut PlantsState,
mut combinations_map: &mut CombinationsMap,
n_generations: usize,
) -> i64 {
let mut sum: i64;
let mut new_state: PlantsState = orig_state.clone();
let mut last_idx: i64 = 100;
let mut diff_a = 0;
let mut diff_b = 0;
let mut diff_c;
// the number 100 is a random high-enough number found empirically
new_state =
get_new_state_after_n_generations(&mut new_state, &mut combinations_map, last_idx as usize);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
for _ in 0..100 {
diff_c = diff_b;
diff_b = diff_a;
let prev_sum = sum;
new_state = get_new_state_after_n_generations(&mut new_state, &mut combinations_map, 1);
sum = get_pots_with_plant_sum(&mut new_state) as i64;
last_idx += 1;
diff_a = sum - prev_sum;
if diff_a!= 0 && diff_a == diff_b && diff_b == diff_c {
break;
}
}
sum + diff_a * (n_generations as i64 - last_idx as i64)
}
fn main() {
let mut input_combinations = get_input_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut input_combinations);
let mut state_vector = convert_state_str_to_vec(INITIAL_STATE);
let mut final_state_20 =
get_new_state_after_n_generations(&mut state_vector, &mut combinations_map, 20);
let sum_20 = get_pots_with_plant_sum(&mut final_state_20);
let sum_5b =
get_pots_with_plant_sum_using_pattern(&mut state_vector, &mut combinations_map, 50_000_000_000);
println!("Results:");
println!("- (1) sum of pots with plant for 20: {}", sum_20);
println!("- (2) sum of pots with plant for 5b: {}", sum_5b);
}
#[cfg(test)]
mod tests {
use super::*;
fn get_result_for_combination_str(
combinations_map: &mut CombinationsMap,
combination_str: &str,
) -> Option<PotState> {
let mut result: Option<PotState> = None;
let mut prev_id: Option<CombinationId> = None;
for ch in combination_str.chars() {
let combination_id = prev_id.unwrap_or(0);
if let Combination::Branch(combination_branch) =
combinations_map.get(&combination_id).unwrap()
{
let field = if ch == '#' {
combination_branch.has_plant
} else {
combination_branch.empty
};
if field.is_some() {
prev_id = field;
} else {
prev_id = None;
break;
}
}
}
if prev_id.is_some() {
if let Combination::Node(pot_state) = combinations_map.get(&prev_id.unwrap()).unwrap() {
result = Some(*pot_state);
}
}
result
}
fn get_example_combinations() -> Vec<String> {
vec![
"...## => #",
"..#.. => #",
".#... => #",
".#.#. => #",
".#.## => #",
".##.. => #",
".#### => #",
"#.#.# => #",
"#.### => #",
"##.#. => #",
"##.## => #",
"###.. => #",
"###.# => #",
"####. => #",
"..... =>.",
]
.iter()
.map(|x| x.to_string())
.collect()
}
#[test]
fn test_convert_state_str_to_vec() {
let result = convert_state_str_to_vec("#..##");
let mut expected = vec![true, false, false, true, true];
for _ in 0..OFFSET {
expected.insert(0, false);
}
for _ in 0..OFFSET {
expected.push(false);
}
assert_eq!(result, expected)
}
#[test]
fn test_convert_strs_to_combinations_map() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "...##"),
Some(PotState::HasPlant)
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "#####"),
None,
);
assert_eq!(
get_result_for_combination_str(&mut combinations_map, "....."),
Some(PotState::Empty),
);
}
#[test]
fn test_get_new_state_after_one_generation() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let expected_final_state = convert_state_str_to_vec("#...#....#.....#..#..#..#");
let new_state = get_new_state_after_one_generation(&mut orig_state, &mut combinations_map);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_new_state_after_n_generations() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("...#..#.#..##......###...###...........");
let expected_final_state = convert_state_str_to_vec(".#....##....#####...#######....#.#..##.");
let new_state = get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
assert_eq!(new_state, expected_final_state);
}
#[test]
fn test_get_pots_with_plant_sum() {
let mut combinations_strs = get_example_combinations();
let mut combinations_map = convert_strs_to_combinations_map(&mut combinations_strs);
let mut orig_state = convert_state_str_to_vec("#..#.#..##......###...###");
let mut new_state =
get_new_state_after_n_generations(&mut orig_state, &mut combinations_map, 20);
let sum = get_pots_with_plant_sum(&mut new_state);
assert_eq!(sum, 325);
}
}
| {
PotState::HasPlant
} | conditional_block |
fetch.rs | use crate::{cargo::Source, util, Krate};
use anyhow::{Context, Error};
use bytes::Bytes;
use reqwest::Client;
use std::path::Path;
use tracing::{error, warn};
use tracing_futures::Instrument;
pub(crate) enum KrateSource {
Registry(Bytes),
Git(crate::git::GitSource),
}
impl KrateSource {
pub(crate) fn len(&self) -> usize {
match self {
Self::Registry(bytes) => bytes.len(),
Self::Git(gs) => gs.db.len() + gs.checkout.as_ref().map(|s| s.len()).unwrap_or(0),
}
}
}
pub(crate) async fn from_registry(client: &Client, krate: &Krate) -> Result<KrateSource, Error> {
async {
match &krate.source {
Source::Git { url, rev,.. } => via_git(&url.clone(), rev).await.map(KrateSource::Git),
Source::Registry { registry, chksum } => {
let url = registry.download_url(krate);
let response = client.get(&url).send().await?.error_for_status()?;
let res = util::convert_response(response).await?;
let content = res.into_body();
util::validate_checksum(&content, &chksum)?;
Ok(KrateSource::Registry(content))
}
}
}
.instrument(tracing::debug_span!("fetch"))
.await
}
pub async fn via_git(url: &url::Url, rev: &str) -> Result<crate::git::GitSource, Error> {
// Create a temporary directory to fetch the repo into
let temp_dir = tempfile::tempdir()?;
// Create another temporary directory where we *may* checkout submodules into
let submodule_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let fetch_url = url.as_str().to_owned();
let fetch_rev = rev.to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &fetch_url, &mut |mut opts| {
opts.download_tags(git2::AutotagOption::All);
repo.remote_anonymous(&fetch_url)?
.fetch(
&[
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
// Ensure that the repo actually contains the revision we need | Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
let fetch_rev = rev.to_owned();
let temp_db_path = temp_dir.path().to_owned();
let checkout = tokio::task::spawn(async move {
match crate::git::prepare_submodules(
temp_db_path,
submodule_dir.path().to_owned(),
fetch_rev.clone(),
)
.instrument(tracing::debug_span!("submodule checkout"))
.await
{
Ok(_) => {
util::pack_tar(submodule_dir.path())
.instrument(tracing::debug_span!("tarballing checkout", rev = %fetch_rev))
.await
}
Err(e) => Err(e),
}
});
let (db, checkout) = tokio::join!(
async {
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarballing db", %url, %rev))
.await
},
checkout,
);
Ok(crate::git::GitSource {
db: db?,
checkout: checkout?.ok(),
})
}
pub async fn registry(
url: &url::Url,
krates: impl Iterator<Item = String> + Send +'static,
) -> Result<Bytes, Error> {
// We don't bother to suport older versions of cargo that don't support
// bare checkouts of registry indexes, as that has been in since early 2017
// See https://github.com/rust-lang/cargo/blob/0e38712d4d7b346747bf91fb26cce8df6934e178/src/cargo/sources/registry/remote.rs#L61
// for details on why cargo still does what it does
let temp_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
//init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let url = url.as_str().to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &url, &mut |mut opts| {
repo.remote_anonymous(&url)?
.fetch(
&[
"refs/heads/master:refs/remotes/origin/master",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
let write_cache = tracing::span!(tracing::Level::DEBUG, "write-cache-entries",);
write_cache.in_scope(|| {
if let Err(e) = write_cache_entries(repo, krates) {
error!("Failed to write all.cache entries: {:#}", e);
}
});
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
// We also write a `.last-updated` file just like cargo so that cargo knows
// the timestamp of the fetch
std::fs::File::create(temp_dir.path().join(".last-updated"))
.context("failed to create.last-updated")?;
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarball"))
.await
}
/// Writes.cache entries in the registry's directory for all of the specified
/// crates. Cargo will write these entries itself if they don't exist the first
/// time it tries to access the crate's metadata, but this noticeably increases
/// initial fetch times. (see src/cargo/sources/registry/index.rs)
fn write_cache_entries(
repo: git2::Repository,
krates: impl Iterator<Item = String>,
) -> Result<(), Error> {
// the path to the repository itself for bare repositories.
let cache = if repo.is_bare() {
repo.path().join(".cache")
} else {
repo.path().parent().unwrap().join(".cache")
};
std::fs::create_dir_all(&cache)?;
// Every.cache entry encodes the sha1 it was created at in the beginning
// so that cargo knows when an entry is out of date with the current HEAD
let head_commit = {
let branch = repo
.find_branch("origin/master", git2::BranchType::Remote)
.context("failed to find'master' branch")?;
branch
.get()
.target()
.context("unable to find commit for'master' branch")?
};
let head_commit_str = head_commit.to_string();
let tree = repo
.find_commit(head_commit)
.context("failed to find HEAD commit")?
.tree()
.context("failed to get commit tree")?;
// These can get rather large, so be generous
let mut buffer = Vec::with_capacity(32 * 1024);
for krate in krates {
// cargo always normalizes paths to lowercase
let lkrate = krate.to_lowercase();
let mut rel_path = crate::cargo::get_crate_prefix(&lkrate);
rel_path.push('/');
rel_path.push_str(&lkrate);
let path = &Path::new(&rel_path);
buffer.clear();
{
let write_cache = tracing::span!(tracing::Level::DEBUG, "summary", %krate);
let _s = write_cache.enter();
match write_summary(path, &repo, &tree, head_commit_str.as_bytes(), &mut buffer) {
Ok(num_versions) => tracing::debug!("wrote entries for {} versions", num_versions),
Err(e) => {
warn!("unable to create cache entry for crate: {:#}", e);
continue;
}
}
}
let cache_path = cache.join(rel_path);
if let Err(e) = std::fs::create_dir_all(cache_path.parent().unwrap()) {
warn!(
"failed to create parent.cache directories for crate '{}': {:#}",
krate, e
);
continue;
}
if let Err(e) = std::fs::write(&cache_path, &buffer) {
warn!(
"failed to write.cache entry for crate '{}': {:#}",
krate, e
);
}
}
Ok(())
}
fn write_summary<'blob>(
path: &Path,
repo: &'blob git2::Repository,
tree: &git2::Tree<'blob>,
version: &[u8],
buffer: &mut Vec<u8>,
) -> Result<usize, Error> {
let entry = tree
.get_path(path)
.context("failed to get entry for path")?;
let object = entry
.to_object(repo)
.context("failed to get object for entry")?;
let blob = object.as_blob().context("object is not a blob")?;
// Writes the binary summary for the crate to a buffer, see
// src/cargo/sources/registry/index.rs for details
const CURRENT_CACHE_VERSION: u8 = 1;
buffer.push(CURRENT_CACHE_VERSION);
buffer.extend_from_slice(version);
buffer.push(0);
let mut version_count = 0;
for (version, data) in iter_index_entries(blob.content()) {
buffer.extend_from_slice(version);
buffer.push(0);
buffer.extend_from_slice(data);
buffer.push(0);
version_count += 1;
}
Ok(version_count)
}
fn iter_index_entries(blob: &[u8]) -> impl Iterator<Item = (&[u8], &[u8])> {
fn split_blob(haystack: &[u8]) -> impl Iterator<Item = &[u8]> {
struct Split<'a> {
haystack: &'a [u8],
}
impl<'a> Iterator for Split<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<&'a [u8]> {
if self.haystack.is_empty() {
return None;
}
let (ret, remaining) = match memchr::memchr(b'\n', self.haystack) {
Some(pos) => (&self.haystack[..pos], &self.haystack[pos + 1..]),
None => (self.haystack, &[][..]),
};
self.haystack = remaining;
Some(ret)
}
}
Split { haystack }
}
split_blob(blob).filter_map(|line| {
std::str::from_utf8(line).ok().and_then(|lstr| {
// We need to get the version, as each entry in the.cache
// entry is a tuple of the version and the summary
lstr.find("\"vers\":")
.map(|ind| ind + 7)
.and_then(|ind| lstr[ind..].find('"').map(|bind| ind + bind + 1))
.and_then(|bind| {
lstr[bind..]
.find('"')
.map(|eind| (&line[bind..bind + eind], line))
})
})
})
}
#[cfg(test)]
mod test {
use super::iter_index_entries;
#[test]
fn parses_unpretty() {
const BLOB: &[u8] = include_bytes!("../tests/unpretty-wasi");
let expected = [
"0.0.0",
"0.3.0",
"0.4.0",
"0.5.0",
"0.6.0",
"0.7.0",
"0.9.0+wasi-snapshot-preview1",
"0.10.0+wasi-snapshot-preview1",
];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
#[test]
fn parses_pretty() {
const BLOB: &[u8] = include_bytes!("../tests/pretty-crate");
let expected = ["0.2.0", "0.3.0", "0.3.1", "0.4.0", "0.5.0"];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
} | repo.revparse_single(&fetch_rev)
.with_context(|| format!("{} doesn't contain rev '{}'", fetch_url, fetch_rev))?;
| random_line_split |
fetch.rs | use crate::{cargo::Source, util, Krate};
use anyhow::{Context, Error};
use bytes::Bytes;
use reqwest::Client;
use std::path::Path;
use tracing::{error, warn};
use tracing_futures::Instrument;
pub(crate) enum KrateSource {
Registry(Bytes),
Git(crate::git::GitSource),
}
impl KrateSource {
pub(crate) fn len(&self) -> usize {
match self {
Self::Registry(bytes) => bytes.len(),
Self::Git(gs) => gs.db.len() + gs.checkout.as_ref().map(|s| s.len()).unwrap_or(0),
}
}
}
pub(crate) async fn from_registry(client: &Client, krate: &Krate) -> Result<KrateSource, Error> {
async {
match &krate.source {
Source::Git { url, rev,.. } => via_git(&url.clone(), rev).await.map(KrateSource::Git),
Source::Registry { registry, chksum } => {
let url = registry.download_url(krate);
let response = client.get(&url).send().await?.error_for_status()?;
let res = util::convert_response(response).await?;
let content = res.into_body();
util::validate_checksum(&content, &chksum)?;
Ok(KrateSource::Registry(content))
}
}
}
.instrument(tracing::debug_span!("fetch"))
.await
}
pub async fn via_git(url: &url::Url, rev: &str) -> Result<crate::git::GitSource, Error> {
// Create a temporary directory to fetch the repo into
let temp_dir = tempfile::tempdir()?;
// Create another temporary directory where we *may* checkout submodules into
let submodule_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let fetch_url = url.as_str().to_owned();
let fetch_rev = rev.to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &fetch_url, &mut |mut opts| {
opts.download_tags(git2::AutotagOption::All);
repo.remote_anonymous(&fetch_url)?
.fetch(
&[
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
// Ensure that the repo actually contains the revision we need
repo.revparse_single(&fetch_rev)
.with_context(|| format!("{} doesn't contain rev '{}'", fetch_url, fetch_rev))?;
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
let fetch_rev = rev.to_owned();
let temp_db_path = temp_dir.path().to_owned();
let checkout = tokio::task::spawn(async move {
match crate::git::prepare_submodules(
temp_db_path,
submodule_dir.path().to_owned(),
fetch_rev.clone(),
)
.instrument(tracing::debug_span!("submodule checkout"))
.await
{
Ok(_) => {
util::pack_tar(submodule_dir.path())
.instrument(tracing::debug_span!("tarballing checkout", rev = %fetch_rev))
.await
}
Err(e) => Err(e),
}
});
let (db, checkout) = tokio::join!(
async {
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarballing db", %url, %rev))
.await
},
checkout,
);
Ok(crate::git::GitSource {
db: db?,
checkout: checkout?.ok(),
})
}
pub async fn registry(
url: &url::Url,
krates: impl Iterator<Item = String> + Send +'static,
) -> Result<Bytes, Error> |
crate::git::with_fetch_options(&git_config, &url, &mut |mut opts| {
repo.remote_anonymous(&url)?
.fetch(
&[
"refs/heads/master:refs/remotes/origin/master",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
let write_cache = tracing::span!(tracing::Level::DEBUG, "write-cache-entries",);
write_cache.in_scope(|| {
if let Err(e) = write_cache_entries(repo, krates) {
error!("Failed to write all.cache entries: {:#}", e);
}
});
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
// We also write a `.last-updated` file just like cargo so that cargo knows
// the timestamp of the fetch
std::fs::File::create(temp_dir.path().join(".last-updated"))
.context("failed to create.last-updated")?;
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarball"))
.await
}
/// Writes.cache entries in the registry's directory for all of the specified
/// crates. Cargo will write these entries itself if they don't exist the first
/// time it tries to access the crate's metadata, but this noticeably increases
/// initial fetch times. (see src/cargo/sources/registry/index.rs)
fn write_cache_entries(
repo: git2::Repository,
krates: impl Iterator<Item = String>,
) -> Result<(), Error> {
// the path to the repository itself for bare repositories.
let cache = if repo.is_bare() {
repo.path().join(".cache")
} else {
repo.path().parent().unwrap().join(".cache")
};
std::fs::create_dir_all(&cache)?;
// Every.cache entry encodes the sha1 it was created at in the beginning
// so that cargo knows when an entry is out of date with the current HEAD
let head_commit = {
let branch = repo
.find_branch("origin/master", git2::BranchType::Remote)
.context("failed to find'master' branch")?;
branch
.get()
.target()
.context("unable to find commit for'master' branch")?
};
let head_commit_str = head_commit.to_string();
let tree = repo
.find_commit(head_commit)
.context("failed to find HEAD commit")?
.tree()
.context("failed to get commit tree")?;
// These can get rather large, so be generous
let mut buffer = Vec::with_capacity(32 * 1024);
for krate in krates {
// cargo always normalizes paths to lowercase
let lkrate = krate.to_lowercase();
let mut rel_path = crate::cargo::get_crate_prefix(&lkrate);
rel_path.push('/');
rel_path.push_str(&lkrate);
let path = &Path::new(&rel_path);
buffer.clear();
{
let write_cache = tracing::span!(tracing::Level::DEBUG, "summary", %krate);
let _s = write_cache.enter();
match write_summary(path, &repo, &tree, head_commit_str.as_bytes(), &mut buffer) {
Ok(num_versions) => tracing::debug!("wrote entries for {} versions", num_versions),
Err(e) => {
warn!("unable to create cache entry for crate: {:#}", e);
continue;
}
}
}
let cache_path = cache.join(rel_path);
if let Err(e) = std::fs::create_dir_all(cache_path.parent().unwrap()) {
warn!(
"failed to create parent.cache directories for crate '{}': {:#}",
krate, e
);
continue;
}
if let Err(e) = std::fs::write(&cache_path, &buffer) {
warn!(
"failed to write.cache entry for crate '{}': {:#}",
krate, e
);
}
}
Ok(())
}
fn write_summary<'blob>(
path: &Path,
repo: &'blob git2::Repository,
tree: &git2::Tree<'blob>,
version: &[u8],
buffer: &mut Vec<u8>,
) -> Result<usize, Error> {
let entry = tree
.get_path(path)
.context("failed to get entry for path")?;
let object = entry
.to_object(repo)
.context("failed to get object for entry")?;
let blob = object.as_blob().context("object is not a blob")?;
// Writes the binary summary for the crate to a buffer, see
// src/cargo/sources/registry/index.rs for details
const CURRENT_CACHE_VERSION: u8 = 1;
buffer.push(CURRENT_CACHE_VERSION);
buffer.extend_from_slice(version);
buffer.push(0);
let mut version_count = 0;
for (version, data) in iter_index_entries(blob.content()) {
buffer.extend_from_slice(version);
buffer.push(0);
buffer.extend_from_slice(data);
buffer.push(0);
version_count += 1;
}
Ok(version_count)
}
fn iter_index_entries(blob: &[u8]) -> impl Iterator<Item = (&[u8], &[u8])> {
fn split_blob(haystack: &[u8]) -> impl Iterator<Item = &[u8]> {
struct Split<'a> {
haystack: &'a [u8],
}
impl<'a> Iterator for Split<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<&'a [u8]> {
if self.haystack.is_empty() {
return None;
}
let (ret, remaining) = match memchr::memchr(b'\n', self.haystack) {
Some(pos) => (&self.haystack[..pos], &self.haystack[pos + 1..]),
None => (self.haystack, &[][..]),
};
self.haystack = remaining;
Some(ret)
}
}
Split { haystack }
}
split_blob(blob).filter_map(|line| {
std::str::from_utf8(line).ok().and_then(|lstr| {
// We need to get the version, as each entry in the.cache
// entry is a tuple of the version and the summary
lstr.find("\"vers\":")
.map(|ind| ind + 7)
.and_then(|ind| lstr[ind..].find('"').map(|bind| ind + bind + 1))
.and_then(|bind| {
lstr[bind..]
.find('"')
.map(|eind| (&line[bind..bind + eind], line))
})
})
})
}
#[cfg(test)]
mod test {
use super::iter_index_entries;
#[test]
fn parses_unpretty() {
const BLOB: &[u8] = include_bytes!("../tests/unpretty-wasi");
let expected = [
"0.0.0",
"0.3.0",
"0.4.0",
"0.5.0",
"0.6.0",
"0.7.0",
"0.9.0+wasi-snapshot-preview1",
"0.10.0+wasi-snapshot-preview1",
];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
#[test]
fn parses_pretty() {
const BLOB: &[u8] = include_bytes!("../tests/pretty-crate");
let expected = ["0.2.0", "0.3.0", "0.3.1", "0.4.0", "0.5.0"];
assert_eq!(expected.len(), iter_index_entries(BLOB).count());
for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(BLOB)) {
assert_eq!(exp.as_bytes(), actual);
}
}
}
| {
// We don't bother to suport older versions of cargo that don't support
// bare checkouts of registry indexes, as that has been in since early 2017
// See https://github.com/rust-lang/cargo/blob/0e38712d4d7b346747bf91fb26cce8df6934e178/src/cargo/sources/registry/remote.rs#L61
// for details on why cargo still does what it does
let temp_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
//init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let url = url.as_str().to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?; | identifier_body |
fetch.rs | use crate::{cargo::Source, util, Krate};
use anyhow::{Context, Error};
use bytes::Bytes;
use reqwest::Client;
use std::path::Path;
use tracing::{error, warn};
use tracing_futures::Instrument;
pub(crate) enum KrateSource {
Registry(Bytes),
Git(crate::git::GitSource),
}
impl KrateSource {
pub(crate) fn len(&self) -> usize {
match self {
Self::Registry(bytes) => bytes.len(),
Self::Git(gs) => gs.db.len() + gs.checkout.as_ref().map(|s| s.len()).unwrap_or(0),
}
}
}
pub(crate) async fn | (client: &Client, krate: &Krate) -> Result<KrateSource, Error> {
async {
match &krate.source {
Source::Git { url, rev,.. } => via_git(&url.clone(), rev).await.map(KrateSource::Git),
Source::Registry { registry, chksum } => {
let url = registry.download_url(krate);
let response = client.get(&url).send().await?.error_for_status()?;
let res = util::convert_response(response).await?;
let content = res.into_body();
util::validate_checksum(&content, &chksum)?;
Ok(KrateSource::Registry(content))
}
}
}
.instrument(tracing::debug_span!("fetch"))
.await
}
pub async fn via_git(url: &url::Url, rev: &str) -> Result<crate::git::GitSource, Error> {
// Create a temporary directory to fetch the repo into
let temp_dir = tempfile::tempdir()?;
// Create another temporary directory where we *may* checkout submodules into
let submodule_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let fetch_url = url.as_str().to_owned();
let fetch_rev = rev.to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &fetch_url, &mut |mut opts| {
opts.download_tags(git2::AutotagOption::All);
repo.remote_anonymous(&fetch_url)?
.fetch(
&[
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
// Ensure that the repo actually contains the revision we need
repo.revparse_single(&fetch_rev)
.with_context(|| format!("{} doesn't contain rev '{}'", fetch_url, fetch_rev))?;
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
let fetch_rev = rev.to_owned();
let temp_db_path = temp_dir.path().to_owned();
let checkout = tokio::task::spawn(async move {
match crate::git::prepare_submodules(
temp_db_path,
submodule_dir.path().to_owned(),
fetch_rev.clone(),
)
.instrument(tracing::debug_span!("submodule checkout"))
.await
{
Ok(_) => {
util::pack_tar(submodule_dir.path())
.instrument(tracing::debug_span!("tarballing checkout", rev = %fetch_rev))
.await
}
Err(e) => Err(e),
}
});
let (db, checkout) = tokio::join!(
async {
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarballing db", %url, %rev))
.await
},
checkout,
);
Ok(crate::git::GitSource {
db: db?,
checkout: checkout?.ok(),
})
}
pub async fn registry(
url: &url::Url,
krates: impl Iterator<Item = String> + Send +'static,
) -> Result<Bytes, Error> {
// We don't bother to suport older versions of cargo that don't support
// bare checkouts of registry indexes, as that has been in since early 2017
// See https://github.com/rust-lang/cargo/blob/0e38712d4d7b346747bf91fb26cce8df6934e178/src/cargo/sources/registry/remote.rs#L61
// for details on why cargo still does what it does
let temp_dir = tempfile::tempdir()?;
let mut init_opts = git2::RepositoryInitOptions::new();
//init_opts.bare(true);
init_opts.external_template(false);
let repo =
git2::Repository::init_opts(&temp_dir, &init_opts).context("failed to initialize repo")?;
let url = url.as_str().to_owned();
// We need to ship off the fetching to a blocking thread so we don't anger tokio
tokio::task::spawn_blocking(move || -> Result<(), Error> {
let git_config =
git2::Config::open_default().context("Failed to open default git config")?;
crate::git::with_fetch_options(&git_config, &url, &mut |mut opts| {
repo.remote_anonymous(&url)?
.fetch(
&[
"refs/heads/master:refs/remotes/origin/master",
"HEAD:refs/remotes/origin/HEAD",
],
Some(&mut opts),
None,
)
.context("Failed to fetch")
})?;
let write_cache = tracing::span!(tracing::Level::DEBUG, "write-cache-entries",);
write_cache.in_scope(|| {
if let Err(e) = write_cache_entries(repo, krates) {
error!("Failed to write all.cache entries: {:#}", e);
}
});
Ok(())
})
.instrument(tracing::debug_span!("fetch"))
.await??;
// We also write a `.last-updated` file just like cargo so that cargo knows
// the timestamp of the fetch
std::fs::File::create(temp_dir.path().join(".last-updated"))
.context("failed to create.last-updated")?;
util::pack_tar(temp_dir.path())
.instrument(tracing::debug_span!("tarball"))
.await
}
/// Writes `.cache` entries in the registry's directory for all of the specified
/// crates. Cargo will write these entries itself on first access if they are
/// missing, but pre-seeding them noticeably improves initial fetch times.
/// (see src/cargo/sources/registry/index.rs)
fn write_cache_entries(
    repo: git2::Repository,
    krates: impl Iterator<Item = String>,
) -> Result<(), Error> {
    // Bare repositories keep `.cache` directly under the repo path; otherwise
    // it sits next to the `.git` directory.
    let cache_dir = if repo.is_bare() {
        repo.path().join(".cache")
    } else {
        repo.path().parent().unwrap().join(".cache")
    };
    std::fs::create_dir_all(&cache_dir)?;
    // Every cache entry begins with the sha1 it was created at, so cargo can
    // detect entries that are stale relative to the current HEAD.
    let branch = repo
        .find_branch("origin/master", git2::BranchType::Remote)
        .context("failed to find'master' branch")?;
    let head_commit = branch
        .get()
        .target()
        .context("unable to find commit for'master' branch")?;
    let head_commit_str = head_commit.to_string();
    let tree = repo
        .find_commit(head_commit)
        .context("failed to find HEAD commit")?
        .tree()
        .context("failed to get commit tree")?;
    // Summaries can get rather large, so start with a generous buffer.
    let mut entry_buf = Vec::with_capacity(32 * 1024);
    for krate in krates {
        // cargo always normalizes paths to lowercase
        let lowered = krate.to_lowercase();
        let mut rel_path = crate::cargo::get_crate_prefix(&lowered);
        rel_path.push('/');
        rel_path.push_str(&lowered);
        entry_buf.clear();
        {
            // Scope the span so it only covers summary generation + logging.
            let span = tracing::span!(tracing::Level::DEBUG, "summary", %krate);
            let _guard = span.enter();
            match write_summary(
                Path::new(&rel_path),
                &repo,
                &tree,
                head_commit_str.as_bytes(),
                &mut entry_buf,
            ) {
                Ok(num_versions) => {
                    tracing::debug!("wrote entries for {} versions", num_versions)
                }
                Err(e) => {
                    // Skip this crate; a missing entry only costs cargo a
                    // slower first access, it is not fatal.
                    warn!("unable to create cache entry for crate: {:#}", e);
                    continue;
                }
            }
        }
        let cache_path = cache_dir.join(rel_path);
        if let Err(e) = std::fs::create_dir_all(cache_path.parent().unwrap()) {
            warn!(
                "failed to create parent.cache directories for crate '{}': {:#}",
                krate, e
            );
            continue;
        }
        if let Err(e) = std::fs::write(&cache_path, &entry_buf) {
            warn!(
                "failed to write.cache entry for crate '{}': {:#}",
                krate, e
            );
        }
    }
    Ok(())
}
fn write_summary<'blob>(
path: &Path,
repo: &'blob git2::Repository,
tree: &git2::Tree<'blob>,
version: &[u8],
buffer: &mut Vec<u8>,
) -> Result<usize, Error> {
let entry = tree
.get_path(path)
.context("failed to get entry for path")?;
let object = entry
.to_object(repo)
.context("failed to get object for entry")?;
let blob = object.as_blob().context("object is not a blob")?;
// Writes the binary summary for the crate to a buffer, see
// src/cargo/sources/registry/index.rs for details
const CURRENT_CACHE_VERSION: u8 = 1;
buffer.push(CURRENT_CACHE_VERSION);
buffer.extend_from_slice(version);
buffer.push(0);
let mut version_count = 0;
for (version, data) in iter_index_entries(blob.content()) {
buffer.extend_from_slice(version);
buffer.push(0);
buffer.extend_from_slice(data);
buffer.push(0);
version_count += 1;
}
Ok(version_count)
}
/// Iterates over the lines of a (possibly pretty-printed) registry index
/// blob, yielding `(version, line)` pairs: the raw semver bytes extracted
/// from the `"vers"` field, together with the full JSON line they came from.
///
/// Lines that are not valid UTF-8 or that lack a `"vers"` field are skipped.
///
/// Improvement: the hand-rolled `Split` iterator (and its `memchr`
/// dependency) is replaced with the standard library's `slice::split`.
/// Behavior is unchanged — `split` additionally yields an empty segment
/// after a trailing newline, but an empty line can never contain a `"vers"`
/// key, so `filter_map` drops it exactly as before.
fn iter_index_entries(blob: &[u8]) -> impl Iterator<Item = (&[u8], &[u8])> {
    blob.split(|&b| b == b'\n').filter_map(|line| {
        std::str::from_utf8(line).ok().and_then(|lstr| {
            // Locate the value of the "vers" field: find the key, then the
            // opening quote of its value, then the closing quote. This works
            // for both compact (`"vers":"x"`) and pretty (`"vers": "x"`)
            // formatting.
            lstr.find("\"vers\":")
                .map(|ind| ind + 7)
                .and_then(|ind| lstr[ind..].find('"').map(|bind| ind + bind + 1))
                .and_then(|bind| {
                    lstr[bind..]
                        .find('"')
                        .map(|eind| (&line[bind..bind + eind], line))
                })
        })
    })
}
#[cfg(test)]
mod test {
    use super::iter_index_entries;

    /// Asserts that the versions parsed from `blob` match `expected`, in
    /// order and in count.
    fn assert_versions(blob: &[u8], expected: &[&str]) {
        assert_eq!(expected.len(), iter_index_entries(blob).count());
        for (exp, (actual, _)) in expected.iter().zip(iter_index_entries(blob)) {
            assert_eq!(exp.as_bytes(), actual);
        }
    }

    #[test]
    fn parses_unpretty() {
        // Compact index blob: one minified JSON object per line.
        assert_versions(
            include_bytes!("../tests/unpretty-wasi"),
            &[
                "0.0.0",
                "0.3.0",
                "0.4.0",
                "0.5.0",
                "0.6.0",
                "0.7.0",
                "0.9.0+wasi-snapshot-preview1",
                "0.10.0+wasi-snapshot-preview1",
            ],
        );
    }

    #[test]
    fn parses_pretty() {
        // Pretty-printed index blob (whitespace around field separators).
        assert_versions(
            include_bytes!("../tests/pretty-crate"),
            &["0.2.0", "0.3.0", "0.3.1", "0.4.0", "0.5.0"],
        );
    }
}
| from_registry | identifier_name |
retransmission.rs | //! Retransmission is covered in section 6.3 of RFC 4960.
//!
//! 1. Perform round-trip time (RTT) measurements from the time a TSN is sent until it is
//! acknowledged.
//! a) A measurement must be made once per round-trip, but no more. I interpret this to mean
//! that only one measurement may be in progress at a time.
//! b) Measurements must not be made on retransmissions. If the TSN being measured is
//! retransmitted, the measurement must be aborted.
//! 2. Adjust the retransmission timeout (RTO) after every measurement is concluded.
//! a) Use the specified smoothing algorithm to calculate a new RTO.
//! b) Clamp the new RTO to RTO.Min.. Option<RTO.Max>
//! c) If RTTVAR is zero, increase it to the clock granularity.
//! 3. Manage the retransmission timer ("T3-rtx").
//! R1) On any DATA send, if the timer is not running, start the timer with RTO.
//! R2) If all outstanding data has been acknowledged, then cancel the timer.
//! R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
//! // TODO:
//! R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged (e.g. it
//! was dropped by the peer), then start the timer.
//! 4. Handle timer expiration.
//! // TODO:
//! E1) Update congestion control as needed.
//! - adjust ssthresh according to Section 7.2.3
//! - set cwnd to the MTU
//! E2) Double RTO up to RTO.Max to provide back-off.
//! E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet based on
//! the MTU.
//! - Any remaining DATA chunks should be "marked for retransmission" and sent as soon as
//! cwnd allows.
//! E4) Restart the timer according to (R1) above.
use std::time::{Duration, Instant};
use tokio_timer;
use super::Association;
use packet::chunk::{Chunk, GapAckBlock};
use packet::TSN;
use stack::settings::DEFAULT_SCTP_PARAMETERS;
/// The retransmission state that will be embedded in every association.
pub struct State {
    /// The T3-rtx retransmission timer; `None` when the timer is not running.
    pub timer: Option<tokio_timer::Sleep>,
    /// RTT measurement state and the derived retransmission timeout (RTO).
    pub measurements: Measurements,
    /// Highest TSN transmitted so far; used to distinguish fresh sends from
    /// retransmissions (see `Retransmission::on_outgoing_data`).
    pub tx_high_water_mark: TSN,
}
impl State {
    /// Builds the initial retransmission state for an association: the
    /// T3-rtx timer is not running, RTT measurement state is fresh, and the
    /// transmit high-water mark starts at the given TSN.
    pub fn new(tx_high_water_mark: TSN) -> State {
        let measurements = Measurements::new();
        State {
            tx_high_water_mark,
            measurements,
            timer: None,
        }
    }
}
/// Use a trait to add retransmission functionality to Association.
///
/// This is awkward, but there really is a huge amount of state in an association, with many parts
/// interdependent on many other parts. This makes it difficult to cleanly separate concerns such
/// as retransmission in an obvious and simple way. (I.e. without a lot of Rc<RefCell<_>>, for
/// instance.)
///
/// Most C network stack implementations I've seen just interleave all the concerns together, and
/// (in my opinion) this reduces the readability. So we can at least put retransmission concerns
/// in a different source file, even if doing so is only cosmetic.
///
/// We could also have just added more inherent methods to Association here, but I'm hoping that
/// using a trait is more clear.
pub trait Retransmission {
    /// Called for every transmitted DATA chunk (fresh send or retransmission).
    fn on_outgoing_data(&mut self, chunk_tsn: TSN);
    /// Called when a SACK advances the cumulative TSN ack point.
    fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN, earliest_outstanding_tsn: Option<TSN>);
    fn on_gap_ack_block(&mut self, start: TSN, end: TSN); // TODO remove
    /// Called with all gap ack blocks of a received SACK.
    fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]);
    /// Called when the T3-rtx retransmission timer expires.
    fn on_timeout(&mut self);
}
impl Retransmission for Association {
    /// Per-DATA-chunk bookkeeping: RTT sampling on fresh sends, and timer
    /// rule R1.
    fn on_outgoing_data(&mut self, chunk_tsn: TSN) {
        // On fresh transmissions, perform RTT measurements.
        if chunk_tsn > self.rtx.tx_high_water_mark {
            // This is a newly sent chunk (not a retransmission), so take a measurement if needed.
            self.rtx.measurements.on_outgoing_chunk(chunk_tsn);
            // Raise the high water mark.
            self.rtx.tx_high_water_mark = chunk_tsn;
        }
        // R1) On any transmission, start the rtx timer if it is not already running.
        if self.rtx.timer.is_none() {
            self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto))
        }
    }

    /// Handles an advanced cumulative TSN ack point: concludes any pending
    /// RTT measurement and applies timer rules R2/R3.
    fn on_cumulative_ack(
        &mut self,
        cumulative_tsn_ack: TSN,
        earliest_outstanding_tsn: Option<TSN>,
    ) {
        // Perform RTT measurements
        self.rtx.measurements.on_cumulative_ack(cumulative_tsn_ack);
        if self.data.sent_queue.is_empty() && self.rtx.timer.is_some() {
            // R2) If all outstanding data has been acknowledged, then cancel the timer.
            self.rtx.timer = None;
        } else if let Some(earliest_outstanding_tsn) = earliest_outstanding_tsn {
            // R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
            if cumulative_tsn_ack >= earliest_outstanding_tsn {
                self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto));
            }
        }
    }

    // TODO remove
    fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
        // Perform RTT measurements
        self.rtx.measurements.on_gap_ack_block(start, end);
    }

    /// Walks the SACK's gap ack blocks: completes RTT measurements for acked
    /// ranges and (eventually) marks the gaps between them for retransmission.
    fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]) {
        // `tsn` tracks the end of the last processed acked range; any gap
        // lies between it and the next block's start.
        let mut tsn = cumulative_tsn_ack;
        for block in gap_ack_blocks {
            // Block offsets are relative to the cumulative TSN ack point.
            let ack_start = cumulative_tsn_ack + block.start as u32;
            let ack_end = cumulative_tsn_ack + block.end as u32;
            // Chunks in the TSN range [ack_start,ack_end] (inclusive) are assumed to
            // have been received. However, the receiver has the option of discarding them and
            // having us retransmit them, so they must stay in the sent queue until acknowledged
            // via the cumulative TSN.
            // Perform RTT measurements, if needed
            self.rtx.measurements.on_gap_ack_block(ack_start, ack_end);
            // This should always be true if the peer is constructing SACKs properly.
            if ack_start > tsn + 1 {
                let gap_start = tsn + 1;
                let gap_end = ack_start - 1;
                // This could just be a for loop, whenever std::iter::Step becomes stable.
                let mut gap_tsn = gap_start;
                loop {
                    // TODO: Mark this gap chunk for retransmission.
                    gap_tsn += 1;
                    if gap_tsn > gap_end {
                        break;
                    }
                }
            }
            // TODO: Store received ranges, so we can know if the peer decides to drop them?
            // (So we can implement R4.)
            tsn = ack_end;
        }
        // R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged
        // (e.g. it was dropped by the peer), then start the timer.
        //
        // TODO
    }

    /// T3-rtx expiry handling: exponential RTO back-off (E2) followed by
    /// retransmission (E3) and a timer restart (E4, inside
    /// `retransmit_immediate`).
    fn on_timeout(&mut self) {
        // E1) Update congestion control as needed.
        //     - adjust ssthresh according to Section 7.2.3
        //     - set cwnd to the MTU
        // TODO
        // E2) Double RTO up to RTO.Max to provide back-off.
        self.rtx.measurements.rto *= 2;
        let rto_max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
        self.rtx.measurements.rto = self.rtx.measurements.rto.min(rto_max);
        // E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet
        //     based on the MTU.
        retransmit_immediate(self);
        // Any remaining DATA chunks should be "marked for retransmission" and sent as soon
        // as cwnd allows.
        retransmit_all_except_first(self);
    }
}
/// Immediately retransmit the earliest unacknowledged sent chunk. Ideally, we would see how many
/// of the earliest chunks could fit into a packet given the current MTU.
fn retransmit_immediate(association: &mut Association) {
// Retrieve the first unacknowledged chunk. | if let Some(rtx_chunk) = rtx_chunk {
// Re-transmit chunk
println!("re-sending chunk: {:?}", rtx_chunk);
association.send_chunk(Chunk::Data(rtx_chunk));
// E4) Restart timer
association.rtx.timer = Some(
association
.resources
.timer
.sleep(association.rtx.measurements.rto),
)
}
}
/// "Mark" a range of unacknowledged packets for retransmission.
fn retransmit_range(association: &mut Association, first: TSN, last: TSN) {
    // TODO: Don't retransmit chunks that were acknowledged in the gap-ack blocks of the most
    // recent SACK.
    // Move the unacknowledged chunks in [first, last] back onto the send
    // queue so they go out again when cwnd allows.
    let data = &mut association.data;
    let requeued_bytes = data
        .sent_queue
        .transfer_range(&mut data.send_queue, first, last);
    // Window accounting: bytes pulled out of the sent queue no longer count
    // against the peer's receive window.
    association.peer_rwnd += requeued_bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission.
#[allow(unused)]
fn retransmit_all(association: &mut Association) {
    // Every chunk in the sent queue goes back onto the send queue.
    let data = &mut association.data;
    let requeued_bytes = data.sent_queue.transfer_all(&mut data.send_queue);
    // Window accounting: bytes pulled out of the sent queue no longer count
    // against the peer's receive window.
    association.peer_rwnd += requeued_bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission, except for the first. (Which was
/// presumably sent via retransmit_immediate().)
fn retransmit_all_except_first(association: &mut Association) {
    let first = association.data.sent_queue.front().map(|c| c.tsn);
    let last = association.data.sent_queue.back().map(|c| c.tsn);
    if let (Some(first), Some(last)) = (first, last) {
        // Only requeue when there is something beyond the first chunk.
        if last > first {
            retransmit_range(association, first + 1, last);
        }
    }
}
/// Smoothed round-trip-time statistics carried between measurements
/// (RFC 4960 6.3.1).
#[derive(Clone, Copy)]
struct SmoothingState {
    srtt: Duration,   // Smoothed round-trip time
    rttvar: Duration, // Round-trip time variation
}
/// RTT measurement machinery: at most one measurement is in flight at a
/// time, and completed measurements feed the smoothed RTT state that derives
/// the retransmission timeout.
pub struct Measurements {
    rtt_measurement: Option<(TSN, Instant)>, // An in-progress RTT measurement.
    rtt_smoothing: Option<SmoothingState>,   // None until the first sample (6.3.1 C2).
    rto: Duration,                           // Current retransmission timeout.
}
/// Clock granularity in nanoseconds. Tokio-timer 0.1 has a granularity of 100ms, and tokio-timer
/// 0.2 has a granularity of 1ms.
/// Used as the RTTVAR floor per RFC 4960 6.3.1 (G1) in `complete_rtt_measurement`.
/// TODO: Upgrade to tokio-timer 0.2!
const CLOCK_GRANULARITY_NS: u32 = 100_000_000; // 100ms
impl Measurements {
pub fn new() -> Measurements {
Measurements {
rtt_measurement: None,
rtt_smoothing: None,
rto: Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_initial),
}
}
/// This should be called for each fresh outgoing chunk (not on retransmissions), so we can
/// decide whether to start a new RTT measurement or not.
pub fn on_outgoing_chunk(&mut self, chunk_tsn: TSN) {
// Start a RTT measurement if one is not already in progress.
if self.rtt_measurement.is_none() {
self.rtt_measurement = Some((chunk_tsn, Instant::now()));
}
}
/// This should be called for each received SACK, so the Measurements can conclude an RTT
/// measurement, if needed.
pub fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn <= cumulative_tsn_ack {
self.complete_rtt_measurement();
}
}
}
/// This should be called for each gap ack block in each received SACK, so the Measurements
/// can conclude an RTT measurement, if needed.
pub fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn >= start && rtt_tsn <= end {
self.complete_rtt_measurement();
}
}
}
/// Conclude the current RTT measurement and adjust SRTT (smoothed RTT), RTTVAR (RTT variance),
/// and RTO (retransmission timeout) accordingly.
fn complete_rtt_measurement(&mut self) {
// We have received acknowledgement of the receipt of the measurement TSN, so calculate the
// RTT and related variables.
let (_, rtt_start) = self.rtt_measurement.take().unwrap(); // Caller verifies Some(_).
let rtt = rtt_start.elapsed();
let min = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_min);
let max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
match self.rtt_smoothing {
Some(SmoothingState {
mut srtt,
mut rttvar,
}) => {
// Update the SRTT/RTTVAR according to RFC 4960 6.3.1 C3.
#[inline]
fn duration_difference(a: &Duration, b: &Duration) -> Duration {
if *a > *b {
*a - *b
} else {
*b - *a
}
}
let beta = DEFAULT_SCTP_PARAMETERS.rto_beta;
let alpha = DEFAULT_SCTP_PARAMETERS.rto_alpha;
// RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
rttvar = rttvar * (beta.1 - beta.0) / beta.1
+ duration_difference(&srtt, &rtt) * beta.0 / beta.1;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
srtt = srtt * (alpha.1 - alpha.0) / alpha.1 + rtt * alpha.0 / alpha.1;
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
None => {
// No current SRTT/RTTVAR has yet been established, so initialize these according
// to RFC 4960 6.3.1 C2.
// SRTT <- R
let srtt = rtt;
// RTTVAR <- R/2
let mut rttvar = rtt / 2;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
}
if self.rto < min {
self.rto = min;
} else if self.rto > max {
self.rto = max;
}
fn duration_to_us(duration: Duration) -> u32 {
duration.as_secs() as u32 * 1_000_000 + duration.subsec_nanos() / 1_000
}
trace!(
"New RTT measurement: {:?} srtt={:?} rttvar={:?} rto={:?}",
duration_to_us(rtt),
duration_to_us(self.rtt_smoothing.unwrap().srtt),
duration_to_us(self.rtt_smoothing.unwrap().rttvar),
duration_to_us(self.rto),
);
// TODO: [6.3.1] C4-C7?
}
} | let rtx_chunk = association.data.sent_queue.front().map(|c| c.clone()); | random_line_split |
retransmission.rs | //! Retransmission is covered in section 6.3 of RFC 4960.
//!
//! 1. Perform round-trip time (RTT) measurements from the time a TSN is sent until it is
//! acknowledged.
//! a) A measurement must be made once per round-trip, but no more. I interpret this to mean
//! that only one measurement may be in progress at a time.
//! b) Measurements must not be made on retransmissions. If the TSN being measured is
//! retransmitted, the measurement must be aborted.
//! 2. Adjust the retransmission timeout (RTO) after every measurement is concluded.
//! a) Use the specified smoothing algorithm to calculate a new RTO.
//! b) Clamp the new RTO to RTO.Min.. Option<RTO.Max>
//! c) If RTTVAR is zero, increase it to the clock granularity.
//! 3. Manage the retransmission timer ("T3-rtx").
//! R1) On any DATA send, if the timer is not running, start the timer with RTO.
//! R2) If all outstanding data has been acknowledged, then cancel the timer.
//! R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
//! // TODO:
//! R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged (e.g. it
//! was dropped by the peer), then start the timer.
//! 4. Handle timer expiration.
//! // TODO:
//! E1) Update congestion control as needed.
//! - adjust ssthresh according to Section 7.2.3
//! - set cwnd to the MTU
//! E2) Double RTO up to RTO.Max to provide back-off.
//! E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet based on
//! the MTU.
//! - Any remaining DATA chunks should be "marked for retransmission" and sent as soon as
//! cwnd allows.
//! E4) Restart the timer according to (R1) above.
use std::time::{Duration, Instant};
use tokio_timer;
use super::Association;
use packet::chunk::{Chunk, GapAckBlock};
use packet::TSN;
use stack::settings::DEFAULT_SCTP_PARAMETERS;
/// The retransmission state that will be embedded in every association.
pub struct State {
    /// The T3-rtx retransmission timer; `None` when the timer is not running.
    pub timer: Option<tokio_timer::Sleep>,
    /// RTT measurement state and the derived retransmission timeout (RTO).
    pub measurements: Measurements,
    /// Highest TSN transmitted so far; used to distinguish fresh sends from
    /// retransmissions (see `Retransmission::on_outgoing_data`).
    pub tx_high_water_mark: TSN,
}
impl State {
    /// Builds the initial retransmission state for an association: the
    /// T3-rtx timer is not running, RTT measurement state is fresh, and the
    /// transmit high-water mark starts at the given TSN.
    pub fn new(tx_high_water_mark: TSN) -> State {
        let measurements = Measurements::new();
        State {
            tx_high_water_mark,
            measurements,
            timer: None,
        }
    }
}
/// Use a trait to add retransmission functionality to Association.
///
/// This is awkward, but there really is a huge amount of state in an association, with many parts
/// interdependent on many other parts. This makes it difficult to cleanly separate concerns such
/// as retransmission in an obvious and simple way. (I.e. without a lot of Rc<RefCell<_>>, for
/// instance.)
///
/// Most C network stack implementations I've seen just interleave all the concerns together, and
/// (in my opinion) this reduces the readability. So we can at least put retransmission concerns
/// in a different source file, even if doing so is only cosmetic.
///
/// We could also have just added more inherent methods to Association here, but I'm hoping that
/// using a trait is more clear.
pub trait Retransmission {
    /// Called for every transmitted DATA chunk (fresh send or retransmission).
    fn on_outgoing_data(&mut self, chunk_tsn: TSN);
    /// Called when a SACK advances the cumulative TSN ack point.
    fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN, earliest_outstanding_tsn: Option<TSN>);
    fn on_gap_ack_block(&mut self, start: TSN, end: TSN); // TODO remove
    /// Called with all gap ack blocks of a received SACK.
    fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]);
    /// Called when the T3-rtx retransmission timer expires.
    fn on_timeout(&mut self);
}
impl Retransmission for Association {
    /// Per-DATA-chunk bookkeeping: RTT sampling on fresh sends, and timer
    /// rule R1.
    fn on_outgoing_data(&mut self, chunk_tsn: TSN) {
        // On fresh transmissions, perform RTT measurements.
        if chunk_tsn > self.rtx.tx_high_water_mark {
            // This is a newly sent chunk (not a retransmission), so take a measurement if needed.
            self.rtx.measurements.on_outgoing_chunk(chunk_tsn);
            // Raise the high water mark.
            self.rtx.tx_high_water_mark = chunk_tsn;
        }
        // R1) On any transmission, start the rtx timer if it is not already running.
        if self.rtx.timer.is_none() {
            self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto))
        }
    }

    /// Handles an advanced cumulative TSN ack point: concludes any pending
    /// RTT measurement and applies timer rules R2/R3.
    fn on_cumulative_ack(
        &mut self,
        cumulative_tsn_ack: TSN,
        earliest_outstanding_tsn: Option<TSN>,
    ) {
        // Perform RTT measurements
        self.rtx.measurements.on_cumulative_ack(cumulative_tsn_ack);
        if self.data.sent_queue.is_empty() && self.rtx.timer.is_some() {
            // R2) If all outstanding data has been acknowledged, then cancel the timer.
            self.rtx.timer = None;
        } else if let Some(earliest_outstanding_tsn) = earliest_outstanding_tsn {
            // R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
            if cumulative_tsn_ack >= earliest_outstanding_tsn {
                self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto));
            }
        }
    }

    // TODO remove
    fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
        // Perform RTT measurements
        self.rtx.measurements.on_gap_ack_block(start, end);
    }

    /// Walks the SACK's gap ack blocks: completes RTT measurements for acked
    /// ranges and (eventually) marks the gaps between them for retransmission.
    fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]) {
        // `tsn` tracks the end of the last processed acked range; any gap
        // lies between it and the next block's start.
        let mut tsn = cumulative_tsn_ack;
        for block in gap_ack_blocks {
            // Block offsets are relative to the cumulative TSN ack point.
            let ack_start = cumulative_tsn_ack + block.start as u32;
            let ack_end = cumulative_tsn_ack + block.end as u32;
            // Chunks in the TSN range [ack_start,ack_end] (inclusive) are assumed to
            // have been received. However, the receiver has the option of discarding them and
            // having us retransmit them, so they must stay in the sent queue until acknowledged
            // via the cumulative TSN.
            // Perform RTT measurements, if needed
            self.rtx.measurements.on_gap_ack_block(ack_start, ack_end);
            // This should always be true if the peer is constructing SACKs properly.
            if ack_start > tsn + 1 {
                let gap_start = tsn + 1;
                let gap_end = ack_start - 1;
                // This could just be a for loop, whenever std::iter::Step becomes stable.
                let mut gap_tsn = gap_start;
                loop {
                    // TODO: Mark this gap chunk for retransmission.
                    gap_tsn += 1;
                    if gap_tsn > gap_end {
                        break;
                    }
                }
            }
            // TODO: Store received ranges, so we can know if the peer decides to drop them?
            // (So we can implement R4.)
            tsn = ack_end;
        }
        // R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged
        // (e.g. it was dropped by the peer), then start the timer.
        //
        // TODO
    }

    /// T3-rtx expiry handling: exponential RTO back-off (E2) followed by
    /// retransmission (E3) and a timer restart (E4, inside
    /// `retransmit_immediate`).
    fn on_timeout(&mut self) {
        // E1) Update congestion control as needed.
        //     - adjust ssthresh according to Section 7.2.3
        //     - set cwnd to the MTU
        // TODO
        // E2) Double RTO up to RTO.Max to provide back-off.
        self.rtx.measurements.rto *= 2;
        let rto_max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
        self.rtx.measurements.rto = self.rtx.measurements.rto.min(rto_max);
        // E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet
        //     based on the MTU.
        retransmit_immediate(self);
        // Any remaining DATA chunks should be "marked for retransmission" and sent as soon
        // as cwnd allows.
        retransmit_all_except_first(self);
    }
}
/// Immediately retransmit the earliest unacknowledged sent chunk. Ideally, we would see how many
/// of the earliest chunks could fit into a packet given the current MTU.
fn retransmit_immediate(association: &mut Association) {
    // Clone the earliest unacknowledged chunk; the original stays in the
    // sent queue until it is cumulatively acknowledged.
    let front = association.data.sent_queue.front().cloned();
    if let Some(chunk) = front {
        // Re-send it as a DATA chunk.
        println!("re-sending chunk: {:?}", chunk);
        association.send_chunk(Chunk::Data(chunk));
        // E4) Restart the T3-rtx timer with the current RTO.
        let rto = association.rtx.measurements.rto;
        association.rtx.timer = Some(association.resources.timer.sleep(rto))
    }
}
/// "Mark" a range of unacknowledged packets for retransmission.
fn retransmit_range(association: &mut Association, first: TSN, last: TSN) |
/// "Mark" all unacknowledged packets for retransmission.
#[allow(unused)]
fn retransmit_all(association: &mut Association) {
    // Every chunk in the sent queue goes back onto the send queue.
    let data = &mut association.data;
    let requeued_bytes = data.sent_queue.transfer_all(&mut data.send_queue);
    // Window accounting: bytes pulled out of the sent queue no longer count
    // against the peer's receive window.
    association.peer_rwnd += requeued_bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission, except for the first. (Which was
/// presumably sent via retransmit_immediate().)
fn retransmit_all_except_first(association: &mut Association) {
    let first = association.data.sent_queue.front().map(|c| c.tsn);
    let last = association.data.sent_queue.back().map(|c| c.tsn);
    if let (Some(first), Some(last)) = (first, last) {
        // Only requeue when there is something beyond the first chunk.
        if last > first {
            retransmit_range(association, first + 1, last);
        }
    }
}
/// Smoothed round-trip-time statistics carried between measurements
/// (RFC 4960 6.3.1).
#[derive(Clone, Copy)]
struct SmoothingState {
    srtt: Duration,   // Smoothed round-trip time
    rttvar: Duration, // Round-trip time variation
}
/// RTT measurement machinery: at most one measurement is in flight at a
/// time, and completed measurements feed the smoothed RTT state that derives
/// the retransmission timeout.
pub struct Measurements {
    rtt_measurement: Option<(TSN, Instant)>, // An in-progress RTT measurement.
    rtt_smoothing: Option<SmoothingState>,   // None until the first sample (6.3.1 C2).
    rto: Duration,                           // Current retransmission timeout.
}
/// Clock granularity in nanoseconds. Tokio-timer 0.1 has a granularity of 100ms, and tokio-timer
/// 0.2 has a granularity of 1ms.
/// Used as the RTTVAR floor per RFC 4960 6.3.1 (G1) in `complete_rtt_measurement`.
/// TODO: Upgrade to tokio-timer 0.2!
const CLOCK_GRANULARITY_NS: u32 = 100_000_000; // 100ms
impl Measurements {
    /// Creates measurement state with no sample in flight and the protocol's
    /// initial RTO.
    pub fn new() -> Measurements {
        Measurements {
            rtt_measurement: None,
            rtt_smoothing: None,
            rto: Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_initial),
        }
    }

    /// This should be called for each fresh outgoing chunk (not on retransmissions), so we can
    /// decide whether to start a new RTT measurement or not.
    pub fn on_outgoing_chunk(&mut self, chunk_tsn: TSN) {
        // Start a RTT measurement if one is not already in progress.
        if self.rtt_measurement.is_none() {
            self.rtt_measurement = Some((chunk_tsn, Instant::now()));
        }
    }

    /// This should be called for each received SACK, so the Measurements can conclude an RTT
    /// measurement, if needed.
    pub fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN) {
        // If a RTT measurement is in-progress, see if it can be completed.
        if let Some((rtt_tsn, _)) = self.rtt_measurement {
            if rtt_tsn <= cumulative_tsn_ack {
                self.complete_rtt_measurement();
            }
        }
    }

    /// This should be called for each gap ack block in each received SACK, so the Measurements
    /// can conclude an RTT measurement, if needed.
    pub fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
        // If a RTT measurement is in-progress, see if it can be completed.
        if let Some((rtt_tsn, _)) = self.rtt_measurement {
            if rtt_tsn >= start && rtt_tsn <= end {
                self.complete_rtt_measurement();
            }
        }
    }

    /// Conclude the current RTT measurement and adjust SRTT (smoothed RTT), RTTVAR (RTT variance),
    /// and RTO (retransmission timeout) accordingly.
    ///
    /// Panics if no measurement is in progress; all callers check first.
    fn complete_rtt_measurement(&mut self) {
        // We have received acknowledgement of the receipt of the measurement TSN, so calculate the
        // RTT and related variables.
        let (_, rtt_start) = self.rtt_measurement.take().unwrap(); // Caller verifies Some(_).
        let rtt = rtt_start.elapsed();
        let min = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_min);
        let max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
        match self.rtt_smoothing {
            Some(SmoothingState {
                mut srtt,
                mut rttvar,
            }) => {
                // Update the SRTT/RTTVAR according to RFC 4960 6.3.1 C3.
                #[inline]
                fn duration_difference(a: &Duration, b: &Duration) -> Duration {
                    if *a > *b {
                        *a - *b
                    } else {
                        *b - *a
                    }
                }
                // alpha/beta are (numerator, denominator) pairs so the math
                // stays in integer Duration arithmetic.
                let beta = DEFAULT_SCTP_PARAMETERS.rto_beta;
                let alpha = DEFAULT_SCTP_PARAMETERS.rto_alpha;
                // RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
                rttvar = rttvar * (beta.1 - beta.0) / beta.1
                    + duration_difference(&srtt, &rtt) * beta.0 / beta.1;
                if rttvar == Duration::new(0, 0) {
                    // 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
                    rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
                }
                // SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
                srtt = srtt * (alpha.1 - alpha.0) / alpha.1 + rtt * alpha.0 / alpha.1;
                // RTO <- SRTT + 4 * RTTVAR
                self.rto = srtt + rttvar * 4;
                self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
            }
            None => {
                // No current SRTT/RTTVAR has yet been established, so initialize these according
                // to RFC 4960 6.3.1 C2.
                // SRTT <- R
                let srtt = rtt;
                // RTTVAR <- R/2
                let mut rttvar = rtt / 2;
                if rttvar == Duration::new(0, 0) {
                    // 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
                    rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
                }
                // RTO <- SRTT + 4 * RTTVAR
                self.rto = srtt + rttvar * 4;
                self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
            }
        }
        // Clamp the new RTO to [RTO.Min, RTO.Max].
        if self.rto < min {
            self.rto = min;
        } else if self.rto > max {
            self.rto = max;
        }
        // Trace-only helper. NOTE(review): truncates for durations over
        // ~71 minutes (u32 microseconds) — acceptable for logging.
        fn duration_to_us(duration: Duration) -> u32 {
            duration.as_secs() as u32 * 1_000_000 + duration.subsec_nanos() / 1_000
        }
        trace!(
            "New RTT measurement: {:?} srtt={:?} rttvar={:?} rto={:?}",
            duration_to_us(rtt),
            duration_to_us(self.rtt_smoothing.unwrap().srtt),
            duration_to_us(self.rtt_smoothing.unwrap().rttvar),
            duration_to_us(self.rto),
        );
        // TODO: [6.3.1] C4-C7?
    }
}
| {
// TODO: Don't retransmit chunks that were acknowledged in the gap-ack blocks of the most
// recent SACK.
// Re-queue unacknowledged chunks in the specified range.
let bytes =
association
.data
.sent_queue
.transfer_range(&mut association.data.send_queue, first, last);
// Window accounting: Increase the peer receive window by however much we removed from the sent
// queue.
association.peer_rwnd += bytes as u32;
} | identifier_body |
retransmission.rs | //! Retransmission is covered in section 6.3 of RFC 4960.
//!
//! 1. Perform round-trip time (RTT) measurements from the time a TSN is sent until it is
//! acknowledged.
//! a) A measurement must be made once per round-trip, but no more. I interpret this to mean
//! that only one measurement may be in progress at a time.
//! b) Measurements must not be made on retransmissions. If the TSN being measured is
//! retransmitted, the measurement must be aborted.
//! 2. Adjust the retransmission timeout (RTO) after every measurement is concluded.
//! a) Use the specified smoothing algorithm to calculate a new RTO.
//! b) Clamp the new RTO to RTO.Min.. Option<RTO.Max>
//! c) If RTTVAR is zero, increase it to the clock granularity.
//! 3. Manage the retransmission timer ("T3-rtx").
//! R1) On any DATA send, if the timer is not running, start the timer with RTO.
//! R2) If all outstanding data has been acknowledged, then cancel the timer.
//! R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
//! // TODO:
//! R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged (e.g. it
//! was dropped by the peer), then start the timer.
//! 4. Handle timer expiration.
//! // TODO:
//! E1) Update congestion control as needed.
//! - adjust ssthresh according to Section 7.2.3
//! - set cwnd to the MTU
//! E2) Double RTO up to RTO.Max to provide back-off.
//! E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet based on
//! the MTU.
//! - Any remaining DATA chunks should be "marked for retransmission" and sent as soon as
//! cwnd allows.
//! E4) Restart the timer according to (R1) above.
use std::time::{Duration, Instant};
use tokio_timer;
use super::Association;
use packet::chunk::{Chunk, GapAckBlock};
use packet::TSN;
use stack::settings::DEFAULT_SCTP_PARAMETERS;
/// The retransmission state that will be embedded in every association.
pub struct State {
    /// The T3-rtx retransmission timer; `None` when the timer is not running.
    pub timer: Option<tokio_timer::Sleep>,
    /// RTT measurement and RTO smoothing state.
    pub measurements: Measurements,
    /// Highest TSN transmitted so far; a send with a TSN at or below this mark
    /// is treated as a retransmission (see `on_outgoing_data`).
    pub tx_high_water_mark: TSN,
}
impl State {
    /// Create retransmission state with no timer running and fresh RTT
    /// measurement state, seeded with the highest TSN transmitted so far.
    pub fn new(tx_high_water_mark: TSN) -> State {
        let measurements = Measurements::new();
        State {
            tx_high_water_mark,
            measurements,
            timer: None,
        }
    }
}
/// Use a trait to add retransmission functionality to Association.
///
/// This is awkward, but there really is a huge amount of state in an association, with many parts
/// interdependent on many other parts. This makes it difficult to cleanly separate concerns such
/// as retransmission in an obvious and simple way. (I.e. without a lot of Rc<RefCell<_>>, for
/// instance.)
///
/// Most C network stack implementations I've seen just interleave all the concerns together, and
/// (in my opinion) this reduces the readability. So we can at least put retransmission concerns
/// in a different source file, even if doing so is only cosmetic.
///
/// We could also have just added more inherent methods to Association here, but I'm hoping that
/// using a trait is more clear.
pub trait Retransmission {
    /// Called whenever a DATA chunk is transmitted (fresh or retransmitted).
    fn on_outgoing_data(&mut self, chunk_tsn: TSN);
    /// Called when a SACK advances the cumulative TSN ack point.
    fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN, earliest_outstanding_tsn: Option<TSN>);
    /// Called for an individual gap-ack block. // TODO remove
    fn on_gap_ack_block(&mut self, start: TSN, end: TSN); // TODO remove
    /// Called once per SACK with all of its gap-ack blocks.
    fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]);
    /// Called when the T3-rtx retransmission timer expires.
    fn on_timeout(&mut self);
}
impl Retransmission for Association {
    /// Handle a DATA transmission: start an RTT measurement for fresh sends
    /// and make sure the T3-rtx timer is running.
    fn on_outgoing_data(&mut self, chunk_tsn: TSN) {
        // On fresh transmissions, perform RTT measurements.
        if chunk_tsn > self.rtx.tx_high_water_mark {
            // This is a newly sent chunk (not a retransmission), so take a measurement if needed.
            self.rtx.measurements.on_outgoing_chunk(chunk_tsn);
            // Raise the high water mark.
            self.rtx.tx_high_water_mark = chunk_tsn;
        }
        // R1) On any transmission, start the rtx timer if it is not already running.
        if self.rtx.timer.is_none() {
            self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto))
        }
    }
    /// Handle an advance of the cumulative TSN ack point: conclude any pending
    /// RTT measurement and cancel/restart the T3-rtx timer per RFC 4960 6.3.2.
    fn on_cumulative_ack(
        &mut self,
        cumulative_tsn_ack: TSN,
        earliest_outstanding_tsn: Option<TSN>,
    ) {
        // Perform RTT measurements
        self.rtx.measurements.on_cumulative_ack(cumulative_tsn_ack);
        if self.data.sent_queue.is_empty() && self.rtx.timer.is_some() {
            // R2) If all outstanding data has been acknowledged, then cancel the timer.
            self.rtx.timer = None;
        } else if let Some(earliest_outstanding_tsn) = earliest_outstanding_tsn {
            // R3) If the earliest outstanding TSN is acknowledged, then restart the timer.
            if cumulative_tsn_ack >= earliest_outstanding_tsn {
                self.rtx.timer = Some(self.resources.timer.sleep(self.rtx.measurements.rto));
            }
        }
    }
    // TODO remove
    fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
        // Perform RTT measurements
        self.rtx.measurements.on_gap_ack_block(start, end);
    }
    /// Process all gap-ack blocks of a SACK. Block offsets are relative to the
    /// cumulative TSN ack point.
    fn on_gap_ack_blocks(&mut self, cumulative_tsn_ack: TSN, gap_ack_blocks: &[GapAckBlock]) {
        // `tsn` tracks the highest TSN known to be acked so far while walking
        // the (ascending) blocks; it starts at the cumulative ack point.
        let mut tsn = cumulative_tsn_ack;
        for block in gap_ack_blocks {
            let ack_start = cumulative_tsn_ack + block.start as u32;
            let ack_end = cumulative_tsn_ack + block.end as u32;
            // Chunks in the TSN range [ack_start,ack_end] (inclusive) are assumed to
            // have been received. However, the receiver has the option of discarding them and
            // having us retransmit them, so they must stay in the sent queue until acknowledged
            // via the cumulative TSN.
            // Perform RTT measurements, if needed
            self.rtx.measurements.on_gap_ack_block(ack_start, ack_end);
            // This should always be true if the peer is constructing SACKs properly.
            if ack_start > tsn + 1 {
                // TSNs in [gap_start, gap_end] fell between acked ranges and
                // were therefore not received.
                let gap_start = tsn + 1;
                let gap_end = ack_start - 1;
                // This could just be a for loop, whenever std::iter::Step becomes stable.
                let mut gap_tsn = gap_start;
                loop {
                    // TODO: Mark this gap chunk for retransmission.
                    gap_tsn += 1;
                    if gap_tsn > gap_end {
                        break;
                    }
                }
            }
            // TODO: Store received ranges, so we can know if the peer decides to drop them?
            // (So we can implement R4.)
            tsn = ack_end;
        }
        // R4) When a TSN previously acknowledged in a gap ack block is no longer acknowledged
        // (e.g. it was dropped by the peer), then start the timer.
        //
        // TODO
    }
    /// Handle T3-rtx timer expiration (RFC 4960 6.3.3 E1-E4).
    fn on_timeout(&mut self) {
        // E1) Update congestion control as needed.
        //     - adjust ssthresh according to Section 7.2.3
        //     - set cwnd to the MTU
        // TODO
        // E2) Double RTO up to RTO.Max to provide back-off.
        self.rtx.measurements.rto *= 2;
        let rto_max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
        self.rtx.measurements.rto = self.rtx.measurements.rto.min(rto_max);
        // E3) Retransmit as many of the earliest DATA chunks as will fit into a single packet
        //     based on the MTU.
        retransmit_immediate(self);
        // Any remaining DATA chunks should be "marked for retransmission" and sent as soon
        // as cwnd allows.
        retransmit_all_except_first(self);
    }
}
/// Immediately retransmit the earliest unacknowledged sent chunk. Ideally, we would see how many
/// of the earliest chunks could fit into a packet given the current MTU.
fn retransmit_immediate(association: &mut Association) {
// Retrieve the first unacknowledged chunk.
let rtx_chunk = association.data.sent_queue.front().map(|c| c.clone());
if let Some(rtx_chunk) = rtx_chunk {
// Re-transmit chunk
println!("re-sending chunk: {:?}", rtx_chunk);
association.send_chunk(Chunk::Data(rtx_chunk));
// E4) Restart timer
association.rtx.timer = Some(
association
.resources
.timer
.sleep(association.rtx.measurements.rto),
)
}
}
/// "Mark" a range of unacknowledged packets for retransmission by moving them
/// from the sent queue back onto the send queue.
fn retransmit_range(association: &mut Association, first: TSN, last: TSN) {
    // TODO: Don't retransmit chunks that were acknowledged in the gap-ack blocks of the most
    // recent SACK.
    let data = &mut association.data;
    // Re-queue unacknowledged chunks in the specified range.
    let bytes = data.sent_queue.transfer_range(&mut data.send_queue, first, last);
    // Window accounting: the peer receive window grows by however much was
    // removed from the sent queue.
    association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission.
#[allow(unused)]
fn retransmit_all(association: &mut Association) {
    // Move every chunk from the sent queue back onto the send queue.
    let data = &mut association.data;
    let bytes = data.sent_queue.transfer_all(&mut data.send_queue);
    // Window accounting: the peer receive window grows by however much was
    // removed from the sent queue.
    association.peer_rwnd += bytes as u32;
}
/// "Mark" all unacknowledged packets for retransmission, except for the first. (Which was
/// presumably sent via retransmit_immediate().)
fn retransmit_all_except_first(association: &mut Association) {
    let first = association.data.sent_queue.front().map(|c| c.tsn);
    let last = association.data.sent_queue.back().map(|c| c.tsn);
    if let (Some(first), Some(last)) = (first, last) {
        // Only re-queue when there is something beyond the first chunk.
        if last > first {
            retransmit_range(association, first + 1, last);
        }
    }
}
/// RTT smoothing variables (RFC 4960 6.3.1), established once the first RTT
/// measurement completes.
#[derive(Clone, Copy)]
struct SmoothingState {
    srtt: Duration, // Smoothed round-trip time
    rttvar: Duration, // Round-trip time variation
}
/// Tracks in-flight RTT measurements and derives the retransmission timeout (RTO).
pub struct Measurements {
    rtt_measurement: Option<(TSN, Instant)>, // An in-progress RTT measurement.
    rtt_smoothing: Option<SmoothingState>, // None until the first measurement completes.
    rto: Duration, // Current retransmission timeout.
}
/// Clock granularity in nanoseconds. Tokio-timer 0.1 has a granularity of 100ms, and tokio-timer
/// 0.2 has a granularity of 1ms.
/// Used as the floor for RTTVAR per RFC 4960 6.3.1 (G1).
/// TODO: Upgrade to tokio-timer 0.2!
const CLOCK_GRANULARITY_NS: u32 = 100_000_000; // 100ms
impl Measurements {
pub fn new() -> Measurements {
Measurements {
rtt_measurement: None,
rtt_smoothing: None,
rto: Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_initial),
}
}
/// This should be called for each fresh outgoing chunk (not on retransmissions), so we can
/// decide whether to start a new RTT measurement or not.
pub fn on_outgoing_chunk(&mut self, chunk_tsn: TSN) {
// Start a RTT measurement if one is not already in progress.
if self.rtt_measurement.is_none() {
self.rtt_measurement = Some((chunk_tsn, Instant::now()));
}
}
/// This should be called for each received SACK, so the Measurements can conclude an RTT
/// measurement, if needed.
pub fn on_cumulative_ack(&mut self, cumulative_tsn_ack: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn <= cumulative_tsn_ack {
self.complete_rtt_measurement();
}
}
}
/// This should be called for each gap ack block in each received SACK, so the Measurements
/// can conclude an RTT measurement, if needed.
pub fn on_gap_ack_block(&mut self, start: TSN, end: TSN) {
// If a RTT measurement is in-progress, see if it can be completed.
if let Some((rtt_tsn, _)) = self.rtt_measurement {
if rtt_tsn >= start && rtt_tsn <= end {
self.complete_rtt_measurement();
}
}
}
/// Conclude the current RTT measurement and adjust SRTT (smoothed RTT), RTTVAR (RTT variance),
/// and RTO (retransmission timeout) accordingly.
fn | (&mut self) {
// We have received acknowledgement of the receipt of the measurement TSN, so calculate the
// RTT and related variables.
let (_, rtt_start) = self.rtt_measurement.take().unwrap(); // Caller verifies Some(_).
let rtt = rtt_start.elapsed();
let min = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_min);
let max = Duration::from_millis(DEFAULT_SCTP_PARAMETERS.rto_max);
match self.rtt_smoothing {
Some(SmoothingState {
mut srtt,
mut rttvar,
}) => {
// Update the SRTT/RTTVAR according to RFC 4960 6.3.1 C3.
#[inline]
fn duration_difference(a: &Duration, b: &Duration) -> Duration {
if *a > *b {
*a - *b
} else {
*b - *a
}
}
let beta = DEFAULT_SCTP_PARAMETERS.rto_beta;
let alpha = DEFAULT_SCTP_PARAMETERS.rto_alpha;
// RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
rttvar = rttvar * (beta.1 - beta.0) / beta.1
+ duration_difference(&srtt, &rtt) * beta.0 / beta.1;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
srtt = srtt * (alpha.1 - alpha.0) / alpha.1 + rtt * alpha.0 / alpha.1;
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
None => {
// No current SRTT/RTTVAR has yet been established, so initialize these according
// to RFC 4960 6.3.1 C2.
// SRTT <- R
let srtt = rtt;
// RTTVAR <- R/2
let mut rttvar = rtt / 2;
if rttvar == Duration::new(0, 0) {
// 6.3.1(G1): Adjust a zero RTTVAR to be the clock granularity.
rttvar = Duration::new(0, CLOCK_GRANULARITY_NS);
}
// RTO <- SRTT + 4 * RTTVAR
self.rto = srtt + rttvar * 4;
self.rtt_smoothing = Some(SmoothingState { srtt, rttvar });
}
}
if self.rto < min {
self.rto = min;
} else if self.rto > max {
self.rto = max;
}
fn duration_to_us(duration: Duration) -> u32 {
duration.as_secs() as u32 * 1_000_000 + duration.subsec_nanos() / 1_000
}
trace!(
"New RTT measurement: {:?} srtt={:?} rttvar={:?} rto={:?}",
duration_to_us(rtt),
duration_to_us(self.rtt_smoothing.unwrap().srtt),
duration_to_us(self.rtt_smoothing.unwrap().rttvar),
duration_to_us(self.rto),
);
// TODO: [6.3.1] C4-C7?
}
}
| complete_rtt_measurement | identifier_name |
file.rs | let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read(&mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// Writes to a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the open syscall. Returns bytes written successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer write
/// - len – number of bytes to write
pub fn sys_write(&self, fd: FileDesc, base: UserInPtr<u8>, len: usize) -> SysResult {
    info!("write: fd={:?}, base={:?}, len={:#x}", fd, base, len);
    let file_like = self.linux_process().get_file_like(fd)?;
    let data = base.as_slice(len)?;
    file_like.write(data)
}
/// Read from a file descriptor at a given offset.
/// Reads up to `len` bytes from `fd` at `offset` (from the start of the file)
/// into the user buffer starting at `base`. The file offset is not changed.
pub async fn sys_pread(
    &self,
    fd: FileDesc,
    mut base: UserOutPtr<u8>,
    len: usize,
    offset: u64,
) -> SysResult {
    info!(
        "pread: fd={:?}, base={:?}, len={}, offset={}",
        fd, base, len, offset
    );
    let file_like = self.linux_process().get_file_like(fd)?;
    // Read into a kernel buffer first, then copy out to user space.
    let mut kernel_buf = vec![0u8; len];
    let read_len = file_like.read_at(offset, &mut kernel_buf).await?;
    base.write_array(&kernel_buf[..read_len])?;
    Ok(read_len)
}
/// Write to a file descriptor at a given offset.
/// Writes up to `len` bytes from the user buffer starting at `base` to `fd`
/// at `offset`. The file offset is not changed.
pub fn sys_pwrite(
    &self,
    fd: FileDesc,
    base: UserInPtr<u8>,
    len: usize,
    offset: u64,
) -> SysResult {
    info!(
        "pwrite: fd={:?}, base={:?}, len={}, offset={}",
        fd, base, len, offset
    );
    let file_like = self.linux_process().get_file_like(fd)?;
    let data = base.as_slice(len)?;
    file_like.write_at(offset, data)
}
/// Works just like read except that multiple buffers are filled.
/// Reads `iov_count` buffers from the file associated with `fd` into the
/// buffers described by `iov_ptr` ("scatter input").
pub async fn sys_readv(
    &self,
    fd: FileDesc,
    iov_ptr: UserInPtr<IoVecOut>,
    iov_count: usize,
) -> SysResult {
    info!("readv: fd={:?}, iov={:?}, count={}", fd, iov_ptr, iov_count);
    let mut iovs = iov_ptr.read_iovecs(iov_count)?;
    let file_like = self.linux_process().get_file_like(fd)?;
    // Read into one contiguous kernel buffer, then scatter it to the iovecs.
    let mut kernel_buf = vec![0u8; iovs.total_len()];
    let read_len = file_like.read(&mut kernel_buf).await?;
    iovs.write_from_buf(&kernel_buf)?;
    Ok(read_len)
}
/// Works just like write except that multiple buffers are written out.
/// Writes `iov_count` buffers of data described by `iov_ptr` to the file
/// associated with `fd` ("gather output").
pub fn sys_writev(
    &self,
    fd: FileDesc,
    iov_ptr: UserInPtr<IoVecIn>,
    iov_count: usize,
) -> SysResult {
    info!(
        "writev: fd={:?}, iov={:?}, count={}",
        fd, iov_ptr, iov_count
    );
    // Gather all user iovecs into a single contiguous kernel buffer first.
    let gathered = iov_ptr.read_iovecs(iov_count)?.read_to_vec()?;
    self.linux_process().get_file_like(fd)?.write(&gathered)
}
/// Repositions the offset of the open file associated with the file descriptor
/// `fd` to the argument `offset` according to the directive `whence`.
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
    const SEEK_SET: u8 = 0;
    const SEEK_CUR: u8 = 1;
    const SEEK_END: u8 = 2;
    // Translate the (whence, offset) pair into a typed seek position.
    let pos = match whence {
        SEEK_SET => SeekFrom::Start(offset as u64),
        SEEK_CUR => SeekFrom::Current(offset),
        SEEK_END => SeekFrom::End(offset),
        _ => return Err(LxError::EINVAL),
    };
    info!("lseek: fd={:?}, pos={:?}", fd, pos);
    let new_offset = self.linux_process().get_file(fd)?.seek(pos)?;
    Ok(new_offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate | : UserInPtr<u8>, len: usize) -> SysResult {
let path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// Causes the regular file referenced by `fd` to be truncated to a size of
/// precisely `len` bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
    info!("ftruncate: fd={:?}, len={}", fd, len);
    self.linux_process().get_file(fd)?.set_len(len as u64)?;
    Ok(0)
}
/// copies data between one file descriptor and another.
///
/// Implemented as `copy_file_range(in_fd, offset_ptr, out_fd, NULL, count, 0)`:
/// the read side honors/updates `offset_ptr` while the write side uses the
/// out file's own offset (a null out-offset pointer).
pub async fn sys_sendfile(
    &self,
    out_fd: FileDesc,
    in_fd: FileDesc,
    offset_ptr: UserInOutPtr<u64>,
    count: usize,
) -> SysResult {
    self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
        .await
}
/// copies data between one file descriptor and another, reading from a
/// specified offset and writing the new offset back.
///
/// For each of `in_offset`/`out_offset`: a null pointer means "use and update
/// the file's own offset"; a non-null pointer supplies the starting offset,
/// receives the updated offset on return, and leaves the file offset
/// unchanged. `flags` is accepted but currently only logged.
/// Returns the total number of bytes written.
pub async fn sys_copy_file_range(
    &self,
    in_fd: FileDesc,
    mut in_offset: UserInOutPtr<u64>,
    out_fd: FileDesc,
    mut out_offset: UserInOutPtr<u64>,
    count: usize,
    flags: usize,
) -> SysResult {
    info!(
        "copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
        in_fd, out_fd, in_offset, out_offset, count, flags
    );
    let proc = self.linux_process();
    let in_file = proc.get_file(in_fd)?;
    let out_file = proc.get_file(out_fd)?;
    // Copy through a fixed 1 KiB kernel bounce buffer.
    let mut buffer = [0u8; 1024];
    // for in_offset and out_offset
    // null means update file offset
    // non-null means update {in,out}_offset instead
    let mut read_offset = if !in_offset.is_null() {
        in_offset.read()?
    } else {
        in_file.seek(SeekFrom::Current(0))?
    };
    // Remember the out file's current offset so it can be restored afterwards
    // when the caller supplied an explicit out_offset.
    let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
    let write_offset = if !out_offset.is_null() {
        let offset = out_offset.read()?;
        out_file.seek(SeekFrom::Start(offset))?
    } else {
        0
    };
    // read from specified offset and write new offset back
    let mut bytes_read = 0;
    let mut total_written = 0;
    while bytes_read < count {
        let len = buffer.len().min(count - bytes_read);
        let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
        if read_len == 0 {
            // EOF on the input file.
            break;
        }
        bytes_read += read_len;
        read_offset += read_len as u64;
        // Write the chunk out, looping until it is fully written; a
        // zero-length write is treated as an error (EBADF).
        let mut bytes_written = 0;
        let mut rlen = read_len;
        while bytes_written < read_len {
            let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
            if write_len == 0 {
                info!(
                    "copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
                    in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
                );
                return Err(LxError::EBADF);
            }
            bytes_written += write_len;
            rlen -= write_len;
        }
        total_written += bytes_written;
    }
    // Publish the new input position: either back to the user pointer, or by
    // advancing the file offset (read_at above did not advance it).
    if !in_offset.is_null() {
        in_offset.write(read_offset)?;
    } else {
        in_file.seek(SeekFrom::Current(bytes_read as i64))?;
    }
    // no-op when out_offset is null (write_offset is 0 in that case).
    out_offset.write_if_not_null(write_offset + total_written as u64)?;
    // With an explicit out_offset the out file's offset must be unchanged;
    // with a null out_offset it is intentionally left advanced by the writes.
    if !out_offset.is_null() {
        out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
    }
    Ok(total_written)
}
/// Causes all buffered modifications to file metadata and data to be written
/// to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
    info!("sync:");
    self.linux_process().root_inode().fs().sync()?;
    Ok(0)
}
/// Transfers ("flushes") all modified in-core data of (i.e., modified buffer
/// cache pages for) the file referred to by `fd` to the disk device.
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
    info!("fsync: fd={:?}", fd);
    self.linux_process().get_file(fd)?.sync_all()?;
    Ok(0)
}
/// Similar to fsync(), but does not flush modified metadata unless that
/// metadata is needed.
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
    info!("fdatasync: fd={:?}", fd);
    self.linux_process().get_file(fd)?.sync_data()?;
    Ok(0)
}
/// Set parameters of device files.
/// The request code and up to three raw arguments are passed straight through
/// to the file object's `ioctl` handler.
pub fn sys_ioctl(
    &self,
    fd: FileDesc,
    request: usize,
    arg1: usize,
    arg2: usize,
    arg3: usize,
) -> SysResult {
    info!(
        "ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
        fd, request, arg1, arg2, arg3
    );
    self.linux_process()
        .get_file_like(fd)?
        .ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
    info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
    let proc = self.linux_process();
    let file_like = proc.get_file_like(fd)?;
    if let Ok(cmd) = FcntlCmd::try_from(cmd) {
        match cmd {
            // F_GETFD: return the close-on-exec flag in bit 0.
            FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
            // F_SETFD: bit 0 of arg is FD_CLOEXEC; other bits are ignored.
            FcntlCmd::SETFD => {
                let mut flags = file_like.flags();
                if (arg & 1) != 0 {
                    flags |= OpenFlags::CLOEXEC;
                } else {
                    flags -= OpenFlags::CLOEXEC;
                }
                file_like.set_flags(flags)?;
                Ok(0)
            }
            // F_GETFL: return the file status flags.
            FcntlCmd::GETFL => Ok(file_like.flags().bits()),
            // F_SETFL: replace the status flags; unrecognized bits are dropped.
            FcntlCmd::SETFL => {
                file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
                Ok(0)
            }
            // F_DUPFD / F_DUPFD_CLOEXEC: duplicate fd onto a free descriptor
            // chosen via get_free_fd_from(arg), then set or clear CLOEXEC on
            // the duplicate according to the command.
            FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
                let new_fd = proc.get_free_fd_from(arg);
                self.sys_dup2(fd, new_fd)?;
                let dup = proc.get_file_like(new_fd)?;
                let mut flags = dup.flags();
                if cmd == FcntlCmd::DUPFD_CLOEXEC {
                    flags |= OpenFlags::CLOEXEC;
                } else {
                    flags -= OpenFlags::CLOEXEC;
                }
                dup.set_flags(flags)?;
                Ok(new_fd.into())
            }
            // Recognized but unimplemented commands.
            _ => Err(LxError::EINVAL),
        }
    } else {
        // Unknown command value.
        Err(LxError::EINVAL)
    }
}
/// Checks whether the calling process can access the file pathname.
/// Equivalent to `faccessat(AT_FDCWD, path, mode, 0)`.
pub fn sys_access(&self, path: UserInPtr<u8>, mode: usize) -> SysResult {
    self.sys_faccessat(FileDesc::CWD, path, mode, 0)
}
/// Check user's permissions of a file relative to a directory file descriptor
/// TODO: check permissions based on uid/gid
pub fn sys_faccessat(
    &self,
    dirfd: FileDesc,
    path: UserInPtr<u8>,
    mode: usize,
    flags: usize,
) -> SysResult {
    // TODO: check permissions based on uid/gid
    // Currently `mode` is only logged: access is granted whenever the path
    // resolves, and a lookup failure propagates as the error.
    let path = path.as_c_str()?;
    let flags = AtFlags::from_bits_truncate(flags);
    info!(
        "faccessat: dirfd={:?}, path={:?}, mode={:#o}, flags={:?}",
        dirfd, path, mode, flags
    );
    let proc = self.linux_process();
    // Follow symlinks unless AT_SYMLINK_NOFOLLOW was given.
    let follow = !flags.contains(AtFlags::SYMLINK_NOFOLLOW);
    let _inode = proc.lookup_inode_at(dirfd, path, follow)?;
    Ok(0)
}
/// change file timestamps with nanosecond precision
///
/// With a null `pathname` this acts on the file referred to by `dirfd` itself
/// (futimens-style); otherwise the path is resolved relative to `dirfd`.
/// A null `times` pointer means "set both timestamps to now".
pub fn sys_utimensat(
    &mut self,
    dirfd: FileDesc,
    pathname: UserInPtr<u8>,
    times: UserInOutPtr<[TimeSpec; 2]>,
    flags: usize,
) -> SysResult {
    info!(
        "utimensat(raw): dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
        dirfd, pathname, times, flags
    );
    // Special nsec values (matching the Linux UTIME_NOW/UTIME_OMIT constants):
    // NOW sets the field to the current time, OMIT leaves it unchanged.
    const UTIME_NOW: usize = 0x3fffffff;
    const UTIME_OMIT: usize = 0x3ffffffe;
    let proc = self.linux_process();
    // times == NULL: set both atime and mtime to the current time.
    let mut times = if times.is_null() {
        let epoch = TimeSpec::now();
        [epoch, epoch]
    } else {
        let times = times.read()?;
        [times[0], times[1]]
    };
    // Resolve the target inode.
    let inode = if pathname.is_null() {
        // Null pathname: operate on dirfd's file directly.
        let fd = dirfd;
        info!("futimens: fd: {:?}, times: {:?}", fd, times);
        proc.get_file(fd)?.inode()
    } else {
        let pathname = pathname.as_c_str()?;
        info!(
            "utimensat: dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
            dirfd, pathname, times, flags
        );
        // Only no flags or exactly AT_SYMLINK_NOFOLLOW are supported.
        let follow = if flags == 0 {
            true
        } else if flags == AtFlags::SYMLINK_NOFOLLOW.bits() {
            false
        } else {
            return Err(LxError::EINVAL);
        };
        proc.lookup_inode_at(dirfd, pathname, follow)?
    };
    let mut metadata = inode.metadata()?;
    // Update atime unless omitted; times[0] is the access time.
    if times[0].nsec != UTIME_OMIT {
        if times[0].nsec == UTIME_NOW {
            times[0] = TimeSpec::now();
        }
        metadata.atime = rcore_fs::vfs::Timespec {
            sec: times[0].sec as i64,
            nsec: times[0].nsec as i32,
        };
    }
    // Update mtime unless omitted; times[1] is the modification time.
    if times[1].nsec != UTIME_OMIT {
        if times[1].nsec == UTIME_NOW {
            times[1] = TimeSpec::now();
        }
        metadata.mtime = rcore_fs::vfs::Timespec {
            sec: times[1].sec as i64,
            nsec: times[1].nsec as i32,
        };
    }
    inode.set_metadata(&metadata)?;
    Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `statfs` system call returns information about a mounted filesystem.
/// `path` is the pathname of **any file** within the mounted filesystem.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_statfs(&self, path: UserInPtr<u8>, mut buf: UserOutPtr<StatFs>) -> SysResult {
    let path = path.as_c_str()?;
    info!("statfs: path={:?}, buf={:?}", path, buf);
    // TODO
    // `path` is currently unused because real mounting is not implemented, so a
    // path cannot refer to anything but the primary file system. Once mounting
    // is implemented, use `path` to determine which file system the path lives
    // in and return that file system's statistics (and fill in
    // `StatFs::f_flags` from the mount options!).
    let info = self.linux_process().root_inode().fs().info();
    buf.write(info.into())?;
    Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `fstatfs` system call returns information about a mounted filesystem.
/// `fd` is the descriptor referencing an open file.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_fstatfs(&self, fd: FileDesc, mut buf: UserOutPtr<StatFs>) -> SysResult {
    // Fix: the log line previously said "statfs:", mislabeling this syscall.
    info!("fstatfs: fd={:?}, buf={:?}", fd, buf);
    let info = self.linux_process().get_file(fd)?.inode().fs().info();
    buf.write(info.into())?;
    Ok(0)
}
}
/// Base value of the Linux-specific fcntl command range (F_LINUX_SPECIFIC_BASE
/// in Linux's fcntl.h). NOTE(review): appears unused in this chunk — confirm
/// it is referenced elsewhere in the file.
const F_LINUX_SPECIFIC_BASE: usize = 1024;
/// The file system statistics struct defined in linux
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
#[repr(C)]
pub struct StatFs {
    f_type: i64, // type of filesystem (magic number)
    f_bsize: i64, // optimal transfer block size
    f_blocks: u64, // total data blocks in filesystem
    f_bfree: u64, // free blocks in filesystem
    f_bavail: u64, // free blocks available to unprivileged user
    f_files: u64, // total inodes in filesystem
    f_ffree: u64, // free inodes in filesystem
    f_fsid: (i32, i32), // filesystem id
    f_namelen: isize, // maximum length of filenames
    f_frsize: isize, // fragment size
    f_flags: isize, // mount flags
    f_spare: [isize; 4], // padding reserved for future use
}
// Ensure that the definition of `StatFs` stays consistent with the common Linux layout.
static_assertions::const_assert_eq!(120, core::mem::size_of::<StatFs>());
impl From<FsInfo> for StatFs {
fn from(info: FsInfo) -> Self {
StatFs {
// TODO 文件系统的魔数,需要 rcore-fs 提供一个渠道获取
// 但是这个似乎并没有什么用处,新的 vfs 相关函数都去掉了,也许永远填个常数就好了
f_type: 0,
f_bsize: info.bsize as _,
f_blocks: info.blocks as _,
| (&self, path | identifier_name |
file.rs | let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read(&mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// Writes to a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the open syscall. Returns bytes written successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer write
/// - len – number of bytes to write
pub fn sys_write(&self, fd: FileDesc, base: UserInPtr<u8>, len: usize) -> SysResult {
info!("write: fd={:?}, base={:?}, len={:#x}", fd, base, len);
self.linux_process()
.get_file_like(fd)?
.write(base.as_slice(len)?)
}
/// read from or write to a file descriptor at a given offset
/// reads up to count bytes from file descriptor fd at offset offset
/// (from the start of the file) into the buffer starting at buf. The file offset is not changed.
pub async fn sys_pread(
&self,
fd: FileDesc,
mut base: UserOutPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pread: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read_at(offset, &mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// writes up to count bytes from the buffer
/// starting at buf to the file descriptor fd at offset offset. The file offset is not changed.
pub fn sys_pwrite(
&self,
fd: FileDesc,
base: UserInPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pwrite: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
self.linux_process()
.get_file_like(fd)?
.write_at(offset, base.as_slice(len)?)
}
/// works just like read except that multiple buffers are filled.
/// reads iov_count buffers from the file
/// associated with the file descriptor fd into the buffers described by iov ("scatter input")
pub async fn sys_readv(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecOut>,
iov_count: usize,
) -> SysResult {
info!("readv: fd={:?}, iov={:?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate(&self, path: UserInPtr<u8>, len: usize) -> SysResult {
let path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// cause the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if!in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if!out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64; | let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if!in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if!out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1)!= 0 {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = proc.get_free_fd_from(arg);
self.sys_dup2(fd, new_fd)?;
let dup = proc.get_file_like(new_fd)?;
let mut flags = dup.flags();
if cmd == FcntlCmd::DUPFD_CLOEXEC {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
dup.set_flags(flags)?;
Ok(new_fd.into())
}
_ => Err(LxError::EINVAL),
}
} else {
Err(LxError::EINVAL)
}
}
/// Checks whether the calling process can access the file pathname
pub fn sys_access(&self, path: UserInPtr<u8>, mode: usize) -> SysResult {
self.sys_faccessat(FileDesc::CWD, path, mode, 0)
}
/// Check user's permissions of a file relative to a directory file descriptor
/// TODO: check permissions based on uid/gid
pub fn sys_faccessat(
&self,
dirfd: FileDesc,
path: UserInPtr<u8>,
mode: usize,
flags: usize,
) -> SysResult {
// TODO: check permissions based on uid/gid
let path = path.as_c_str()?;
let flags = AtFlags::from_bits_truncate(flags);
info!(
"faccessat: dirfd={:?}, path={:?}, mode={:#o}, flags={:?}",
dirfd, path, mode, flags
);
let proc = self.linux_process();
let follow =!flags.contains(AtFlags::SYMLINK_NOFOLLOW);
let _inode = proc.lookup_inode_at(dirfd, path, follow)?;
Ok(0)
}
/// change file timestamps with nanosecond precision
pub fn sys_utimensat(
&mut self,
dirfd: FileDesc,
pathname: UserInPtr<u8>,
times: UserInOutPtr<[TimeSpec; 2]>,
flags: usize,
) -> SysResult {
info!(
"utimensat(raw): dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
const UTIME_NOW: usize = 0x3fffffff;
const UTIME_OMIT: usize = 0x3ffffffe;
let proc = self.linux_process();
let mut times = if times.is_null() {
let epoch = TimeSpec::now();
[epoch, epoch]
} else {
let times = times.read()?;
[times[0], times[1]]
};
let inode = if pathname.is_null() {
let fd = dirfd;
info!("futimens: fd: {:?}, times: {:?}", fd, times);
proc.get_file(fd)?.inode()
} else {
let pathname = pathname.as_c_str()?;
info!(
"utimensat: dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
let follow = if flags == 0 {
true
} else if flags == AtFlags::SYMLINK_NOFOLLOW.bits() {
false
} else {
return Err(LxError::EINVAL);
};
proc.lookup_inode_at(dirfd, pathname, follow)?
};
let mut metadata = inode.metadata()?;
if times[0].nsec!= UTIME_OMIT {
if times[0].nsec == UTIME_NOW {
times[0] = TimeSpec::now();
}
metadata.atime = rcore_fs::vfs::Timespec {
sec: times[0].sec as i64,
nsec: times[0].nsec as i32,
};
}
if times[1].nsec!= UTIME_OMIT {
if times[1].nsec == UTIME_NOW {
times[1] = TimeSpec::now();
}
metadata.mtime = rcore_fs::vfs::Timespec {
sec: times[1].sec as i64,
nsec: times[1].nsec as i32,
};
}
inode.set_metadata(&metadata)?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `statfs` system call returns information about a mounted filesystem.
/// `path` is the pathname of **any file** within the mounted filesystem.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_statfs(&self, path: UserInPtr<u8>, mut buf: UserOutPtr<StatFs>) -> SysResult {
let path = path.as_c_str()?;
info!("statfs: path={:?}, buf={:?}", path, buf);
// TODO
// 现在 `path` 没用到,因为没实现真正的挂载,不可能搞一个非主要文件系统的路径。
// 实现挂载之后,要用 `path` 分辨路径在哪个文件系统里,根据对应文件系统的特性返回统计信息。
// (以及根据挂载选项填写 `StatFs::f_flags`!)
let info = self.linux_process().root_inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `fstatfs` system call returns information about a mounted filesystem.
/// `fd` is the descriptor referencing an open file.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_fstatfs(&self, fd: FileDesc, mut buf: UserOutPtr<StatFs>) -> SysResult {
info!("statfs: fd={:?}, buf={:?}", fd, buf);
let info = self.linux_process().get_file(fd)?.inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
}
const F_LINUX_SPECIFIC_BASE: usize = 1024;
/// The file system statistics struct defined in linux
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
#[repr(C)]
pub struct StatFs {
f_type: i64,
f_bsize: i64,
f_blocks: u64,
f_bfree: u64,
f_bavail: u64,
f_files: u64,
f_ffree: u64,
f_fsid: (i32, i32),
f_namelen: isize,
f_frsize: isize,
f_flags: isize,
f_spare: [isize; 4],
}
// 保证 `StatFs` 的定义和常见的 linux 一致
static_assertions::const_assert_eq!(120, core::mem::size_of::<StatFs>());
impl From<FsInfo> for StatFs {
fn from(info: FsInfo) -> Self {
StatFs {
// TODO 文件系统的魔数,需要 rcore-fs 提供一个渠道获取
// 但是这个似乎并没有什么用处,新的 vfs 相关函数都去掉了,也许永远填个常数就好了
f_type: 0,
f_bsize: info.bsize as _,
f_blocks: info.blocks as _,
| random_line_split |
|
file.rs | proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read(&mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// Writes to a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the open syscall. Returns bytes written successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer write
/// - len – number of bytes to write
pub fn sys_write(&self, fd: FileDesc, base: UserInPtr<u8>, len: usize) -> SysResult {
info!("write: fd={:?}, base={:?}, len={:#x}", fd, base, len);
self.linux_process()
.get_file_like(fd)?
.write(base.as_slice(len)?)
}
/// read from or write to a file descriptor at a given offset
/// reads up to count bytes from file descriptor fd at offset offset
/// (from the start of the file) into the buffer starting at buf. The file offset is not changed.
pub async fn sys_pread(
&self,
fd: FileDesc,
mut base: UserOutPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pread: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read_at(offset, &mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// writes up to count bytes from the buffer
/// starting at buf to the file descriptor fd at offset offset. The file offset is not changed.
pub fn sys_pwrite(
&self,
fd: FileDesc,
base: UserInPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pwrite: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
self.linux_process()
.get_file_like(fd)?
.write_at(offset, base.as_slice(len)?)
}
/// works just like read except that multiple buffers are filled.
/// reads iov_count buffers from the file
/// associated with the file descriptor fd into the buffers described by iov ("scatter input")
pub async fn sys_readv(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecOut>,
iov_count: usize,
) -> SysResult {
info!("readv: fd={:?}, iov={:?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate(&self, path: UserInPtr<u8>, len: usize) -> SysResult {
le | use the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if!in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if!out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64;
let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if!in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if!out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1)!= 0 {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = proc.get_free_fd_from(arg);
self.sys_dup2(fd, new_fd)?;
let dup = proc.get_file_like(new_fd)?;
let mut flags = dup.flags();
if cmd == FcntlCmd::DUPFD_CLOEXEC {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
dup.set_flags(flags)?;
Ok(new_fd.into())
}
_ => Err(LxError::EINVAL),
}
} else {
Err(LxError::EINVAL)
}
}
/// Checks whether the calling process can access the file pathname
pub fn sys_access(&self, path: UserInPtr<u8>, mode: usize) -> SysResult {
self.sys_faccessat(FileDesc::CWD, path, mode, 0)
}
/// Check user's permissions of a file relative to a directory file descriptor
/// TODO: check permissions based on uid/gid
pub fn sys_faccessat(
&self,
dirfd: FileDesc,
path: UserInPtr<u8>,
mode: usize,
flags: usize,
) -> SysResult {
// TODO: check permissions based on uid/gid
let path = path.as_c_str()?;
let flags = AtFlags::from_bits_truncate(flags);
info!(
"faccessat: dirfd={:?}, path={:?}, mode={:#o}, flags={:?}",
dirfd, path, mode, flags
);
let proc = self.linux_process();
let follow =!flags.contains(AtFlags::SYMLINK_NOFOLLOW);
let _inode = proc.lookup_inode_at(dirfd, path, follow)?;
Ok(0)
}
/// change file timestamps with nanosecond precision
pub fn sys_utimensat(
&mut self,
dirfd: FileDesc,
pathname: UserInPtr<u8>,
times: UserInOutPtr<[TimeSpec; 2]>,
flags: usize,
) -> SysResult {
info!(
"utimensat(raw): dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
const UTIME_NOW: usize = 0x3fffffff;
const UTIME_OMIT: usize = 0x3ffffffe;
let proc = self.linux_process();
let mut times = if times.is_null() {
let epoch = TimeSpec::now();
[epoch, epoch]
} else {
let times = times.read()?;
[times[0], times[1]]
};
let inode = if pathname.is_null() {
let fd = dirfd;
info!("futimens: fd: {:?}, times: {:?}", fd, times);
proc.get_file(fd)?.inode()
} else {
let pathname = pathname.as_c_str()?;
info!(
"utimensat: dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
let follow = if flags == 0 {
true
} else if flags == AtFlags::SYMLINK_NOFOLLOW.bits() {
false
} else {
return Err(LxError::EINVAL);
};
proc.lookup_inode_at(dirfd, pathname, follow)?
};
let mut metadata = inode.metadata()?;
if times[0].nsec!= UTIME_OMIT {
if times[0].nsec == UTIME_NOW {
times[0] = TimeSpec::now();
}
metadata.atime = rcore_fs::vfs::Timespec {
sec: times[0].sec as i64,
nsec: times[0].nsec as i32,
};
}
if times[1].nsec!= UTIME_OMIT {
if times[1].nsec == UTIME_NOW {
times[1] = TimeSpec::now();
}
metadata.mtime = rcore_fs::vfs::Timespec {
sec: times[1].sec as i64,
nsec: times[1].nsec as i32,
};
}
inode.set_metadata(&metadata)?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `statfs` system call returns information about a mounted filesystem.
/// `path` is the pathname of **any file** within the mounted filesystem.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_statfs(&self, path: UserInPtr<u8>, mut buf: UserOutPtr<StatFs>) -> SysResult {
let path = path.as_c_str()?;
info!("statfs: path={:?}, buf={:?}", path, buf);
// TODO
// 现在 `path` 没用到,因为没实现真正的挂载,不可能搞一个非主要文件系统的路径。
// 实现挂载之后,要用 `path` 分辨路径在哪个文件系统里,根据对应文件系统的特性返回统计信息。
// (以及根据挂载选项填写 `StatFs::f_flags`!)
let info = self.linux_process().root_inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `fstatfs` system call returns information about a mounted filesystem.
/// `fd` is the descriptor referencing an open file.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_fstatfs(&self, fd: FileDesc, mut buf: UserOutPtr<StatFs>) -> SysResult {
info!("statfs: fd={:?}, buf={:?}", fd, buf);
let info = self.linux_process().get_file(fd)?.inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
}
const F_LINUX_SPECIFIC_BASE: usize = 1024;
/// The file system statistics struct defined in linux
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
#[repr(C)]
pub struct StatFs {
f_type: i64,
f_bsize: i64,
f_blocks: u64,
f_bfree: u64,
f_bavail: u64,
f_files: u64,
f_ffree: u64,
f_fsid: (i32, i32),
f_namelen: isize,
f_frsize: isize,
f_flags: isize,
f_spare: [isize; 4],
}
// 保证 `StatFs` 的定义和常见的 linux 一致
static_assertions::const_assert_eq!(120, core::mem::size_of::<StatFs>());
impl From<FsInfo> for StatFs {
fn from(info: FsInfo) -> Self {
StatFs {
// TODO 文件系统的魔数,需要 rcore-fs 提供一个渠道获取
// 但是这个似乎并没有什么用处,新的 vfs 相关函数都去掉了,也许永远填个常数就好了
f_type: 0,
f_bsize: info.bsize as _,
f_blocks: info.blocks as _,
| t path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// ca | identifier_body |
file.rs | let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read(&mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// Writes to a specified file using a file descriptor. Before using this call,
/// you must first obtain a file descriptor using the open syscall. Returns bytes written successfully.
/// - fd – file descriptor
/// - base – pointer to the buffer write
/// - len – number of bytes to write
pub fn sys_write(&self, fd: FileDesc, base: UserInPtr<u8>, len: usize) -> SysResult {
info!("write: fd={:?}, base={:?}, len={:#x}", fd, base, len);
self.linux_process()
.get_file_like(fd)?
.write(base.as_slice(len)?)
}
/// read from or write to a file descriptor at a given offset
/// reads up to count bytes from file descriptor fd at offset offset
/// (from the start of the file) into the buffer starting at buf. The file offset is not changed.
pub async fn sys_pread(
&self,
fd: FileDesc,
mut base: UserOutPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pread: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; len];
let len = file_like.read_at(offset, &mut buf).await?;
base.write_array(&buf[..len])?;
Ok(len)
}
/// writes up to count bytes from the buffer
/// starting at buf to the file descriptor fd at offset offset. The file offset is not changed.
pub fn sys_pwrite(
&self,
fd: FileDesc,
base: UserInPtr<u8>,
len: usize,
offset: u64,
) -> SysResult {
info!(
"pwrite: fd={:?}, base={:?}, len={}, offset={}",
fd, base, len, offset
);
self.linux_process()
.get_file_like(fd)?
.write_at(offset, base.as_slice(len)?)
}
/// works just like read except that multiple buffers are filled.
/// reads iov_count buffers from the file
/// associated with the file descriptor fd into the buffers described by iov ("scatter input")
pub async fn sys_readv(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecOut>,
iov_count: usize,
) -> SysResult {
info!("readv: fd={:?}, iov={:?}, count={}", fd, iov_ptr, iov_count);
let mut iovs = iov_ptr.read_iovecs(iov_count)?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let mut buf = vec![0u8; iovs.total_len()];
let len = file_like.read(&mut buf).await?;
iovs.write_from_buf(&buf)?;
Ok(len)
}
/// works just like write except that multiple buffers are written out.
/// writes iov_count buffers of data described
/// by iov to the file associated with the file descriptor fd ("gather output").
pub fn sys_writev(
&self,
fd: FileDesc,
iov_ptr: UserInPtr<IoVecIn>,
iov_count: usize,
) -> SysResult {
info!(
"writev: fd={:?}, iov={:?}, count={}",
fd, iov_ptr, iov_count
);
let iovs = iov_ptr.read_iovecs(iov_count)?;
let buf = iovs.read_to_vec()?;
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
let len = file_like.write(&buf)?;
Ok(len)
}
/// repositions the offset of the open file associated with the file descriptor fd
/// to the argument offset according to the directive whence
pub fn sys_lseek(&self, fd: FileDesc, offset: i64, whence: u8) -> SysResult {
const SEEK_SET: u8 = 0;
const SEEK_CUR: u8 = 1;
const SEEK_END: u8 = 2;
let pos = match whence {
SEEK_SET => SeekFrom::Start(offset as u64),
SEEK_END => SeekFrom::End(offset),
SEEK_CUR => SeekFrom::Current(offset),
_ => return Err(LxError::EINVAL),
};
info!("lseek: fd={:?}, pos={:?}", fd, pos);
let proc = self.linux_process();
let file = proc.get_file(fd)?;
let offset = file.seek(pos)?;
Ok(offset as usize)
}
/// cause the regular file named by path to be truncated to a size of precisely length bytes.
pub fn sys_truncate(&self, path: UserInPtr<u8>, len: usize) -> SysResult {
let path = path.as_c_str()?;
info!("truncate: path={:?}, len={}", path, len);
self.linux_process().lookup_inode(path)?.resize(len)?;
Ok(0)
}
/// cause the regular file referenced by fd to be truncated to a size of precisely length bytes.
pub fn sys_ftruncate(&self, fd: FileDesc, len: usize) -> SysResult {
info!("ftruncate: fd={:?}, len={}", fd, len);
let proc = self.linux_process();
proc.get_file(fd)?.set_len(len as u64)?;
Ok(0)
}
/// copies data between one file descriptor and another.
pub async fn sys_sendfile(
&self,
out_fd: FileDesc,
in_fd: FileDesc,
offset_ptr: UserInOutPtr<u64>,
count: usize,
) -> SysResult {
self.sys_copy_file_range(in_fd, offset_ptr, out_fd, 0.into(), count, 0)
.await
}
/// copies data between one file descriptor and anothe, read from specified offset and write new offset back
pub async fn sys_copy_file_range(
&self,
in_fd: FileDesc,
mut in_offset: UserInOutPtr<u64>,
out_fd: FileDesc,
mut out_offset: UserInOutPtr<u64>,
count: usize,
flags: usize,
) -> SysResult {
info!(
"copy_file_range: in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={}, flags={}",
in_fd, out_fd, in_offset, out_offset, count, flags
);
let proc = self.linux_process();
let in_file = proc.get_file(in_fd)?;
let out_file = proc.get_file(out_fd)?;
let mut buffer = [0u8; 1024];
// for in_offset and out_offset
// null means update file offset
// non-null means update {in,out}_offset instead
let mut read_offset = if!in_offset.is_null() {
in_offset.read()?
} else {
in_file.seek(SeekFrom::Current(0))?
};
let orig_out_file_offset = out_file.seek(SeekFrom::Current(0))?;
let write_offset = if!out_offset.is_null() {
let offset = out_offset.read()?;
out_file.seek(SeekFrom::Start(offset))?
} else {
0
};
// read from specified offset and write new offset back
let mut bytes_read = 0;
let mut total_written = 0;
while bytes_read < count {
let len = buffer.len().min(count - bytes_read);
let read_len = in_file.read_at(read_offset, &mut buffer[..len]).await?;
if read_len == 0 {
break;
}
bytes_read += read_len;
read_offset += read_len as u64;
let mut bytes_written = 0;
let mut rlen = read_len;
while bytes_written < read_len {
let write_len = out_file.write(&buffer[bytes_written..(bytes_written + rlen)])?;
if write_len == 0 {
info!(
"copy_file_range:END_ERR in={:?}, out={:?}, in_offset={:?}, out_offset={:?}, count={} = bytes_read {}, bytes_written {}, write_len {}",
in_fd, out_fd, in_offset, out_offset, count, bytes_read, bytes_written, write_len
);
return Err(LxError::EBADF);
}
bytes_written += write_len;
rlen -= write_len;
}
total_written += bytes_written;
}
if!in_offset.is_null() {
in_offset.write(read_offset)?;
} else {
in_file.seek(SeekFrom::Current(bytes_read as i64))?;
}
out_offset.write_if_not_null(write_offset + total_written as u64)?;
if!out_offset.is_null() {
out_file.seek(SeekFrom::Start(orig_out_file_offset))?;
}
Ok(total_written)
}
/// causes all buffered modifications to file metadata and data to be written to the underlying file systems.
pub fn sys_sync(&self) -> SysResult {
info!("sync:");
let proc = self.linux_process();
proc.root_inode().fs().sync()?;
Ok(0)
}
/// transfers ("flushes") all modified in-core data of (i.e., modified buffer cache pages for) the file
/// referred to by the file descriptor fd to the disk device
pub fn sys_fsync(&self, fd: FileDesc) -> SysResult {
info!("fsync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_all()?;
Ok(0)
}
/// is similar to fsync(), but does not flush modified metadata unless that metadata is needed
pub fn sys_fdatasync(&self, fd: FileDesc) -> SysResult {
info!("fdatasync: fd={:?}", fd);
let proc = self.linux_process();
proc.get_file(fd)?.sync_data()?;
Ok(0)
}
/// Set parameters of device files.
pub fn sys_ioctl(
&self,
fd: FileDesc,
request: usize,
arg1: usize,
arg2: usize,
arg3: usize,
) -> SysResult {
info!(
"ioctl: fd={:?}, request={:#x}, args=[{:#x}, {:#x}, {:#x}]",
fd, request, arg1, arg2, arg3
);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
file_like.ioctl(request, arg1, arg2, arg3)
}
/// Manipulate a file descriptor.
/// - cmd – cmd flag
/// - arg – additional parameters based on cmd
pub fn sys_fcntl(&self, fd: FileDesc, cmd: usize, arg: usize) -> SysResult {
info!("fcntl: fd={:?}, cmd={}, arg={}", fd, cmd, arg);
let proc = self.linux_process();
let file_like = proc.get_file_like(fd)?;
if let Ok(cmd) = FcntlCmd::try_from(cmd) {
match cmd {
FcntlCmd::GETFD => Ok(file_like.flags().close_on_exec() as usize),
FcntlCmd::SETFD => {
let mut flags = file_like.flags();
if (arg & 1)!= 0 {
| flags -= OpenFlags::CLOEXEC;
}
file_like.set_flags(flags)?;
Ok(0)
}
FcntlCmd::GETFL => Ok(file_like.flags().bits()),
FcntlCmd::SETFL => {
file_like.set_flags(OpenFlags::from_bits_truncate(arg))?;
Ok(0)
}
FcntlCmd::DUPFD | FcntlCmd::DUPFD_CLOEXEC => {
let new_fd = proc.get_free_fd_from(arg);
self.sys_dup2(fd, new_fd)?;
let dup = proc.get_file_like(new_fd)?;
let mut flags = dup.flags();
if cmd == FcntlCmd::DUPFD_CLOEXEC {
flags |= OpenFlags::CLOEXEC;
} else {
flags -= OpenFlags::CLOEXEC;
}
dup.set_flags(flags)?;
Ok(new_fd.into())
}
_ => Err(LxError::EINVAL),
}
} else {
Err(LxError::EINVAL)
}
}
/// Checks whether the calling process can access the file pathname
pub fn sys_access(&self, path: UserInPtr<u8>, mode: usize) -> SysResult {
self.sys_faccessat(FileDesc::CWD, path, mode, 0)
}
/// Check user's permissions of a file relative to a directory file descriptor
/// TODO: check permissions based on uid/gid
pub fn sys_faccessat(
&self,
dirfd: FileDesc,
path: UserInPtr<u8>,
mode: usize,
flags: usize,
) -> SysResult {
// TODO: check permissions based on uid/gid
let path = path.as_c_str()?;
let flags = AtFlags::from_bits_truncate(flags);
info!(
"faccessat: dirfd={:?}, path={:?}, mode={:#o}, flags={:?}",
dirfd, path, mode, flags
);
let proc = self.linux_process();
let follow =!flags.contains(AtFlags::SYMLINK_NOFOLLOW);
let _inode = proc.lookup_inode_at(dirfd, path, follow)?;
Ok(0)
}
/// change file timestamps with nanosecond precision
pub fn sys_utimensat(
&mut self,
dirfd: FileDesc,
pathname: UserInPtr<u8>,
times: UserInOutPtr<[TimeSpec; 2]>,
flags: usize,
) -> SysResult {
info!(
"utimensat(raw): dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
const UTIME_NOW: usize = 0x3fffffff;
const UTIME_OMIT: usize = 0x3ffffffe;
let proc = self.linux_process();
let mut times = if times.is_null() {
let epoch = TimeSpec::now();
[epoch, epoch]
} else {
let times = times.read()?;
[times[0], times[1]]
};
let inode = if pathname.is_null() {
let fd = dirfd;
info!("futimens: fd: {:?}, times: {:?}", fd, times);
proc.get_file(fd)?.inode()
} else {
let pathname = pathname.as_c_str()?;
info!(
"utimensat: dirfd: {:?}, pathname: {:?}, times: {:?}, flags: {:#x}",
dirfd, pathname, times, flags
);
let follow = if flags == 0 {
true
} else if flags == AtFlags::SYMLINK_NOFOLLOW.bits() {
false
} else {
return Err(LxError::EINVAL);
};
proc.lookup_inode_at(dirfd, pathname, follow)?
};
let mut metadata = inode.metadata()?;
if times[0].nsec!= UTIME_OMIT {
if times[0].nsec == UTIME_NOW {
times[0] = TimeSpec::now();
}
metadata.atime = rcore_fs::vfs::Timespec {
sec: times[0].sec as i64,
nsec: times[0].nsec as i32,
};
}
if times[1].nsec!= UTIME_OMIT {
if times[1].nsec == UTIME_NOW {
times[1] = TimeSpec::now();
}
metadata.mtime = rcore_fs::vfs::Timespec {
sec: times[1].sec as i64,
nsec: times[1].nsec as i32,
};
}
inode.set_metadata(&metadata)?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `statfs` system call returns information about a mounted filesystem.
/// `path` is the pathname of **any file** within the mounted filesystem.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_statfs(&self, path: UserInPtr<u8>, mut buf: UserOutPtr<StatFs>) -> SysResult {
let path = path.as_c_str()?;
info!("statfs: path={:?}, buf={:?}", path, buf);
// TODO
// 现在 `path` 没用到,因为没实现真正的挂载,不可能搞一个非主要文件系统的路径。
// 实现挂载之后,要用 `path` 分辨路径在哪个文件系统里,根据对应文件系统的特性返回统计信息。
// (以及根据挂载选项填写 `StatFs::f_flags`!)
let info = self.linux_process().root_inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
/// Get filesystem statistics
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
///
/// The `fstatfs` system call returns information about a mounted filesystem.
/// `fd` is the descriptor referencing an open file.
/// `buf` is a pointer to a `StatFs` structure.
pub fn sys_fstatfs(&self, fd: FileDesc, mut buf: UserOutPtr<StatFs>) -> SysResult {
info!("statfs: fd={:?}, buf={:?}", fd, buf);
let info = self.linux_process().get_file(fd)?.inode().fs().info();
buf.write(info.into())?;
Ok(0)
}
}
const F_LINUX_SPECIFIC_BASE: usize = 1024;
/// The file system statistics struct defined in linux
/// (see [linux man statfs(2)](https://man7.org/linux/man-pages/man2/statfs.2.html)).
#[repr(C)]
pub struct StatFs {
f_type: i64,
f_bsize: i64,
f_blocks: u64,
f_bfree: u64,
f_bavail: u64,
f_files: u64,
f_ffree: u64,
f_fsid: (i32, i32),
f_namelen: isize,
f_frsize: isize,
f_flags: isize,
f_spare: [isize; 4],
}
// 保证 `StatFs` 的定义和常见的 linux 一致
static_assertions::const_assert_eq!(120, core::mem::size_of::<StatFs>());
impl From<FsInfo> for StatFs {
fn from(info: FsInfo) -> Self {
StatFs {
// TODO 文件系统的魔数,需要 rcore-fs 提供一个渠道获取
// 但是这个似乎并没有什么用处,新的 vfs 相关函数都去掉了,也许永远填个常数就好了
f_type: 0,
f_bsize: info.bsize as _,
f_blocks: info.blocks as _,
| flags |= OpenFlags::CLOEXEC;
} else {
| conditional_block |
tokens.rs | use crate::protocol::input_source::{
InputPosition as InputPosition,
InputSpan
};
/// Represents a particular kind of token. Some tokens represent
/// variable-character tokens. Such a token is always followed by a
/// `TokenKind::SpanEnd` token.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Variable-character tokens, followed by a SpanEnd token
Ident, // regular identifier
Pragma, // identifier with prefixed `#`, range includes `#`
Integer, // integer literal
String, // string literal, range includes `"`
Character, // character literal, range includes `'`
LineComment, // line comment, range includes leading `//`, but not newline
BlockComment, // block comment, range includes leading `/*` and trailing `*/`
// Punctuation (single character)
Exclamation, //!
Question, //?
Pound, // #
OpenAngle, // <
OpenCurly, // {
OpenParen, // (
OpenSquare, // [
CloseAngle, // >
CloseCurly, // }
CloseParen, // )
CloseSquare, // ]
Colon, // :
Comma, //,
Dot, //.
SemiColon, // ;
// Operator-like (single character)
At, // @
Plus, // +
Minus, // -
Star, // *
Slash, // /
Percent, // %
Caret, // ^
And, // &
Or, // |
Tilde, // ~
Equal, // =
// Punctuation (two characters)
ColonColon, // ::
DotDot, //..
ArrowRight, // ->
// Operator-like (two characters)
AtEquals, // @=
PlusPlus, // ++
PlusEquals, // +=
MinusMinus, // --
MinusEquals, // -=
StarEquals, // *=
SlashEquals, // /=
PercentEquals, // %=
CaretEquals, // ^=
AndAnd, // &&
AndEquals, // &=
OrOr, // || | LessEquals, // <=
ShiftRight, // >>
GreaterEquals, // >=
// Operator-like (three characters)
ShiftLeftEquals,// <<=
ShiftRightEquals, // >>=
// Special marker token to indicate end of variable-character tokens
SpanEnd,
}
impl TokenKind {
/// Returns true if the next expected token is the special `TokenKind::SpanEnd` token. This is
/// the case for tokens of variable length (e.g. an identifier).
fn has_span_end(&self) -> bool {
return *self <= TokenKind::BlockComment
}
/// Returns the number of characters associated with the token. May only be called on tokens
/// that do not have a variable length.
fn num_characters(&self) -> u32 {
debug_assert!(!self.has_span_end() && *self!= TokenKind::SpanEnd);
if *self <= TokenKind::Equal {
1
} else if *self <= TokenKind::GreaterEquals {
2
} else {
3
}
}
/// Returns the characters that are represented by the token, may only be called on tokens that
/// do not have a variable length.
pub fn token_chars(&self) -> &'static str {
debug_assert!(!self.has_span_end() && *self!= TokenKind::SpanEnd);
use TokenKind as TK;
match self {
TK::Exclamation => "!",
TK::Question => "?",
TK::Pound => "#",
TK::OpenAngle => "<",
TK::OpenCurly => "{",
TK::OpenParen => "(",
TK::OpenSquare => "[",
TK::CloseAngle => ">",
TK::CloseCurly => "}",
TK::CloseParen => ")",
TK::CloseSquare => "]",
TK::Colon => ":",
TK::Comma => ",",
TK::Dot => ".",
TK::SemiColon => ";",
TK::At => "@",
TK::Plus => "+",
TK::Minus => "-",
TK::Star => "*",
TK::Slash => "/",
TK::Percent => "%",
TK::Caret => "^",
TK::And => "&",
TK::Or => "|",
TK::Tilde => "~",
TK::Equal => "=",
TK::ColonColon => "::",
TK::DotDot => "..",
TK::ArrowRight => "->",
TK::AtEquals => "@=",
TK::PlusPlus => "++",
TK::PlusEquals => "+=",
TK::MinusMinus => "--",
TK::MinusEquals => "-=",
TK::StarEquals => "*=",
TK::SlashEquals => "/=",
TK::PercentEquals => "%=",
TK::CaretEquals => "^=",
TK::AndAnd => "&&",
TK::AndEquals => "&=",
TK::OrOr => "||",
TK::OrEquals => "|=",
TK::EqualEqual => "==",
TK::NotEqual => "!=",
TK::ShiftLeft => "<<",
TK::LessEquals => "<=",
TK::ShiftRight => ">>",
TK::GreaterEquals => ">=",
TK::ShiftLeftEquals => "<<=",
TK::ShiftRightEquals => ">>=",
// Lets keep these in explicitly for now, in case we want to add more symbols
TK::Ident | TK::Pragma | TK::Integer | TK::String | TK::Character |
TK::LineComment | TK::BlockComment | TK::SpanEnd => unreachable!(),
}
}
}
/// Represents a single token at a particular position.
pub struct Token {
pub kind: TokenKind,
pub pos: InputPosition,
}
impl Token {
pub(crate) fn new(kind: TokenKind, pos: InputPosition) -> Self {
Self{ kind, pos }
}
}
/// The kind of token ranges that are specially parsed by the tokenizer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenRangeKind {
Module,
Pragma,
Import,
Definition,
Code,
}
pub const NO_RELATION: i32 = -1;
pub const NO_SIBLING: i32 = NO_RELATION;
/// A range of tokens with a specific meaning. Such a range is part of a tree
/// where each parent tree envelops all of its children.
#[derive(Debug)]
pub struct TokenRange {
// Index of parent in `TokenBuffer.ranges`, does not have a parent if the
// range kind is Module, in that case the parent index is -1.
pub parent_idx: i32,
pub range_kind: TokenRangeKind,
pub curly_depth: u32,
// Offsets into `TokenBuffer.ranges`: the tokens belonging to this range.
pub start: u32, // first token (inclusive index)
pub end: u32, // last token (exclusive index)
// Child ranges
pub num_child_ranges: u32, // Number of subranges
pub first_child_idx: i32, // First subrange (or -1 if no subranges)
pub last_child_idx: i32, // Last subrange (or -1 if no subranges)
pub next_sibling_idx: i32, // Next subrange (or -1 if no next subrange)
}
pub struct TokenBuffer {
pub tokens: Vec<Token>,
pub ranges: Vec<TokenRange>,
}
impl TokenBuffer {
pub(crate) fn new() -> Self {
Self{ tokens: Vec::new(), ranges: Vec::new() }
}
pub(crate) fn iter_range<'a>(&'a self, range: &TokenRange) -> TokenIter<'a> {
TokenIter::new(self, range.start as usize, range.end as usize)
}
pub(crate) fn start_pos(&self, range: &TokenRange) -> InputPosition {
self.tokens[range.start as usize].pos
}
pub(crate) fn end_pos(&self, range: &TokenRange) -> InputPosition {
let last_token = &self.tokens[range.end as usize - 1];
if last_token.kind == TokenKind::SpanEnd {
return last_token.pos
} else {
debug_assert!(!last_token.kind.has_span_end());
return last_token.pos.with_offset(last_token.kind.num_characters());
}
}
}
/// Iterator over tokens within a specific `TokenRange`.
pub(crate) struct TokenIter<'a> {
tokens: &'a Vec<Token>,
cur: usize,
end: usize,
}
impl<'a> TokenIter<'a> {
fn new(buffer: &'a TokenBuffer, start: usize, end: usize) -> Self {
Self{ tokens: &buffer.tokens, cur: start, end }
}
/// Returns the next token (may include comments), or `None` if at the end
/// of the range.
pub(crate) fn next_including_comments(&self) -> Option<TokenKind> {
if self.cur >= self.end {
return None;
}
let token = &self.tokens[self.cur];
Some(token.kind)
}
/// Returns the next token (but skips over comments), or `None` if at the
/// end of the range
pub(crate) fn next(&mut self) -> Option<TokenKind> {
while let Some(token_kind) = self.next_including_comments() {
if token_kind!= TokenKind::LineComment && token_kind!= TokenKind::BlockComment {
return Some(token_kind);
}
self.consume();
}
return None
}
/// Peeks ahead by one token (i.e. the one that comes after `next()`), and
/// skips over comments
pub(crate) fn peek(&self) -> Option<TokenKind> {
for next_idx in self.cur + 1..self.end {
let next_kind = self.tokens[next_idx].kind;
if next_kind!= TokenKind::LineComment && next_kind!= TokenKind::BlockComment && next_kind!= TokenKind::SpanEnd {
return Some(next_kind);
}
}
return None;
}
/// Returns the start position belonging to the token returned by `next`. If
/// there is not a next token, then we return the end position of the
/// previous token.
pub(crate) fn last_valid_pos(&self) -> InputPosition {
if self.cur < self.end {
// Return token position
return self.tokens[self.cur].pos
}
// Return previous token end
let token = &self.tokens[self.cur - 1];
return if token.kind == TokenKind::SpanEnd {
token.pos
} else {
token.pos.with_offset(token.kind.num_characters())
};
}
/// Returns the token range belonging to the token returned by `next`. This
/// assumes that we're not at the end of the range we're iterating over.
/// TODO: @cleanup Phase out?
pub(crate) fn next_positions(&self) -> (InputPosition, InputPosition) {
debug_assert!(self.cur < self.end);
let token = &self.tokens[self.cur];
if token.kind.has_span_end() {
let span_end = &self.tokens[self.cur + 1];
debug_assert_eq!(span_end.kind, TokenKind::SpanEnd);
(token.pos, span_end.pos)
} else {
let offset = token.kind.num_characters();
(token.pos, token.pos.with_offset(offset))
}
}
/// See `next_positions`
pub(crate) fn next_span(&self) -> InputSpan {
let (begin, end) = self.next_positions();
return InputSpan::from_positions(begin, end)
}
/// Advances the iterator to the next (meaningful) token.
pub(crate) fn consume(&mut self) {
if let Some(kind) = self.next_including_comments() {
if kind.has_span_end() {
self.cur += 2;
} else {
self.cur += 1;
}
}
}
/// Saves the current iteration position, may be passed to `load` to return
/// the iterator to a previous position.
pub(crate) fn save(&self) -> (usize, usize) {
(self.cur, self.end)
}
pub(crate) fn load(&mut self, saved: (usize, usize)) {
self.cur = saved.0;
self.end = saved.1;
}
} | OrEquals, // |=
EqualEqual, // ==
NotEqual, // !=
ShiftLeft, // << | random_line_split |
tokens.rs | use crate::protocol::input_source::{
InputPosition as InputPosition,
InputSpan
};
/// Represents a particular kind of token. Some tokens represent
/// variable-character tokens. Such a token is always followed by a
/// `TokenKind::SpanEnd` token.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Variable-character tokens, followed by a SpanEnd token
Ident, // regular identifier
Pragma, // identifier with prefixed `#`, range includes `#`
Integer, // integer literal
String, // string literal, range includes `"`
Character, // character literal, range includes `'`
LineComment, // line comment, range includes leading `//`, but not newline
BlockComment, // block comment, range includes leading `/*` and trailing `*/`
// Punctuation (single character)
Exclamation, //!
Question, //?
Pound, // #
OpenAngle, // <
OpenCurly, // {
OpenParen, // (
OpenSquare, // [
CloseAngle, // >
CloseCurly, // }
CloseParen, // )
CloseSquare, // ]
Colon, // :
Comma, //,
Dot, //.
SemiColon, // ;
// Operator-like (single character)
At, // @
Plus, // +
Minus, // -
Star, // *
Slash, // /
Percent, // %
Caret, // ^
And, // &
Or, // |
Tilde, // ~
Equal, // =
// Punctuation (two characters)
ColonColon, // ::
DotDot, //..
ArrowRight, // ->
// Operator-like (two characters)
AtEquals, // @=
PlusPlus, // ++
PlusEquals, // +=
MinusMinus, // --
MinusEquals, // -=
StarEquals, // *=
SlashEquals, // /=
PercentEquals, // %=
CaretEquals, // ^=
AndAnd, // &&
AndEquals, // &=
OrOr, // ||
OrEquals, // |=
EqualEqual, // ==
NotEqual, //!=
ShiftLeft, // <<
LessEquals, // <=
ShiftRight, // >>
GreaterEquals, // >=
// Operator-like (three characters)
ShiftLeftEquals,// <<=
ShiftRightEquals, // >>=
// Special marker token to indicate end of variable-character tokens
SpanEnd,
}
impl TokenKind {
/// Returns true if the next expected token is the special `TokenKind::SpanEnd` token. This is
/// the case for tokens of variable length (e.g. an identifier).
fn has_span_end(&self) -> bool {
return *self <= TokenKind::BlockComment
}
/// Returns the number of characters associated with the token. May only be called on tokens
/// that do not have a variable length.
fn num_characters(&self) -> u32 {
debug_assert!(!self.has_span_end() && *self!= TokenKind::SpanEnd);
if *self <= TokenKind::Equal {
1
} else if *self <= TokenKind::GreaterEquals {
2
} else {
3
}
}
/// Returns the characters that are represented by the token, may only be called on tokens that
/// do not have a variable length.
pub fn token_chars(&self) -> &'static str {
debug_assert!(!self.has_span_end() && *self!= TokenKind::SpanEnd);
use TokenKind as TK;
match self {
TK::Exclamation => "!",
TK::Question => "?",
TK::Pound => "#",
TK::OpenAngle => "<",
TK::OpenCurly => "{",
TK::OpenParen => "(",
TK::OpenSquare => "[",
TK::CloseAngle => ">",
TK::CloseCurly => "}",
TK::CloseParen => ")",
TK::CloseSquare => "]",
TK::Colon => ":",
TK::Comma => ",",
TK::Dot => ".",
TK::SemiColon => ";",
TK::At => "@",
TK::Plus => "+",
TK::Minus => "-",
TK::Star => "*",
TK::Slash => "/",
TK::Percent => "%",
TK::Caret => "^",
TK::And => "&",
TK::Or => "|",
TK::Tilde => "~",
TK::Equal => "=",
TK::ColonColon => "::",
TK::DotDot => "..",
TK::ArrowRight => "->",
TK::AtEquals => "@=",
TK::PlusPlus => "++",
TK::PlusEquals => "+=",
TK::MinusMinus => "--",
TK::MinusEquals => "-=",
TK::StarEquals => "*=",
TK::SlashEquals => "/=",
TK::PercentEquals => "%=",
TK::CaretEquals => "^=",
TK::AndAnd => "&&",
TK::AndEquals => "&=",
TK::OrOr => "||",
TK::OrEquals => "|=",
TK::EqualEqual => "==",
TK::NotEqual => "!=",
TK::ShiftLeft => "<<",
TK::LessEquals => "<=",
TK::ShiftRight => ">>",
TK::GreaterEquals => ">=",
TK::ShiftLeftEquals => "<<=",
TK::ShiftRightEquals => ">>=",
// Lets keep these in explicitly for now, in case we want to add more symbols
TK::Ident | TK::Pragma | TK::Integer | TK::String | TK::Character |
TK::LineComment | TK::BlockComment | TK::SpanEnd => unreachable!(),
}
}
}
/// Represents a single token at a particular position.
pub struct Token {
pub kind: TokenKind,
pub pos: InputPosition,
}
impl Token {
pub(crate) fn new(kind: TokenKind, pos: InputPosition) -> Self {
Self{ kind, pos }
}
}
/// The kind of token ranges that are specially parsed by the tokenizer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenRangeKind {
Module,
Pragma,
Import,
Definition,
Code,
}
pub const NO_RELATION: i32 = -1;
pub const NO_SIBLING: i32 = NO_RELATION;
/// A range of tokens with a specific meaning. Such a range is part of a tree
/// where each parent tree envelops all of its children.
#[derive(Debug)]
pub struct TokenRange {
// Index of parent in `TokenBuffer.ranges`, does not have a parent if the
// range kind is Module, in that case the parent index is -1.
pub parent_idx: i32,
pub range_kind: TokenRangeKind,
pub curly_depth: u32,
// Offsets into `TokenBuffer.ranges`: the tokens belonging to this range.
pub start: u32, // first token (inclusive index)
pub end: u32, // last token (exclusive index)
// Child ranges
pub num_child_ranges: u32, // Number of subranges
pub first_child_idx: i32, // First subrange (or -1 if no subranges)
pub last_child_idx: i32, // Last subrange (or -1 if no subranges)
pub next_sibling_idx: i32, // Next subrange (or -1 if no next subrange)
}
pub struct TokenBuffer {
pub tokens: Vec<Token>,
pub ranges: Vec<TokenRange>,
}
impl TokenBuffer {
pub(crate) fn new() -> Self {
Self{ tokens: Vec::new(), ranges: Vec::new() }
}
pub(crate) fn iter_range<'a>(&'a self, range: &TokenRange) -> TokenIter<'a> {
TokenIter::new(self, range.start as usize, range.end as usize)
}
pub(crate) fn start_pos(&self, range: &TokenRange) -> InputPosition {
self.tokens[range.start as usize].pos
}
pub(crate) fn end_pos(&self, range: &TokenRange) -> InputPosition {
let last_token = &self.tokens[range.end as usize - 1];
if last_token.kind == TokenKind::SpanEnd {
return last_token.pos
} else {
debug_assert!(!last_token.kind.has_span_end());
return last_token.pos.with_offset(last_token.kind.num_characters());
}
}
}
/// Iterator over tokens within a specific `TokenRange`.
pub(crate) struct | <'a> {
tokens: &'a Vec<Token>,
cur: usize,
end: usize,
}
impl<'a> TokenIter<'a> {
fn new(buffer: &'a TokenBuffer, start: usize, end: usize) -> Self {
Self{ tokens: &buffer.tokens, cur: start, end }
}
/// Returns the next token (may include comments), or `None` if at the end
/// of the range.
pub(crate) fn next_including_comments(&self) -> Option<TokenKind> {
if self.cur >= self.end {
return None;
}
let token = &self.tokens[self.cur];
Some(token.kind)
}
/// Returns the next token (but skips over comments), or `None` if at the
/// end of the range
pub(crate) fn next(&mut self) -> Option<TokenKind> {
while let Some(token_kind) = self.next_including_comments() {
if token_kind!= TokenKind::LineComment && token_kind!= TokenKind::BlockComment {
return Some(token_kind);
}
self.consume();
}
return None
}
/// Peeks ahead by one token (i.e. the one that comes after `next()`), and
/// skips over comments
pub(crate) fn peek(&self) -> Option<TokenKind> {
for next_idx in self.cur + 1..self.end {
let next_kind = self.tokens[next_idx].kind;
if next_kind!= TokenKind::LineComment && next_kind!= TokenKind::BlockComment && next_kind!= TokenKind::SpanEnd {
return Some(next_kind);
}
}
return None;
}
/// Returns the start position belonging to the token returned by `next`. If
/// there is not a next token, then we return the end position of the
/// previous token.
pub(crate) fn last_valid_pos(&self) -> InputPosition {
if self.cur < self.end {
// Return token position
return self.tokens[self.cur].pos
}
// Return previous token end
let token = &self.tokens[self.cur - 1];
return if token.kind == TokenKind::SpanEnd {
token.pos
} else {
token.pos.with_offset(token.kind.num_characters())
};
}
/// Returns the token range belonging to the token returned by `next`. This
/// assumes that we're not at the end of the range we're iterating over.
/// TODO: @cleanup Phase out?
pub(crate) fn next_positions(&self) -> (InputPosition, InputPosition) {
debug_assert!(self.cur < self.end);
let token = &self.tokens[self.cur];
if token.kind.has_span_end() {
let span_end = &self.tokens[self.cur + 1];
debug_assert_eq!(span_end.kind, TokenKind::SpanEnd);
(token.pos, span_end.pos)
} else {
let offset = token.kind.num_characters();
(token.pos, token.pos.with_offset(offset))
}
}
/// See `next_positions`
pub(crate) fn next_span(&self) -> InputSpan {
let (begin, end) = self.next_positions();
return InputSpan::from_positions(begin, end)
}
/// Advances the iterator to the next (meaningful) token.
pub(crate) fn consume(&mut self) {
if let Some(kind) = self.next_including_comments() {
if kind.has_span_end() {
self.cur += 2;
} else {
self.cur += 1;
}
}
}
/// Saves the current iteration position, may be passed to `load` to return
/// the iterator to a previous position.
pub(crate) fn save(&self) -> (usize, usize) {
(self.cur, self.end)
}
pub(crate) fn load(&mut self, saved: (usize, usize)) {
self.cur = saved.0;
self.end = saved.1;
}
} | TokenIter | identifier_name |
tokens.rs | use crate::protocol::input_source::{
InputPosition as InputPosition,
InputSpan
};
/// Represents a particular kind of token. Some tokens represent
/// variable-character tokens. Such a token is always followed by a
/// `TokenKind::SpanEnd` token.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Variable-character tokens, followed by a SpanEnd token
Ident, // regular identifier
Pragma, // identifier with prefixed `#`, range includes `#`
Integer, // integer literal
String, // string literal, range includes `"`
Character, // character literal, range includes `'`
LineComment, // line comment, range includes leading `//`, but not newline
BlockComment, // block comment, range includes leading `/*` and trailing `*/`
// Punctuation (single character)
Exclamation, //!
Question, //?
Pound, // #
OpenAngle, // <
OpenCurly, // {
OpenParen, // (
OpenSquare, // [
CloseAngle, // >
CloseCurly, // }
CloseParen, // )
CloseSquare, // ]
Colon, // :
Comma, //,
Dot, //.
SemiColon, // ;
// Operator-like (single character)
At, // @
Plus, // +
Minus, // -
Star, // *
Slash, // /
Percent, // %
Caret, // ^
And, // &
Or, // |
Tilde, // ~
Equal, // =
// Punctuation (two characters)
ColonColon, // ::
DotDot, //..
ArrowRight, // ->
// Operator-like (two characters)
AtEquals, // @=
PlusPlus, // ++
PlusEquals, // +=
MinusMinus, // --
MinusEquals, // -=
StarEquals, // *=
SlashEquals, // /=
PercentEquals, // %=
CaretEquals, // ^=
AndAnd, // &&
AndEquals, // &=
OrOr, // ||
OrEquals, // |=
EqualEqual, // ==
NotEqual, //!=
ShiftLeft, // <<
LessEquals, // <=
ShiftRight, // >>
GreaterEquals, // >=
// Operator-like (three characters)
ShiftLeftEquals,// <<=
ShiftRightEquals, // >>=
// Special marker token to indicate end of variable-character tokens
SpanEnd,
}
impl TokenKind {
/// Returns true if the next expected token is the special `TokenKind::SpanEnd` token. This is
/// the case for tokens of variable length (e.g. an identifier).
fn has_span_end(&self) -> bool {
return *self <= TokenKind::BlockComment
}
/// Returns the number of characters associated with the token. May only be called on tokens
/// that do not have a variable length.
fn num_characters(&self) -> u32 {
debug_assert!(!self.has_span_end() && *self!= TokenKind::SpanEnd);
if *self <= TokenKind::Equal {
1
} else if *self <= TokenKind::GreaterEquals {
2
} else {
3
}
}
/// Returns the characters that are represented by the token, may only be called on tokens that
/// do not have a variable length.
pub fn token_chars(&self) -> &'static str {
debug_assert!(!self.has_span_end() && *self!= TokenKind::SpanEnd);
use TokenKind as TK;
match self {
TK::Exclamation => "!",
TK::Question => "?",
TK::Pound => "#",
TK::OpenAngle => "<",
TK::OpenCurly => "{",
TK::OpenParen => "(",
TK::OpenSquare => "[",
TK::CloseAngle => ">",
TK::CloseCurly => "}",
TK::CloseParen => ")",
TK::CloseSquare => "]",
TK::Colon => ":",
TK::Comma => ",",
TK::Dot => ".",
TK::SemiColon => ";",
TK::At => "@",
TK::Plus => "+",
TK::Minus => "-",
TK::Star => "*",
TK::Slash => "/",
TK::Percent => "%",
TK::Caret => "^",
TK::And => "&",
TK::Or => "|",
TK::Tilde => "~",
TK::Equal => "=",
TK::ColonColon => "::",
TK::DotDot => "..",
TK::ArrowRight => "->",
TK::AtEquals => "@=",
TK::PlusPlus => "++",
TK::PlusEquals => "+=",
TK::MinusMinus => "--",
TK::MinusEquals => "-=",
TK::StarEquals => "*=",
TK::SlashEquals => "/=",
TK::PercentEquals => "%=",
TK::CaretEquals => "^=",
TK::AndAnd => "&&",
TK::AndEquals => "&=",
TK::OrOr => "||",
TK::OrEquals => "|=",
TK::EqualEqual => "==",
TK::NotEqual => "!=",
TK::ShiftLeft => "<<",
TK::LessEquals => "<=",
TK::ShiftRight => ">>",
TK::GreaterEquals => ">=",
TK::ShiftLeftEquals => "<<=",
TK::ShiftRightEquals => ">>=",
// Lets keep these in explicitly for now, in case we want to add more symbols
TK::Ident | TK::Pragma | TK::Integer | TK::String | TK::Character |
TK::LineComment | TK::BlockComment | TK::SpanEnd => unreachable!(),
}
}
}
/// Represents a single token at a particular position.
pub struct Token {
pub kind: TokenKind,
pub pos: InputPosition,
}
impl Token {
pub(crate) fn new(kind: TokenKind, pos: InputPosition) -> Self {
Self{ kind, pos }
}
}
/// The kind of token ranges that are specially parsed by the tokenizer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenRangeKind {
Module,
Pragma,
Import,
Definition,
Code,
}
pub const NO_RELATION: i32 = -1;
pub const NO_SIBLING: i32 = NO_RELATION;
/// A range of tokens with a specific meaning. Such a range is part of a tree
/// where each parent tree envelops all of its children.
#[derive(Debug)]
pub struct TokenRange {
// Index of parent in `TokenBuffer.ranges`, does not have a parent if the
// range kind is Module, in that case the parent index is -1.
pub parent_idx: i32,
pub range_kind: TokenRangeKind,
pub curly_depth: u32,
// Offsets into `TokenBuffer.ranges`: the tokens belonging to this range.
pub start: u32, // first token (inclusive index)
pub end: u32, // last token (exclusive index)
// Child ranges
pub num_child_ranges: u32, // Number of subranges
pub first_child_idx: i32, // First subrange (or -1 if no subranges)
pub last_child_idx: i32, // Last subrange (or -1 if no subranges)
pub next_sibling_idx: i32, // Next subrange (or -1 if no next subrange)
}
pub struct TokenBuffer {
pub tokens: Vec<Token>,
pub ranges: Vec<TokenRange>,
}
impl TokenBuffer {
pub(crate) fn new() -> Self {
Self{ tokens: Vec::new(), ranges: Vec::new() }
}
pub(crate) fn iter_range<'a>(&'a self, range: &TokenRange) -> TokenIter<'a> {
TokenIter::new(self, range.start as usize, range.end as usize)
}
pub(crate) fn start_pos(&self, range: &TokenRange) -> InputPosition {
self.tokens[range.start as usize].pos
}
pub(crate) fn end_pos(&self, range: &TokenRange) -> InputPosition {
let last_token = &self.tokens[range.end as usize - 1];
if last_token.kind == TokenKind::SpanEnd {
return last_token.pos
} else {
debug_assert!(!last_token.kind.has_span_end());
return last_token.pos.with_offset(last_token.kind.num_characters());
}
}
}
/// Iterator over tokens within a specific `TokenRange`.
pub(crate) struct TokenIter<'a> {
tokens: &'a Vec<Token>,
cur: usize,
end: usize,
}
impl<'a> TokenIter<'a> {
fn new(buffer: &'a TokenBuffer, start: usize, end: usize) -> Self {
Self{ tokens: &buffer.tokens, cur: start, end }
}
/// Returns the next token (may include comments), or `None` if at the end
/// of the range.
pub(crate) fn next_including_comments(&self) -> Option<TokenKind> {
if self.cur >= self.end {
return None;
}
let token = &self.tokens[self.cur];
Some(token.kind)
}
/// Returns the next token (but skips over comments), or `None` if at the
/// end of the range
pub(crate) fn next(&mut self) -> Option<TokenKind> {
while let Some(token_kind) = self.next_including_comments() {
if token_kind!= TokenKind::LineComment && token_kind!= TokenKind::BlockComment {
return Some(token_kind);
}
self.consume();
}
return None
}
/// Peeks ahead by one token (i.e. the one that comes after `next()`), and
/// skips over comments
pub(crate) fn peek(&self) -> Option<TokenKind> {
for next_idx in self.cur + 1..self.end {
let next_kind = self.tokens[next_idx].kind;
if next_kind!= TokenKind::LineComment && next_kind!= TokenKind::BlockComment && next_kind!= TokenKind::SpanEnd {
return Some(next_kind);
}
}
return None;
}
/// Returns the start position belonging to the token returned by `next`. If
/// there is not a next token, then we return the end position of the
/// previous token.
pub(crate) fn last_valid_pos(&self) -> InputPosition {
if self.cur < self.end {
// Return token position
return self.tokens[self.cur].pos
}
// Return previous token end
let token = &self.tokens[self.cur - 1];
return if token.kind == TokenKind::SpanEnd {
token.pos
} else {
token.pos.with_offset(token.kind.num_characters())
};
}
/// Returns the token range belonging to the token returned by `next`. This
/// assumes that we're not at the end of the range we're iterating over.
/// TODO: @cleanup Phase out?
pub(crate) fn next_positions(&self) -> (InputPosition, InputPosition) |
/// See `next_positions`
pub(crate) fn next_span(&self) -> InputSpan {
let (begin, end) = self.next_positions();
return InputSpan::from_positions(begin, end)
}
/// Advances the iterator to the next (meaningful) token.
pub(crate) fn consume(&mut self) {
if let Some(kind) = self.next_including_comments() {
if kind.has_span_end() {
self.cur += 2;
} else {
self.cur += 1;
}
}
}
/// Saves the current iteration position, may be passed to `load` to return
/// the iterator to a previous position.
pub(crate) fn save(&self) -> (usize, usize) {
(self.cur, self.end)
}
pub(crate) fn load(&mut self, saved: (usize, usize)) {
self.cur = saved.0;
self.end = saved.1;
}
} | {
debug_assert!(self.cur < self.end);
let token = &self.tokens[self.cur];
if token.kind.has_span_end() {
let span_end = &self.tokens[self.cur + 1];
debug_assert_eq!(span_end.kind, TokenKind::SpanEnd);
(token.pos, span_end.pos)
} else {
let offset = token.kind.num_characters();
(token.pos, token.pos.with_offset(offset))
}
} | identifier_body |
tokens.rs | use crate::protocol::input_source::{
InputPosition as InputPosition,
InputSpan
};
/// Represents a particular kind of token. Some tokens represent
/// variable-character tokens. Such a token is always followed by a
/// `TokenKind::SpanEnd` token.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
// Variable-character tokens, followed by a SpanEnd token
Ident, // regular identifier
Pragma, // identifier with prefixed `#`, range includes `#`
Integer, // integer literal
String, // string literal, range includes `"`
Character, // character literal, range includes `'`
LineComment, // line comment, range includes leading `//`, but not newline
BlockComment, // block comment, range includes leading `/*` and trailing `*/`
// Punctuation (single character)
Exclamation, //!
Question, //?
Pound, // #
OpenAngle, // <
OpenCurly, // {
OpenParen, // (
OpenSquare, // [
CloseAngle, // >
CloseCurly, // }
CloseParen, // )
CloseSquare, // ]
Colon, // :
Comma, //,
Dot, //.
SemiColon, // ;
// Operator-like (single character)
At, // @
Plus, // +
Minus, // -
Star, // *
Slash, // /
Percent, // %
Caret, // ^
And, // &
Or, // |
Tilde, // ~
Equal, // =
// Punctuation (two characters)
ColonColon, // ::
DotDot, //..
ArrowRight, // ->
// Operator-like (two characters)
AtEquals, // @=
PlusPlus, // ++
PlusEquals, // +=
MinusMinus, // --
MinusEquals, // -=
StarEquals, // *=
SlashEquals, // /=
PercentEquals, // %=
CaretEquals, // ^=
AndAnd, // &&
AndEquals, // &=
OrOr, // ||
OrEquals, // |=
EqualEqual, // ==
NotEqual, //!=
ShiftLeft, // <<
LessEquals, // <=
ShiftRight, // >>
GreaterEquals, // >=
// Operator-like (three characters)
ShiftLeftEquals,// <<=
ShiftRightEquals, // >>=
// Special marker token to indicate end of variable-character tokens
SpanEnd,
}
impl TokenKind {
/// Returns true if the next expected token is the special `TokenKind::SpanEnd` token. This is
/// the case for tokens of variable length (e.g. an identifier).
fn has_span_end(&self) -> bool {
return *self <= TokenKind::BlockComment
}
/// Returns the number of characters associated with the token. May only be called on tokens
/// that do not have a variable length.
fn num_characters(&self) -> u32 {
debug_assert!(!self.has_span_end() && *self!= TokenKind::SpanEnd);
if *self <= TokenKind::Equal {
1
} else if *self <= TokenKind::GreaterEquals {
2
} else {
3
}
}
/// Returns the characters that are represented by the token, may only be called on tokens that
/// do not have a variable length.
pub fn token_chars(&self) -> &'static str {
debug_assert!(!self.has_span_end() && *self!= TokenKind::SpanEnd);
use TokenKind as TK;
match self {
TK::Exclamation => "!",
TK::Question => "?",
TK::Pound => "#",
TK::OpenAngle => "<",
TK::OpenCurly => "{",
TK::OpenParen => "(",
TK::OpenSquare => "[",
TK::CloseAngle => ">",
TK::CloseCurly => "}",
TK::CloseParen => ")",
TK::CloseSquare => "]",
TK::Colon => ":",
TK::Comma => ",",
TK::Dot => ".",
TK::SemiColon => ";",
TK::At => "@",
TK::Plus => "+",
TK::Minus => "-",
TK::Star => "*",
TK::Slash => "/",
TK::Percent => "%",
TK::Caret => "^",
TK::And => "&",
TK::Or => "|",
TK::Tilde => "~",
TK::Equal => "=",
TK::ColonColon => "::",
TK::DotDot => "..",
TK::ArrowRight => "->",
TK::AtEquals => "@=",
TK::PlusPlus => "++",
TK::PlusEquals => "+=",
TK::MinusMinus => "--",
TK::MinusEquals => "-=",
TK::StarEquals => "*=",
TK::SlashEquals => "/=",
TK::PercentEquals => "%=",
TK::CaretEquals => "^=",
TK::AndAnd => "&&",
TK::AndEquals => "&=",
TK::OrOr => "||",
TK::OrEquals => "|=",
TK::EqualEqual => "==",
TK::NotEqual => "!=",
TK::ShiftLeft => "<<",
TK::LessEquals => "<=",
TK::ShiftRight => ">>",
TK::GreaterEquals => ">=",
TK::ShiftLeftEquals => "<<=",
TK::ShiftRightEquals => ">>=",
// Lets keep these in explicitly for now, in case we want to add more symbols
TK::Ident | TK::Pragma | TK::Integer | TK::String | TK::Character |
TK::LineComment | TK::BlockComment | TK::SpanEnd => unreachable!(),
}
}
}
/// Represents a single token at a particular position.
pub struct Token {
pub kind: TokenKind,
pub pos: InputPosition,
}
impl Token {
pub(crate) fn new(kind: TokenKind, pos: InputPosition) -> Self {
Self{ kind, pos }
}
}
/// The kind of token ranges that are specially parsed by the tokenizer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenRangeKind {
Module,
Pragma,
Import,
Definition,
Code,
}
pub const NO_RELATION: i32 = -1;
pub const NO_SIBLING: i32 = NO_RELATION;
/// A range of tokens with a specific meaning. Such a range is part of a tree
/// where each parent tree envelops all of its children.
#[derive(Debug)]
pub struct TokenRange {
// Index of parent in `TokenBuffer.ranges`, does not have a parent if the
// range kind is Module, in that case the parent index is -1.
pub parent_idx: i32,
pub range_kind: TokenRangeKind,
pub curly_depth: u32,
// Offsets into `TokenBuffer.ranges`: the tokens belonging to this range.
pub start: u32, // first token (inclusive index)
pub end: u32, // last token (exclusive index)
// Child ranges
pub num_child_ranges: u32, // Number of subranges
pub first_child_idx: i32, // First subrange (or -1 if no subranges)
pub last_child_idx: i32, // Last subrange (or -1 if no subranges)
pub next_sibling_idx: i32, // Next subrange (or -1 if no next subrange)
}
pub struct TokenBuffer {
pub tokens: Vec<Token>,
pub ranges: Vec<TokenRange>,
}
impl TokenBuffer {
pub(crate) fn new() -> Self {
Self{ tokens: Vec::new(), ranges: Vec::new() }
}
pub(crate) fn iter_range<'a>(&'a self, range: &TokenRange) -> TokenIter<'a> {
TokenIter::new(self, range.start as usize, range.end as usize)
}
pub(crate) fn start_pos(&self, range: &TokenRange) -> InputPosition {
self.tokens[range.start as usize].pos
}
pub(crate) fn end_pos(&self, range: &TokenRange) -> InputPosition {
let last_token = &self.tokens[range.end as usize - 1];
if last_token.kind == TokenKind::SpanEnd {
return last_token.pos
} else {
debug_assert!(!last_token.kind.has_span_end());
return last_token.pos.with_offset(last_token.kind.num_characters());
}
}
}
/// Iterator over tokens within a specific `TokenRange`.
pub(crate) struct TokenIter<'a> {
tokens: &'a Vec<Token>,
cur: usize,
end: usize,
}
impl<'a> TokenIter<'a> {
fn new(buffer: &'a TokenBuffer, start: usize, end: usize) -> Self {
Self{ tokens: &buffer.tokens, cur: start, end }
}
/// Returns the next token (may include comments), or `None` if at the end
/// of the range.
pub(crate) fn next_including_comments(&self) -> Option<TokenKind> {
if self.cur >= self.end {
return None;
}
let token = &self.tokens[self.cur];
Some(token.kind)
}
/// Returns the next token (but skips over comments), or `None` if at the
/// end of the range
pub(crate) fn next(&mut self) -> Option<TokenKind> {
while let Some(token_kind) = self.next_including_comments() {
if token_kind!= TokenKind::LineComment && token_kind!= TokenKind::BlockComment {
return Some(token_kind);
}
self.consume();
}
return None
}
/// Peeks ahead by one token (i.e. the one that comes after `next()`), and
/// skips over comments
pub(crate) fn peek(&self) -> Option<TokenKind> {
for next_idx in self.cur + 1..self.end {
let next_kind = self.tokens[next_idx].kind;
if next_kind!= TokenKind::LineComment && next_kind!= TokenKind::BlockComment && next_kind!= TokenKind::SpanEnd {
return Some(next_kind);
}
}
return None;
}
/// Returns the start position belonging to the token returned by `next`. If
/// there is not a next token, then we return the end position of the
/// previous token.
pub(crate) fn last_valid_pos(&self) -> InputPosition {
if self.cur < self.end {
// Return token position
return self.tokens[self.cur].pos
}
// Return previous token end
let token = &self.tokens[self.cur - 1];
return if token.kind == TokenKind::SpanEnd {
token.pos
} else {
token.pos.with_offset(token.kind.num_characters())
};
}
/// Returns the token range belonging to the token returned by `next`. This
/// assumes that we're not at the end of the range we're iterating over.
/// TODO: @cleanup Phase out?
pub(crate) fn next_positions(&self) -> (InputPosition, InputPosition) {
debug_assert!(self.cur < self.end);
let token = &self.tokens[self.cur];
if token.kind.has_span_end() {
let span_end = &self.tokens[self.cur + 1];
debug_assert_eq!(span_end.kind, TokenKind::SpanEnd);
(token.pos, span_end.pos)
} else {
let offset = token.kind.num_characters();
(token.pos, token.pos.with_offset(offset))
}
}
/// See `next_positions`
pub(crate) fn next_span(&self) -> InputSpan {
let (begin, end) = self.next_positions();
return InputSpan::from_positions(begin, end)
}
/// Advances the iterator to the next (meaningful) token.
pub(crate) fn consume(&mut self) {
if let Some(kind) = self.next_including_comments() {
if kind.has_span_end() {
self.cur += 2;
} else |
}
}
/// Saves the current iteration position, may be passed to `load` to return
/// the iterator to a previous position.
pub(crate) fn save(&self) -> (usize, usize) {
(self.cur, self.end)
}
pub(crate) fn load(&mut self, saved: (usize, usize)) {
self.cur = saved.0;
self.end = saved.1;
}
} | {
self.cur += 1;
} | conditional_block |
value_stability.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use average::assert_almost_eq;
use core::fmt::Debug;
use rand::Rng;
use rand_distr::*;
fn get_rng(seed: u64) -> impl rand::Rng {
// For tests, we want a statistically good, fast, reproducible RNG.
// PCG32 will do fine, and will be easy to embed if we ever need to.
const INC: u64 = 11634580027462260723;
rand_pcg::Pcg32::new(seed, INC)
}
/// We only assert approximate equality since some platforms do not perform
/// identically (i686-unknown-linux-gnu and most notably x86_64-pc-windows-gnu).
trait ApproxEq {
fn assert_almost_eq(&self, rhs: &Self);
}
impl ApproxEq for f32 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-6);
}
}
impl ApproxEq for f64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-14);
}
}
impl ApproxEq for u64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_eq!(self, rhs);
}
}
impl<T: ApproxEq> ApproxEq for [T; 2] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
}
}
impl<T: ApproxEq> ApproxEq for [T; 3] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
self[2].assert_almost_eq(&rhs[2]);
}
}
fn test_samples<F: Debug + ApproxEq, D: Distribution<F>>(
seed: u64, distr: D, expected: &[F],
) {
let mut rng = get_rng(seed);
for val in expected {
let x = rng.sample(&distr);
x.assert_almost_eq(val);
}
}
#[test]
fn binomial_stability() {
// We have multiple code paths: np < 10, p > 0.5
test_samples(353, Binomial::new(2, 0.7).unwrap(), &[1, 1, 2, 1]);
test_samples(353, Binomial::new(20, 0.3).unwrap(), &[7, 7, 5, 7]);
test_samples(353, Binomial::new(2000, 0.6).unwrap(), &[1194, 1208, 1192, 1210]);
}
#[test]
fn geometric_stability() {
test_samples(464, StandardGeometric, &[3, 0, 1, 0, 0, 3, 2, 1, 2, 0]);
test_samples(464, Geometric::new(0.5).unwrap(), &[2, 1, 1, 0, 0, 1, 0, 1]);
test_samples(464, Geometric::new(0.05).unwrap(), &[24, 51, 81, 67, 27, 11, 7, 6]);
test_samples(464, Geometric::new(0.95).unwrap(), &[0, 0, 0, 0, 1, 0, 0, 0]);
// expect non-random behaviour for series of pre-determined trials
test_samples(464, Geometric::new(0.0).unwrap(), &[u64::max_value(); 100][..]);
test_samples(464, Geometric::new(1.0).unwrap(), &[0; 100][..]);
}
#[test]
fn hypergeometric_stability() {
// We have multiple code paths based on the distribution's mode and sample_size
test_samples(7221, Hypergeometric::new(99, 33, 8).unwrap(), &[4, 3, 2, 2, 3, 2, 3, 1]); // Algorithm HIN
test_samples(7221, Hypergeometric::new(100, 50, 50).unwrap(), &[23, 27, 26, 27, 22, 24, 31, 22]); // Algorithm H2PE
}
#[test]
fn unit_ball_stability() {
test_samples(2, UnitBall, &[
[0.018035709265959987f64, -0.4348771383120438, -0.07982762085055706],
[0.10588569388223945, -0.4734350111375454, -0.7392104908825501],
[0.11060237642041049, -0.16065642822852677, -0.8444043930440075]
]);
}
#[test]
fn unit_circle_stability() {
test_samples(2, UnitCircle, &[
[-0.9965658683520504f64, -0.08280380447614634],
[-0.9790853270389644, -0.20345004884984505],
[-0.8449189758898707, 0.5348943112253227],
]);
}
#[test]
fn unit_sphere_stability() {
test_samples(2, UnitSphere, &[
[0.03247542860231647f64, -0.7830477442152738, 0.6211131755296027],
[-0.09978440840914075, 0.9706650829833128, -0.21875184231323952],
[0.2735582468624679, 0.9435374242279655, -0.1868234852870203],
]);
}
#[test]
fn unit_disc_stability() {
test_samples(2, UnitDisc, &[
[0.018035709265959987f64, -0.4348771383120438],
[-0.07982762085055706, 0.7765329819820659],
[0.21450745997299503, 0.7398636984333291],
]);
}
#[test]
fn pareto_stability() {
test_samples(213, Pareto::new(1.0, 1.0).unwrap(), &[
1.0423688f32, 2.1235929, 4.132709, 1.4679428,
]);
test_samples(213, Pareto::new(2.0, 0.5).unwrap(), &[
9.019295276219136f64,
4.3097126018270595,
6.837815045397157,
105.8826669383772,
]);
}
#[test]
fn poisson_stability() {
test_samples(223, Poisson::new(7.0).unwrap(), &[5.0f32, 11.0, 6.0, 5.0]);
test_samples(223, Poisson::new(7.0).unwrap(), &[9.0f64, 5.0, 7.0, 6.0]);
test_samples(223, Poisson::new(27.0).unwrap(), &[28.0f32, 32.0, 36.0, 36.0]);
}
#[test]
fn triangular_stability() {
test_samples(860, Triangular::new(2., 10., 3.).unwrap(), &[
5.74373257511361f64,
7.890059162791258f64,
4.7256280652553455f64,
2.9474808121184077f64,
3.058301946314053f64,
]);
}
#[test]
fn normal_inverse_gaussian_stability() {
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6568966f32, 1.3744819, 2.216063, 0.11488572,
]);
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6838707059642927f64,
2.4447306460569784,
0.2361045023235968,
1.7774534624785319,
]);
}
#[test]
fn pert_stability() {
// mean = 4, var = 12/7
test_samples(860, Pert::new(2., 10., 3.).unwrap(), &[
4.908681667460367,
4.014196196158352,
2.6489397149197234,
3.4569780580044727,
4.242864311947118,
]);
}
#[test]
fn inverse_gaussian_stability() {
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(),&[
0.9339157f32, 1.108113, 0.50864697, 0.39849377,
]);
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(), &[
1.0707604954722476f64,
0.9628140605340697,
0.4069687656468226,
0.660283852985818,
]);
}
#[test]
fn | () {
// Gamma has 3 cases: shape == 1, shape < 1, shape > 1
test_samples(223, Gamma::new(1.0, 5.0).unwrap(), &[
5.398085f32, 9.162783, 0.2300583, 1.7235851,
]);
test_samples(223, Gamma::new(0.8, 5.0).unwrap(), &[
0.5051203f32, 0.9048302, 3.095812, 1.8566116,
]);
test_samples(223, Gamma::new(1.1, 5.0).unwrap(), &[
7.783878094584059f64,
1.4939528171618057,
8.638017638857592,
3.0949337228829004,
]);
// ChiSquared has 2 cases: k == 1, k!= 1
test_samples(223, ChiSquared::new(1.0).unwrap(), &[
0.4893526200348249f64,
1.635249736808788,
0.5013580219361969,
0.1457735613733489,
]);
test_samples(223, ChiSquared::new(0.1).unwrap(), &[
0.014824404726978617f64,
0.021602123937134326,
0.0000003431429746851693,
0.00000002291755769542258,
]);
test_samples(223, ChiSquared::new(10.0).unwrap(), &[
12.693656f32, 6.812016, 11.082001, 12.436167,
]);
// FisherF has same special cases as ChiSquared on each param
test_samples(223, FisherF::new(1.0, 13.5).unwrap(), &[
0.32283646f32, 0.048049655, 0.0788893, 1.817178,
]);
test_samples(223, FisherF::new(1.0, 1.0).unwrap(), &[
0.29925257f32, 3.4392934, 9.567652, 0.020074,
]);
test_samples(223, FisherF::new(0.7, 13.5).unwrap(), &[
3.3196593155045124f64,
0.3409169916262829,
0.03377989856426519,
0.00004041672861036937,
]);
// StudentT has same special cases as ChiSquared
test_samples(223, StudentT::new(1.0).unwrap(), &[
0.54703987f32, -1.8545331, 3.093162, -0.14168274,
]);
test_samples(223, StudentT::new(1.1).unwrap(), &[
0.7729195887949754f64,
1.2606210611616204,
-1.7553606501113175,
-2.377641221169782,
]);
// Beta has two special cases:
//
// 1. min(alpha, beta) <= 1
// 2. min(alpha, beta) > 1
test_samples(223, Beta::new(1.0, 0.8).unwrap(), &[
0.8300703726659456,
0.8134131062097899,
0.47912589330631555,
0.25323238071138526,
]);
test_samples(223, Beta::new(3.0, 1.2).unwrap(), &[
0.49563509121756827,
0.9551305482256759,
0.5151181353461637,
0.7551732971235077,
]);
}
#[test]
fn exponential_stability() {
test_samples(223, Exp1, &[
1.079617f32, 1.8325565, 0.04601166, 0.34471703,
]);
test_samples(223, Exp1, &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
test_samples(223, Exp::new(2.0).unwrap(), &[
0.5398085f32, 0.91627824, 0.02300583, 0.17235851,
]);
test_samples(223, Exp::new(1.0).unwrap(), &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
}
#[test]
fn normal_stability() {
test_samples(213, StandardNormal, &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, StandardNormal, &[
-0.11844188827977231f64,
0.7813779637772346,
0.06563993969580051,
-1.1932899004186373,
]);
test_samples(213, Normal::new(0.0, 1.0).unwrap(), &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, Normal::new(2.0, 0.5).unwrap(), &[
1.940779055860114f64,
2.3906889818886174,
2.0328199698479,
1.4033550497906813,
]);
test_samples(213, LogNormal::new(0.0, 1.0).unwrap(), &[
0.88830346f32, 2.1844804, 1.0678421, 0.30322206,
]);
test_samples(213, LogNormal::new(2.0, 0.5).unwrap(), &[
6.964174338639032f64,
10.921015733601452,
7.6355881556915906,
4.068828213584092,
]);
}
#[test]
fn weibull_stability() {
test_samples(213, Weibull::new(1.0, 1.0).unwrap(), &[
0.041495778f32, 0.7531094, 1.4189332, 0.38386202,
]);
test_samples(213, Weibull::new(2.0, 0.5).unwrap(), &[
1.1343478702739669f64,
0.29470010050655226,
0.7556151370284702,
7.877212340241561,
]);
}
#[cfg(feature = "alloc")]
#[test]
fn dirichlet_stability() {
let mut rng = get_rng(223);
assert_eq!(
rng.sample(Dirichlet::new([1.0, 2.0, 3.0]).unwrap()),
[0.12941567177708177, 0.4702121891675036, 0.4003721390554146]
);
assert_eq!(rng.sample(Dirichlet::new([8.0; 5]).unwrap()), [
0.17684200044809556,
0.29915953935953055,
0.1832858056608014,
0.1425623503573967,
0.19815030417417595
]);
// Test stability for the case where all alphas are less than 0.1.
assert_eq!(
rng.sample(Dirichlet::new([0.05, 0.025, 0.075, 0.05]).unwrap()),
[
0.00027580456855692104,
2.296135759821706e-20,
3.004118281150937e-9,
0.9997241924273248
]
);
}
#[test]
fn cauchy_stability() {
test_samples(353, Cauchy::new(100f64, 10.0).unwrap(), &[
77.93369152808678f64,
90.1606912098641,
125.31516221323625,
86.10217834773925,
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
// We use a lower threshold of 1e-5 here.
let distr = Cauchy::new(10f32, 7.0).unwrap();
let mut rng = get_rng(353);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for &a in expected.iter() {
let b = rng.sample(&distr);
assert_almost_eq!(a, b, 1e-5);
}
}
| gamma_stability | identifier_name |
value_stability.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use average::assert_almost_eq;
use core::fmt::Debug;
use rand::Rng;
use rand_distr::*;
fn get_rng(seed: u64) -> impl rand::Rng {
// For tests, we want a statistically good, fast, reproducible RNG.
// PCG32 will do fine, and will be easy to embed if we ever need to.
const INC: u64 = 11634580027462260723;
rand_pcg::Pcg32::new(seed, INC)
}
/// We only assert approximate equality since some platforms do not perform
/// identically (i686-unknown-linux-gnu and most notably x86_64-pc-windows-gnu).
trait ApproxEq {
fn assert_almost_eq(&self, rhs: &Self);
}
impl ApproxEq for f32 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-6);
}
}
impl ApproxEq for f64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-14);
}
}
impl ApproxEq for u64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_eq!(self, rhs);
}
}
impl<T: ApproxEq> ApproxEq for [T; 2] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
}
}
impl<T: ApproxEq> ApproxEq for [T; 3] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
self[2].assert_almost_eq(&rhs[2]);
}
}
fn test_samples<F: Debug + ApproxEq, D: Distribution<F>>(
seed: u64, distr: D, expected: &[F],
) {
let mut rng = get_rng(seed);
for val in expected {
let x = rng.sample(&distr);
x.assert_almost_eq(val);
}
}
#[test]
fn binomial_stability() {
// We have multiple code paths: np < 10, p > 0.5
test_samples(353, Binomial::new(2, 0.7).unwrap(), &[1, 1, 2, 1]);
test_samples(353, Binomial::new(20, 0.3).unwrap(), &[7, 7, 5, 7]);
test_samples(353, Binomial::new(2000, 0.6).unwrap(), &[1194, 1208, 1192, 1210]);
}
#[test]
fn geometric_stability() {
test_samples(464, StandardGeometric, &[3, 0, 1, 0, 0, 3, 2, 1, 2, 0]);
test_samples(464, Geometric::new(0.5).unwrap(), &[2, 1, 1, 0, 0, 1, 0, 1]);
test_samples(464, Geometric::new(0.05).unwrap(), &[24, 51, 81, 67, 27, 11, 7, 6]);
test_samples(464, Geometric::new(0.95).unwrap(), &[0, 0, 0, 0, 1, 0, 0, 0]);
// expect non-random behaviour for series of pre-determined trials
test_samples(464, Geometric::new(0.0).unwrap(), &[u64::max_value(); 100][..]);
test_samples(464, Geometric::new(1.0).unwrap(), &[0; 100][..]);
}
#[test]
fn hypergeometric_stability() {
// We have multiple code paths based on the distribution's mode and sample_size
test_samples(7221, Hypergeometric::new(99, 33, 8).unwrap(), &[4, 3, 2, 2, 3, 2, 3, 1]); // Algorithm HIN
test_samples(7221, Hypergeometric::new(100, 50, 50).unwrap(), &[23, 27, 26, 27, 22, 24, 31, 22]); // Algorithm H2PE
}
#[test]
fn unit_ball_stability() {
test_samples(2, UnitBall, &[
[0.018035709265959987f64, -0.4348771383120438, -0.07982762085055706],
[0.10588569388223945, -0.4734350111375454, -0.7392104908825501],
[0.11060237642041049, -0.16065642822852677, -0.8444043930440075]
]);
}
#[test]
fn unit_circle_stability() {
test_samples(2, UnitCircle, &[
[-0.9965658683520504f64, -0.08280380447614634],
[-0.9790853270389644, -0.20345004884984505],
[-0.8449189758898707, 0.5348943112253227],
]);
}
#[test]
fn unit_sphere_stability() {
test_samples(2, UnitSphere, &[
[0.03247542860231647f64, -0.7830477442152738, 0.6211131755296027],
[-0.09978440840914075, 0.9706650829833128, -0.21875184231323952],
[0.2735582468624679, 0.9435374242279655, -0.1868234852870203],
]);
}
#[test]
fn unit_disc_stability() {
test_samples(2, UnitDisc, &[
[0.018035709265959987f64, -0.4348771383120438],
[-0.07982762085055706, 0.7765329819820659],
[0.21450745997299503, 0.7398636984333291],
]);
}
#[test]
fn pareto_stability() {
test_samples(213, Pareto::new(1.0, 1.0).unwrap(), &[
1.0423688f32, 2.1235929, 4.132709, 1.4679428,
]);
test_samples(213, Pareto::new(2.0, 0.5).unwrap(), &[
9.019295276219136f64,
4.3097126018270595,
6.837815045397157,
105.8826669383772,
]);
}
#[test]
fn poisson_stability() {
test_samples(223, Poisson::new(7.0).unwrap(), &[5.0f32, 11.0, 6.0, 5.0]);
test_samples(223, Poisson::new(7.0).unwrap(), &[9.0f64, 5.0, 7.0, 6.0]);
test_samples(223, Poisson::new(27.0).unwrap(), &[28.0f32, 32.0, 36.0, 36.0]);
}
#[test]
fn triangular_stability() {
test_samples(860, Triangular::new(2., 10., 3.).unwrap(), &[
5.74373257511361f64,
7.890059162791258f64,
4.7256280652553455f64,
2.9474808121184077f64,
3.058301946314053f64,
]);
}
#[test]
fn normal_inverse_gaussian_stability() {
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6568966f32, 1.3744819, 2.216063, 0.11488572,
]);
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6838707059642927f64,
2.4447306460569784,
0.2361045023235968,
1.7774534624785319,
]);
}
#[test]
fn pert_stability() {
// mean = 4, var = 12/7
test_samples(860, Pert::new(2., 10., 3.).unwrap(), &[
4.908681667460367,
4.014196196158352,
2.6489397149197234,
3.4569780580044727,
4.242864311947118,
]);
}
#[test]
fn inverse_gaussian_stability() {
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(),&[
0.9339157f32, 1.108113, 0.50864697, 0.39849377,
]);
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(), &[
1.0707604954722476f64,
0.9628140605340697,
0.4069687656468226,
0.660283852985818,
]);
}
#[test]
fn gamma_stability() {
// Gamma has 3 cases: shape == 1, shape < 1, shape > 1
test_samples(223, Gamma::new(1.0, 5.0).unwrap(), &[
5.398085f32, 9.162783, 0.2300583, 1.7235851,
]);
test_samples(223, Gamma::new(0.8, 5.0).unwrap(), &[
0.5051203f32, 0.9048302, 3.095812, 1.8566116,
]);
test_samples(223, Gamma::new(1.1, 5.0).unwrap(), &[
7.783878094584059f64,
1.4939528171618057,
8.638017638857592,
3.0949337228829004,
]);
// ChiSquared has 2 cases: k == 1, k!= 1
test_samples(223, ChiSquared::new(1.0).unwrap(), &[
0.4893526200348249f64,
1.635249736808788,
0.5013580219361969,
0.1457735613733489,
]);
test_samples(223, ChiSquared::new(0.1).unwrap(), &[
0.014824404726978617f64,
0.021602123937134326,
0.0000003431429746851693,
0.00000002291755769542258,
]);
test_samples(223, ChiSquared::new(10.0).unwrap(), &[
12.693656f32, 6.812016, 11.082001, 12.436167,
]);
// FisherF has same special cases as ChiSquared on each param
test_samples(223, FisherF::new(1.0, 13.5).unwrap(), &[
0.32283646f32, 0.048049655, 0.0788893, 1.817178,
]);
test_samples(223, FisherF::new(1.0, 1.0).unwrap(), &[
0.29925257f32, 3.4392934, 9.567652, 0.020074,
]);
test_samples(223, FisherF::new(0.7, 13.5).unwrap(), &[
3.3196593155045124f64,
0.3409169916262829,
0.03377989856426519,
0.00004041672861036937,
]);
// StudentT has same special cases as ChiSquared
test_samples(223, StudentT::new(1.0).unwrap(), &[
0.54703987f32, -1.8545331, 3.093162, -0.14168274,
]);
test_samples(223, StudentT::new(1.1).unwrap(), &[
0.7729195887949754f64,
1.2606210611616204,
-1.7553606501113175,
-2.377641221169782,
]);
// Beta has two special cases:
//
// 1. min(alpha, beta) <= 1
// 2. min(alpha, beta) > 1
test_samples(223, Beta::new(1.0, 0.8).unwrap(), &[
0.8300703726659456, | 0.47912589330631555,
0.25323238071138526,
]);
test_samples(223, Beta::new(3.0, 1.2).unwrap(), &[
0.49563509121756827,
0.9551305482256759,
0.5151181353461637,
0.7551732971235077,
]);
}
#[test]
fn exponential_stability() {
test_samples(223, Exp1, &[
1.079617f32, 1.8325565, 0.04601166, 0.34471703,
]);
test_samples(223, Exp1, &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
test_samples(223, Exp::new(2.0).unwrap(), &[
0.5398085f32, 0.91627824, 0.02300583, 0.17235851,
]);
test_samples(223, Exp::new(1.0).unwrap(), &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
}
#[test]
fn normal_stability() {
test_samples(213, StandardNormal, &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, StandardNormal, &[
-0.11844188827977231f64,
0.7813779637772346,
0.06563993969580051,
-1.1932899004186373,
]);
test_samples(213, Normal::new(0.0, 1.0).unwrap(), &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, Normal::new(2.0, 0.5).unwrap(), &[
1.940779055860114f64,
2.3906889818886174,
2.0328199698479,
1.4033550497906813,
]);
test_samples(213, LogNormal::new(0.0, 1.0).unwrap(), &[
0.88830346f32, 2.1844804, 1.0678421, 0.30322206,
]);
test_samples(213, LogNormal::new(2.0, 0.5).unwrap(), &[
6.964174338639032f64,
10.921015733601452,
7.6355881556915906,
4.068828213584092,
]);
}
#[test]
fn weibull_stability() {
test_samples(213, Weibull::new(1.0, 1.0).unwrap(), &[
0.041495778f32, 0.7531094, 1.4189332, 0.38386202,
]);
test_samples(213, Weibull::new(2.0, 0.5).unwrap(), &[
1.1343478702739669f64,
0.29470010050655226,
0.7556151370284702,
7.877212340241561,
]);
}
#[cfg(feature = "alloc")]
#[test]
fn dirichlet_stability() {
let mut rng = get_rng(223);
assert_eq!(
rng.sample(Dirichlet::new([1.0, 2.0, 3.0]).unwrap()),
[0.12941567177708177, 0.4702121891675036, 0.4003721390554146]
);
assert_eq!(rng.sample(Dirichlet::new([8.0; 5]).unwrap()), [
0.17684200044809556,
0.29915953935953055,
0.1832858056608014,
0.1425623503573967,
0.19815030417417595
]);
// Test stability for the case where all alphas are less than 0.1.
assert_eq!(
rng.sample(Dirichlet::new([0.05, 0.025, 0.075, 0.05]).unwrap()),
[
0.00027580456855692104,
2.296135759821706e-20,
3.004118281150937e-9,
0.9997241924273248
]
);
}
#[test]
fn cauchy_stability() {
test_samples(353, Cauchy::new(100f64, 10.0).unwrap(), &[
77.93369152808678f64,
90.1606912098641,
125.31516221323625,
86.10217834773925,
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
// We use a lower threshold of 1e-5 here.
let distr = Cauchy::new(10f32, 7.0).unwrap();
let mut rng = get_rng(353);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for &a in expected.iter() {
let b = rng.sample(&distr);
assert_almost_eq!(a, b, 1e-5);
}
} | 0.8134131062097899, | random_line_split |
value_stability.rs | // Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use average::assert_almost_eq;
use core::fmt::Debug;
use rand::Rng;
use rand_distr::*;
fn get_rng(seed: u64) -> impl rand::Rng {
// For tests, we want a statistically good, fast, reproducible RNG.
// PCG32 will do fine, and will be easy to embed if we ever need to.
const INC: u64 = 11634580027462260723;
rand_pcg::Pcg32::new(seed, INC)
}
/// We only assert approximate equality since some platforms do not perform
/// identically (i686-unknown-linux-gnu and most notably x86_64-pc-windows-gnu).
trait ApproxEq {
fn assert_almost_eq(&self, rhs: &Self);
}
impl ApproxEq for f32 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-6);
}
}
impl ApproxEq for f64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_almost_eq!(self, rhs, 1e-14);
}
}
impl ApproxEq for u64 {
fn assert_almost_eq(&self, rhs: &Self) {
assert_eq!(self, rhs);
}
}
impl<T: ApproxEq> ApproxEq for [T; 2] {
fn assert_almost_eq(&self, rhs: &Self) |
}
impl<T: ApproxEq> ApproxEq for [T; 3] {
fn assert_almost_eq(&self, rhs: &Self) {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
self[2].assert_almost_eq(&rhs[2]);
}
}
fn test_samples<F: Debug + ApproxEq, D: Distribution<F>>(
seed: u64, distr: D, expected: &[F],
) {
let mut rng = get_rng(seed);
for val in expected {
let x = rng.sample(&distr);
x.assert_almost_eq(val);
}
}
#[test]
fn binomial_stability() {
// We have multiple code paths: np < 10, p > 0.5
test_samples(353, Binomial::new(2, 0.7).unwrap(), &[1, 1, 2, 1]);
test_samples(353, Binomial::new(20, 0.3).unwrap(), &[7, 7, 5, 7]);
test_samples(353, Binomial::new(2000, 0.6).unwrap(), &[1194, 1208, 1192, 1210]);
}
#[test]
fn geometric_stability() {
test_samples(464, StandardGeometric, &[3, 0, 1, 0, 0, 3, 2, 1, 2, 0]);
test_samples(464, Geometric::new(0.5).unwrap(), &[2, 1, 1, 0, 0, 1, 0, 1]);
test_samples(464, Geometric::new(0.05).unwrap(), &[24, 51, 81, 67, 27, 11, 7, 6]);
test_samples(464, Geometric::new(0.95).unwrap(), &[0, 0, 0, 0, 1, 0, 0, 0]);
// expect non-random behaviour for series of pre-determined trials
test_samples(464, Geometric::new(0.0).unwrap(), &[u64::max_value(); 100][..]);
test_samples(464, Geometric::new(1.0).unwrap(), &[0; 100][..]);
}
#[test]
fn hypergeometric_stability() {
// We have multiple code paths based on the distribution's mode and sample_size
test_samples(7221, Hypergeometric::new(99, 33, 8).unwrap(), &[4, 3, 2, 2, 3, 2, 3, 1]); // Algorithm HIN
test_samples(7221, Hypergeometric::new(100, 50, 50).unwrap(), &[23, 27, 26, 27, 22, 24, 31, 22]); // Algorithm H2PE
}
#[test]
fn unit_ball_stability() {
test_samples(2, UnitBall, &[
[0.018035709265959987f64, -0.4348771383120438, -0.07982762085055706],
[0.10588569388223945, -0.4734350111375454, -0.7392104908825501],
[0.11060237642041049, -0.16065642822852677, -0.8444043930440075]
]);
}
#[test]
fn unit_circle_stability() {
test_samples(2, UnitCircle, &[
[-0.9965658683520504f64, -0.08280380447614634],
[-0.9790853270389644, -0.20345004884984505],
[-0.8449189758898707, 0.5348943112253227],
]);
}
#[test]
fn unit_sphere_stability() {
test_samples(2, UnitSphere, &[
[0.03247542860231647f64, -0.7830477442152738, 0.6211131755296027],
[-0.09978440840914075, 0.9706650829833128, -0.21875184231323952],
[0.2735582468624679, 0.9435374242279655, -0.1868234852870203],
]);
}
#[test]
fn unit_disc_stability() {
test_samples(2, UnitDisc, &[
[0.018035709265959987f64, -0.4348771383120438],
[-0.07982762085055706, 0.7765329819820659],
[0.21450745997299503, 0.7398636984333291],
]);
}
#[test]
fn pareto_stability() {
test_samples(213, Pareto::new(1.0, 1.0).unwrap(), &[
1.0423688f32, 2.1235929, 4.132709, 1.4679428,
]);
test_samples(213, Pareto::new(2.0, 0.5).unwrap(), &[
9.019295276219136f64,
4.3097126018270595,
6.837815045397157,
105.8826669383772,
]);
}
#[test]
fn poisson_stability() {
test_samples(223, Poisson::new(7.0).unwrap(), &[5.0f32, 11.0, 6.0, 5.0]);
test_samples(223, Poisson::new(7.0).unwrap(), &[9.0f64, 5.0, 7.0, 6.0]);
test_samples(223, Poisson::new(27.0).unwrap(), &[28.0f32, 32.0, 36.0, 36.0]);
}
#[test]
fn triangular_stability() {
test_samples(860, Triangular::new(2., 10., 3.).unwrap(), &[
5.74373257511361f64,
7.890059162791258f64,
4.7256280652553455f64,
2.9474808121184077f64,
3.058301946314053f64,
]);
}
#[test]
fn normal_inverse_gaussian_stability() {
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6568966f32, 1.3744819, 2.216063, 0.11488572,
]);
test_samples(213, NormalInverseGaussian::new(2.0, 1.0).unwrap(), &[
0.6838707059642927f64,
2.4447306460569784,
0.2361045023235968,
1.7774534624785319,
]);
}
#[test]
fn pert_stability() {
// mean = 4, var = 12/7
test_samples(860, Pert::new(2., 10., 3.).unwrap(), &[
4.908681667460367,
4.014196196158352,
2.6489397149197234,
3.4569780580044727,
4.242864311947118,
]);
}
#[test]
fn inverse_gaussian_stability() {
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(),&[
0.9339157f32, 1.108113, 0.50864697, 0.39849377,
]);
test_samples(213, InverseGaussian::new(1.0, 3.0).unwrap(), &[
1.0707604954722476f64,
0.9628140605340697,
0.4069687656468226,
0.660283852985818,
]);
}
#[test]
fn gamma_stability() {
// Gamma has 3 cases: shape == 1, shape < 1, shape > 1
test_samples(223, Gamma::new(1.0, 5.0).unwrap(), &[
5.398085f32, 9.162783, 0.2300583, 1.7235851,
]);
test_samples(223, Gamma::new(0.8, 5.0).unwrap(), &[
0.5051203f32, 0.9048302, 3.095812, 1.8566116,
]);
test_samples(223, Gamma::new(1.1, 5.0).unwrap(), &[
7.783878094584059f64,
1.4939528171618057,
8.638017638857592,
3.0949337228829004,
]);
// ChiSquared has 2 cases: k == 1, k!= 1
test_samples(223, ChiSquared::new(1.0).unwrap(), &[
0.4893526200348249f64,
1.635249736808788,
0.5013580219361969,
0.1457735613733489,
]);
test_samples(223, ChiSquared::new(0.1).unwrap(), &[
0.014824404726978617f64,
0.021602123937134326,
0.0000003431429746851693,
0.00000002291755769542258,
]);
test_samples(223, ChiSquared::new(10.0).unwrap(), &[
12.693656f32, 6.812016, 11.082001, 12.436167,
]);
// FisherF has same special cases as ChiSquared on each param
test_samples(223, FisherF::new(1.0, 13.5).unwrap(), &[
0.32283646f32, 0.048049655, 0.0788893, 1.817178,
]);
test_samples(223, FisherF::new(1.0, 1.0).unwrap(), &[
0.29925257f32, 3.4392934, 9.567652, 0.020074,
]);
test_samples(223, FisherF::new(0.7, 13.5).unwrap(), &[
3.3196593155045124f64,
0.3409169916262829,
0.03377989856426519,
0.00004041672861036937,
]);
// StudentT has same special cases as ChiSquared
test_samples(223, StudentT::new(1.0).unwrap(), &[
0.54703987f32, -1.8545331, 3.093162, -0.14168274,
]);
test_samples(223, StudentT::new(1.1).unwrap(), &[
0.7729195887949754f64,
1.2606210611616204,
-1.7553606501113175,
-2.377641221169782,
]);
// Beta has two special cases:
//
// 1. min(alpha, beta) <= 1
// 2. min(alpha, beta) > 1
test_samples(223, Beta::new(1.0, 0.8).unwrap(), &[
0.8300703726659456,
0.8134131062097899,
0.47912589330631555,
0.25323238071138526,
]);
test_samples(223, Beta::new(3.0, 1.2).unwrap(), &[
0.49563509121756827,
0.9551305482256759,
0.5151181353461637,
0.7551732971235077,
]);
}
#[test]
fn exponential_stability() {
test_samples(223, Exp1, &[
1.079617f32, 1.8325565, 0.04601166, 0.34471703,
]);
test_samples(223, Exp1, &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
test_samples(223, Exp::new(2.0).unwrap(), &[
0.5398085f32, 0.91627824, 0.02300583, 0.17235851,
]);
test_samples(223, Exp::new(1.0).unwrap(), &[
1.0796170642388276f64,
1.8325565304274,
0.04601166186842716,
0.3447170217100157,
]);
}
#[test]
fn normal_stability() {
test_samples(213, StandardNormal, &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, StandardNormal, &[
-0.11844188827977231f64,
0.7813779637772346,
0.06563993969580051,
-1.1932899004186373,
]);
test_samples(213, Normal::new(0.0, 1.0).unwrap(), &[
-0.11844189f32, 0.781378, 0.06563994, -1.1932899,
]);
test_samples(213, Normal::new(2.0, 0.5).unwrap(), &[
1.940779055860114f64,
2.3906889818886174,
2.0328199698479,
1.4033550497906813,
]);
test_samples(213, LogNormal::new(0.0, 1.0).unwrap(), &[
0.88830346f32, 2.1844804, 1.0678421, 0.30322206,
]);
test_samples(213, LogNormal::new(2.0, 0.5).unwrap(), &[
6.964174338639032f64,
10.921015733601452,
7.6355881556915906,
4.068828213584092,
]);
}
#[test]
fn weibull_stability() {
test_samples(213, Weibull::new(1.0, 1.0).unwrap(), &[
0.041495778f32, 0.7531094, 1.4189332, 0.38386202,
]);
test_samples(213, Weibull::new(2.0, 0.5).unwrap(), &[
1.1343478702739669f64,
0.29470010050655226,
0.7556151370284702,
7.877212340241561,
]);
}
#[cfg(feature = "alloc")]
#[test]
fn dirichlet_stability() {
let mut rng = get_rng(223);
assert_eq!(
rng.sample(Dirichlet::new([1.0, 2.0, 3.0]).unwrap()),
[0.12941567177708177, 0.4702121891675036, 0.4003721390554146]
);
assert_eq!(rng.sample(Dirichlet::new([8.0; 5]).unwrap()), [
0.17684200044809556,
0.29915953935953055,
0.1832858056608014,
0.1425623503573967,
0.19815030417417595
]);
// Test stability for the case where all alphas are less than 0.1.
assert_eq!(
rng.sample(Dirichlet::new([0.05, 0.025, 0.075, 0.05]).unwrap()),
[
0.00027580456855692104,
2.296135759821706e-20,
3.004118281150937e-9,
0.9997241924273248
]
);
}
#[test]
fn cauchy_stability() {
test_samples(353, Cauchy::new(100f64, 10.0).unwrap(), &[
77.93369152808678f64,
90.1606912098641,
125.31516221323625,
86.10217834773925,
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
// We use a lower threshold of 1e-5 here.
let distr = Cauchy::new(10f32, 7.0).unwrap();
let mut rng = get_rng(353);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for &a in expected.iter() {
let b = rng.sample(&distr);
assert_almost_eq!(a, b, 1e-5);
}
}
| {
self[0].assert_almost_eq(&rhs[0]);
self[1].assert_almost_eq(&rhs[1]);
} | identifier_body |
text_layout_engine.rs | #![allow(dead_code)]
// XXX: should be no harfbuzz in the interface
use crate::node::NativeWord;
use crate::xetex_font_info::{GlyphBBox, XeTeXFontInst};
//use crate::xetex_font_manager::PlatformFontRef;
use crate::cmd::XetexExtCmd;
use crate::xetex_font_info::GlyphID;
use crate::xetex_layout_interface::FixedPoint;
use crate::xetex_layout_interface::XeTeXLayoutEngine;
use crate::xetex_scaledmath::Scaled;
use harfbuzz_sys::hb_tag_t;
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TextDirection {
LTR,
RTL,
}
// Annoying XeTeXFontMgr singleton accessors
// pub unsafe fn getFullName(fontRef: PlatformFontRef) -> *const libc::c_char;
// pub unsafe fn getDesignSize(font: *mut XeTeXFontInst) -> f64;
// pub unsafe fn findFontByName(name: &CStr, var: Option<&mut String>, size: f64) -> PlatformFontRef;
// pub unsafe fn terminate_font_manager();
// pub unsafe fn destroy_font_manager();
// Internal to XeTeXLayoutEngine but could use improvement
// pub unsafe fn getGlyphs(engine: XeTeXLayoutEngine, glyphs: *mut u32);
// pub unsafe fn getGlyphAdvances(engine: XeTeXLayoutEngine, advances: *mut f32);
// pub unsafe fn getGlyphPositions(engine: XeTeXLayoutEngine, positions: *mut FloatPoint);
// engine : *font_layout_engine.offset((*node.offset(4)).b16.s2 as isize) as CFDictionaryRef;
pub(crate) struct LayoutRequest<'a> {
// ```text
// let txtLen = (*node.offset(4)).b16.s1 as libc::c_long;
// let txtPtr = node.offset(6) as *mut UniChar;
// slice::from_raw_parts(txtPtr, txtLen)
// ```
pub text: &'a [u16],
// node.offset(1).b32.s1
pub line_width: Scaled,
// let f = let mut f: libc::c_uint = (*node.offset(4)).b16.s2 as libc::c_uint;
// *font_letter_space.offset(f as usize)
pub letter_space_unit: Scaled,
/// Only used by AAT
pub justify: bool,
}
impl<'a> LayoutRequest<'a> {
/// Unsafety: obviously, dereferences raw node pointer. The lifetime is also pulled out of
/// thin air, so just keep it in scope, ok?
pub(crate) unsafe fn from_node(node: &'a NativeWord, justify: bool) -> LayoutRequest<'a> {
use crate::xetex_ini::FONT_LETTER_SPACE;
let text = node.text();
let line_width = node.width();
let f = node.font() as usize;
let letter_space_unit = FONT_LETTER_SPACE[f];
LayoutRequest {
text,
line_width,
letter_space_unit,
justify,
}
}
}
pub(crate) struct NodeLayout {
pub lsDelta: Option<Scaled>,
pub width: Scaled,
pub total_glyph_count: u16,
pub glyph_info: *mut FixedPoint,
}
impl NodeLayout {
pub(crate) unsafe fn write_node(&self, node: &mut NativeWord) {
let NodeLayout {
lsDelta,
width,
total_glyph_count,
glyph_info,
} = *self;
node.set_width(width + lsDelta.unwrap_or(Scaled::ZERO));
node.set_glyph_count(total_glyph_count);
node.set_glyph_info_ptr(glyph_info as *mut _);
}
}
/// Stuff that should be added as XeTeXFontInst methods
trait FontInstance {
unsafe fn countGlyphs(font: *mut XeTeXFontInst) -> u32;
unsafe fn getGlyphWidth(font: *mut XeTeXFontInst, gid: u32) -> f32;
unsafe fn setFontLayoutDir(font: *mut XeTeXFontInst, vertical: libc::c_int);
unsafe fn getIndLanguage(font: *mut XeTeXFontInst, script: hb_tag_t, index: u32) -> hb_tag_t;
unsafe fn countFeatures(font: *mut XeTeXFontInst, script: hb_tag_t, language: hb_tag_t) -> u32;
unsafe fn getIndFeature(
font: *mut XeTeXFontInst,
script: hb_tag_t,
language: hb_tag_t,
index: u32,
) -> hb_tag_t;
unsafe fn countScripts(font: *mut XeTeXFontInst) -> u32;
unsafe fn getIndScript(font: *mut XeTeXFontInst, index: u32) -> hb_tag_t;
unsafe fn countLanguages(font: *mut XeTeXFontInst, script: hb_tag_t) -> u32;
unsafe fn getSlant(font: *mut XeTeXFontInst) -> Scaled;
unsafe fn getFontTablePtr(font: *mut XeTeXFontInst, tableTag: u32) -> *mut libc::c_void;
// unsafe fn deleteFont(mut font: *mut XeTeXFontInst);
}
// Not quite layout engine things
// pub unsafe fn createFont(fontRef: PlatformFontRef, pointSize: Fixed) -> *mut XeTeXFontInst;
// pub unsafe fn createFontFromFile(
// filename: &CStr,
// index: libc::c_int,
// pointSize: Fixed,
// ) -> *mut XeTeXFontInst;
// // Misc static dictionary lookups/setters
// pub unsafe fn set_cp_code(fontNum: libc::c_int, code: libc::c_uint, side: libc::c_int, value: libc::c_int);
// pub unsafe fn get_cp_code(
// fontNum: libc::c_int,
// code: libc::c_uint,
// side: libc::c_int,
// ) -> libc::c_int;
/*pub struct GlyphBBoxCache {
//...
}
impl GlyphBBoxCache {
/// getCachedGlyphBBox
pub unsafe fn get(fontID: u16, glyphID: u16) -> Option<GlyphBBox> {
unimplemented!()
}
pub unsafe fn store(fontID: u16, glyphID: u16, bbox: GlyphBBox) {
unimplemented!()
}
}*/
#[repr(u8)]
pub enum GlyphEdge {
Left = 1,
Top = 2,
Right = 3,
Bottom = 4,
}
impl GlyphEdge {
/// If a glyph is left or right
#[inline]
pub fn is_side(&self) -> bool {
match *self {
GlyphEdge::Left | GlyphEdge::Right => true,
_ => false,
}
}
#[inline]
pub fn pick_from(&self, options: &(f32, f32)) -> f32 {
match *self {
GlyphEdge::Left | GlyphEdge::Top => options.0,
GlyphEdge::Right | GlyphEdge::Bottom => options.1,
}
}
pub fn from_int(i: i32) -> Option<Self> {
Some(match i {
1 => GlyphEdge::Left,
2 => GlyphEdge::Top,
3 => GlyphEdge::Right,
4 => GlyphEdge::Bottom,
_ => return None,
})
}
}
#[enum_dispatch::enum_dispatch]
pub(crate) enum NativeFont {
#[cfg(target_os = "macos")]
Aat(crate::xetex_aatfont::AATLayoutEngine),
Otgr(XeTeXLayoutEngine),
}
impl NativeFont {
pub(crate) fn flag(&self) -> u32 {
match self {
#[cfg(target_os = "macos")]
Self::Aat(_) => 0xFFFF,
Self::Otgr(_) => 0xFFFE,
}
}
}
#[enum_dispatch::enum_dispatch(NativeFont)]
pub(crate) trait TextLayoutEngine {
/// The most important trait method. Lay out some text and return its size.
unsafe fn layout_text(&mut self, request: LayoutRequest) -> NodeLayout;
/// getFontFilename
fn font_filename(&self, index: &mut u32) -> String;
//unsafe fn print_font_name(&self, c: i32, arg1: i32, arg2: i32);
/// getFontInst
//fn font_instance(&self) -> &XeTeXFontInst;
// should implement Drop
// unsafe fn deleteLayoutEngine(mut engine: XeTeXLayoutEngine);
unsafe fn glyph_width(&self, gid: u32) -> f64;
// XXX: make a single struct for make_font_def to consume, of all the required values
unsafe fn get_font_metrics(&self) -> (Scaled, Scaled, Scaled, Scaled, Scaled);
/// ot_font_get, aat_font_get
unsafe fn poorly_named_getter(&self, what: XetexExtCmd) -> i32;
/// ot_font_get_1, aat_font_get_1
unsafe fn poorly_named_getter_1(&self, what: XetexExtCmd, param1: i32) -> i32;
/// ot_font_get_2, aat_font_get_2
unsafe fn poorly_named_getter_2(&self, what: XetexExtCmd, param1: i32, param2: i32) -> i32;
unsafe fn poorly_named_getter_3(
&self,
what: XetexExtCmd,
param1: i32,
param2: i32,
param3: i32,
) -> i32;
unsafe fn get_flags(&self, font_number: usize) -> u16;
/// getExtendFactor
fn extend_factor(&self) -> f64;
/// getPointSize
fn point_size(&self) -> f64;
/// getAscentAndDescent
fn ascent_and_descent(&self) -> (f32, f32);
/// getCapAndXHeight
fn cap_and_x_height(&self) -> (f32, f32);
/// getEmboldenFactor
fn embolden_factor(&self) -> f32;
/// as r,g,b,a bytes, in order (careful of endianness maybe at output phase)
fn rgb_value(&self) -> u32;
/// getSlantFactor
unsafe fn slant_factor(&self) -> f64;
/// getGlyphName
unsafe fn glyph_name(&self, gid: GlyphID) -> String;
/// getGlyphBounds (had out param)
unsafe fn glyph_bbox(&self, glyphID: u32) -> Option<GlyphBBox>;
unsafe fn get_glyph_width_from_engine(&self, glyphID: u32) -> f64;
/// getGlyphHeightDepth (had out params height, depth)
unsafe fn glyph_height_depth(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphSidebearings (had out params lsb, rsb)
unsafe fn glyph_sidebearings(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphItalCorr
unsafe fn glyph_ital_correction(&self, glyphID: u32) -> Option<f64>;
/// mapCharToGlyph
/// Should probably just use engine.font as this just passes on the call
/// This is used for 'fallback in case lacks an OS/2 table', and also for adding accents
/// (get_native_char_sidebearings).
/// Although the shaping engine should probably be doing the latter, not xetex0!
fn map_char_to_glyph(&self, chr: char) -> u32;
/// getFontCharRange
/// Another candidate for using XeTeXFontInst directly
fn font_char_range(&self, reqFirst: i32) -> i32;
/// mapGlyphToIndex
/// Should use engine.font directly
fn map_glyph_to_index(&self, glyph_name: &str) -> i32;
// Provided methods, override if using stuff
/// Default impl is { false }.
/// Only used directly with xetex0.
fn using_graphite(&self) -> bool {
false
}
/// Returns true if "user asked for Graphite line breaking and the font supports it" | false
}
/// Not sure what AAT should return, since this is only called with random casts to
/// XeTeXLayoutENgine in xetex0.
fn using_open_type(&self) -> bool {
false
}
unsafe fn is_open_type_math_font(&self) -> bool {
false
}
}
/*
trait GraphiteFontSomething {
unsafe fn countGraphiteFeatures(&self) -> u32;
unsafe fn getGraphiteFeatureCode(&self, index: u32) -> u32;
unsafe fn countGraphiteFeatureSettings(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureSettingCode(&self, featureID: u32, index: u32) -> u32;
unsafe fn getGraphiteFeatureDefaultSetting(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureLabel(&self, featureID: u32) -> *mut libc::c_char;
unsafe fn getGraphiteFeatureSettingLabel(
&self,
featureID: u32,
settingID: u32,
) -> *mut libc::c_char;
unsafe fn findGraphiteFeature(
&self,
s: *const libc::c_char,
e: *const libc::c_char,
f: *mut hb_tag_t,
v: *mut libc::c_int,
) -> bool;
unsafe fn findGraphiteFeatureNamed(
&self,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
unsafe fn findGraphiteFeatureSettingNamed(
&self,
id: u32,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
}
*/ | /// Only relevant if this engine actually uses graphite, hence default impl of { false }
unsafe fn initGraphiteBreaking(&mut self, _txt: &[u16]) -> bool { | random_line_split |
text_layout_engine.rs | #![allow(dead_code)]
// XXX: should be no harfbuzz in the interface
use crate::node::NativeWord;
use crate::xetex_font_info::{GlyphBBox, XeTeXFontInst};
//use crate::xetex_font_manager::PlatformFontRef;
use crate::cmd::XetexExtCmd;
use crate::xetex_font_info::GlyphID;
use crate::xetex_layout_interface::FixedPoint;
use crate::xetex_layout_interface::XeTeXLayoutEngine;
use crate::xetex_scaledmath::Scaled;
use harfbuzz_sys::hb_tag_t;
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TextDirection {
LTR,
RTL,
}
// Annoying XeTeXFontMgr singleton accessors
// pub unsafe fn getFullName(fontRef: PlatformFontRef) -> *const libc::c_char;
// pub unsafe fn getDesignSize(font: *mut XeTeXFontInst) -> f64;
// pub unsafe fn findFontByName(name: &CStr, var: Option<&mut String>, size: f64) -> PlatformFontRef;
// pub unsafe fn terminate_font_manager();
// pub unsafe fn destroy_font_manager();
// Internal to XeTeXLayoutEngine but could use improvement
// pub unsafe fn getGlyphs(engine: XeTeXLayoutEngine, glyphs: *mut u32);
// pub unsafe fn getGlyphAdvances(engine: XeTeXLayoutEngine, advances: *mut f32);
// pub unsafe fn getGlyphPositions(engine: XeTeXLayoutEngine, positions: *mut FloatPoint);
// engine : *font_layout_engine.offset((*node.offset(4)).b16.s2 as isize) as CFDictionaryRef;
pub(crate) struct LayoutRequest<'a> {
// ```text
// let txtLen = (*node.offset(4)).b16.s1 as libc::c_long;
// let txtPtr = node.offset(6) as *mut UniChar;
// slice::from_raw_parts(txtPtr, txtLen)
// ```
pub text: &'a [u16],
// node.offset(1).b32.s1
pub line_width: Scaled,
// let f = let mut f: libc::c_uint = (*node.offset(4)).b16.s2 as libc::c_uint;
// *font_letter_space.offset(f as usize)
pub letter_space_unit: Scaled,
/// Only used by AAT
pub justify: bool,
}
impl<'a> LayoutRequest<'a> {
/// Unsafety: obviously, dereferences raw node pointer. The lifetime is also pulled out of
/// thin air, so just keep it in scope, ok?
pub(crate) unsafe fn from_node(node: &'a NativeWord, justify: bool) -> LayoutRequest<'a> {
use crate::xetex_ini::FONT_LETTER_SPACE;
let text = node.text();
let line_width = node.width();
let f = node.font() as usize;
let letter_space_unit = FONT_LETTER_SPACE[f];
LayoutRequest {
text,
line_width,
letter_space_unit,
justify,
}
}
}
pub(crate) struct NodeLayout {
pub lsDelta: Option<Scaled>,
pub width: Scaled,
pub total_glyph_count: u16,
pub glyph_info: *mut FixedPoint,
}
impl NodeLayout {
pub(crate) unsafe fn write_node(&self, node: &mut NativeWord) {
let NodeLayout {
lsDelta,
width,
total_glyph_count,
glyph_info,
} = *self;
node.set_width(width + lsDelta.unwrap_or(Scaled::ZERO));
node.set_glyph_count(total_glyph_count);
node.set_glyph_info_ptr(glyph_info as *mut _);
}
}
/// Stuff that should be added as XeTeXFontInst methods
trait FontInstance {
unsafe fn countGlyphs(font: *mut XeTeXFontInst) -> u32;
unsafe fn getGlyphWidth(font: *mut XeTeXFontInst, gid: u32) -> f32;
unsafe fn setFontLayoutDir(font: *mut XeTeXFontInst, vertical: libc::c_int);
unsafe fn getIndLanguage(font: *mut XeTeXFontInst, script: hb_tag_t, index: u32) -> hb_tag_t;
unsafe fn countFeatures(font: *mut XeTeXFontInst, script: hb_tag_t, language: hb_tag_t) -> u32;
unsafe fn getIndFeature(
font: *mut XeTeXFontInst,
script: hb_tag_t,
language: hb_tag_t,
index: u32,
) -> hb_tag_t;
unsafe fn countScripts(font: *mut XeTeXFontInst) -> u32;
unsafe fn getIndScript(font: *mut XeTeXFontInst, index: u32) -> hb_tag_t;
unsafe fn countLanguages(font: *mut XeTeXFontInst, script: hb_tag_t) -> u32;
unsafe fn getSlant(font: *mut XeTeXFontInst) -> Scaled;
unsafe fn getFontTablePtr(font: *mut XeTeXFontInst, tableTag: u32) -> *mut libc::c_void;
// unsafe fn deleteFont(mut font: *mut XeTeXFontInst);
}
// Not quite layout engine things
// pub unsafe fn createFont(fontRef: PlatformFontRef, pointSize: Fixed) -> *mut XeTeXFontInst;
// pub unsafe fn createFontFromFile(
// filename: &CStr,
// index: libc::c_int,
// pointSize: Fixed,
// ) -> *mut XeTeXFontInst;
// // Misc static dictionary lookups/setters
// pub unsafe fn set_cp_code(fontNum: libc::c_int, code: libc::c_uint, side: libc::c_int, value: libc::c_int);
// pub unsafe fn get_cp_code(
// fontNum: libc::c_int,
// code: libc::c_uint,
// side: libc::c_int,
// ) -> libc::c_int;
/*pub struct GlyphBBoxCache {
//...
}
impl GlyphBBoxCache {
/// getCachedGlyphBBox
pub unsafe fn get(fontID: u16, glyphID: u16) -> Option<GlyphBBox> {
unimplemented!()
}
pub unsafe fn store(fontID: u16, glyphID: u16, bbox: GlyphBBox) {
unimplemented!()
}
}*/
#[repr(u8)]
pub enum GlyphEdge {
Left = 1,
Top = 2,
Right = 3,
Bottom = 4,
}
impl GlyphEdge {
/// If a glyph is left or right
#[inline]
pub fn is_side(&self) -> bool {
match *self {
GlyphEdge::Left | GlyphEdge::Right => true,
_ => false,
}
}
#[inline]
pub fn pick_from(&self, options: &(f32, f32)) -> f32 |
pub fn from_int(i: i32) -> Option<Self> {
Some(match i {
1 => GlyphEdge::Left,
2 => GlyphEdge::Top,
3 => GlyphEdge::Right,
4 => GlyphEdge::Bottom,
_ => return None,
})
}
}
#[enum_dispatch::enum_dispatch]
pub(crate) enum NativeFont {
#[cfg(target_os = "macos")]
Aat(crate::xetex_aatfont::AATLayoutEngine),
Otgr(XeTeXLayoutEngine),
}
impl NativeFont {
pub(crate) fn flag(&self) -> u32 {
match self {
#[cfg(target_os = "macos")]
Self::Aat(_) => 0xFFFF,
Self::Otgr(_) => 0xFFFE,
}
}
}
#[enum_dispatch::enum_dispatch(NativeFont)]
pub(crate) trait TextLayoutEngine {
/// The most important trait method. Lay out some text and return its size.
unsafe fn layout_text(&mut self, request: LayoutRequest) -> NodeLayout;
/// getFontFilename
fn font_filename(&self, index: &mut u32) -> String;
//unsafe fn print_font_name(&self, c: i32, arg1: i32, arg2: i32);
/// getFontInst
//fn font_instance(&self) -> &XeTeXFontInst;
// should implement Drop
// unsafe fn deleteLayoutEngine(mut engine: XeTeXLayoutEngine);
unsafe fn glyph_width(&self, gid: u32) -> f64;
// XXX: make a single struct for make_font_def to consume, of all the required values
unsafe fn get_font_metrics(&self) -> (Scaled, Scaled, Scaled, Scaled, Scaled);
/// ot_font_get, aat_font_get
unsafe fn poorly_named_getter(&self, what: XetexExtCmd) -> i32;
/// ot_font_get_1, aat_font_get_1
unsafe fn poorly_named_getter_1(&self, what: XetexExtCmd, param1: i32) -> i32;
/// ot_font_get_2, aat_font_get_2
unsafe fn poorly_named_getter_2(&self, what: XetexExtCmd, param1: i32, param2: i32) -> i32;
unsafe fn poorly_named_getter_3(
&self,
what: XetexExtCmd,
param1: i32,
param2: i32,
param3: i32,
) -> i32;
unsafe fn get_flags(&self, font_number: usize) -> u16;
/// getExtendFactor
fn extend_factor(&self) -> f64;
/// getPointSize
fn point_size(&self) -> f64;
/// getAscentAndDescent
fn ascent_and_descent(&self) -> (f32, f32);
/// getCapAndXHeight
fn cap_and_x_height(&self) -> (f32, f32);
/// getEmboldenFactor
fn embolden_factor(&self) -> f32;
/// as r,g,b,a bytes, in order (careful of endianness maybe at output phase)
fn rgb_value(&self) -> u32;
/// getSlantFactor
unsafe fn slant_factor(&self) -> f64;
/// getGlyphName
unsafe fn glyph_name(&self, gid: GlyphID) -> String;
/// getGlyphBounds (had out param)
unsafe fn glyph_bbox(&self, glyphID: u32) -> Option<GlyphBBox>;
unsafe fn get_glyph_width_from_engine(&self, glyphID: u32) -> f64;
/// getGlyphHeightDepth (had out params height, depth)
unsafe fn glyph_height_depth(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphSidebearings (had out params lsb, rsb)
unsafe fn glyph_sidebearings(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphItalCorr
unsafe fn glyph_ital_correction(&self, glyphID: u32) -> Option<f64>;
/// mapCharToGlyph
/// Should probably just use engine.font as this just passes on the call
/// This is used for 'fallback in case lacks an OS/2 table', and also for adding accents
/// (get_native_char_sidebearings).
/// Although the shaping engine should probably be doing the latter, not xetex0!
fn map_char_to_glyph(&self, chr: char) -> u32;
/// getFontCharRange
/// Another candidate for using XeTeXFontInst directly
fn font_char_range(&self, reqFirst: i32) -> i32;
/// mapGlyphToIndex
/// Should use engine.font directly
fn map_glyph_to_index(&self, glyph_name: &str) -> i32;
// Provided methods, override if using stuff
/// Default impl is { false }.
/// Only used directly with xetex0.
fn using_graphite(&self) -> bool {
false
}
/// Returns true if "user asked for Graphite line breaking and the font supports it"
/// Only relevant if this engine actually uses graphite, hence default impl of { false }
unsafe fn initGraphiteBreaking(&mut self, _txt: &[u16]) -> bool {
false
}
/// Not sure what AAT should return, since this is only called with random casts to
/// XeTeXLayoutENgine in xetex0.
fn using_open_type(&self) -> bool {
false
}
unsafe fn is_open_type_math_font(&self) -> bool {
false
}
}
/*
trait GraphiteFontSomething {
unsafe fn countGraphiteFeatures(&self) -> u32;
unsafe fn getGraphiteFeatureCode(&self, index: u32) -> u32;
unsafe fn countGraphiteFeatureSettings(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureSettingCode(&self, featureID: u32, index: u32) -> u32;
unsafe fn getGraphiteFeatureDefaultSetting(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureLabel(&self, featureID: u32) -> *mut libc::c_char;
unsafe fn getGraphiteFeatureSettingLabel(
&self,
featureID: u32,
settingID: u32,
) -> *mut libc::c_char;
unsafe fn findGraphiteFeature(
&self,
s: *const libc::c_char,
e: *const libc::c_char,
f: *mut hb_tag_t,
v: *mut libc::c_int,
) -> bool;
unsafe fn findGraphiteFeatureNamed(
&self,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
unsafe fn findGraphiteFeatureSettingNamed(
&self,
id: u32,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
}
*/
| {
match *self {
GlyphEdge::Left | GlyphEdge::Top => options.0,
GlyphEdge::Right | GlyphEdge::Bottom => options.1,
}
} | identifier_body |
text_layout_engine.rs | #![allow(dead_code)]
// XXX: should be no harfbuzz in the interface
use crate::node::NativeWord;
use crate::xetex_font_info::{GlyphBBox, XeTeXFontInst};
//use crate::xetex_font_manager::PlatformFontRef;
use crate::cmd::XetexExtCmd;
use crate::xetex_font_info::GlyphID;
use crate::xetex_layout_interface::FixedPoint;
use crate::xetex_layout_interface::XeTeXLayoutEngine;
use crate::xetex_scaledmath::Scaled;
use harfbuzz_sys::hb_tag_t;
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TextDirection {
LTR,
RTL,
}
// Annoying XeTeXFontMgr singleton accessors
// pub unsafe fn getFullName(fontRef: PlatformFontRef) -> *const libc::c_char;
// pub unsafe fn getDesignSize(font: *mut XeTeXFontInst) -> f64;
// pub unsafe fn findFontByName(name: &CStr, var: Option<&mut String>, size: f64) -> PlatformFontRef;
// pub unsafe fn terminate_font_manager();
// pub unsafe fn destroy_font_manager();
// Internal to XeTeXLayoutEngine but could use improvement
// pub unsafe fn getGlyphs(engine: XeTeXLayoutEngine, glyphs: *mut u32);
// pub unsafe fn getGlyphAdvances(engine: XeTeXLayoutEngine, advances: *mut f32);
// pub unsafe fn getGlyphPositions(engine: XeTeXLayoutEngine, positions: *mut FloatPoint);
// engine : *font_layout_engine.offset((*node.offset(4)).b16.s2 as isize) as CFDictionaryRef;
pub(crate) struct LayoutRequest<'a> {
// ```text
// let txtLen = (*node.offset(4)).b16.s1 as libc::c_long;
// let txtPtr = node.offset(6) as *mut UniChar;
// slice::from_raw_parts(txtPtr, txtLen)
// ```
pub text: &'a [u16],
// node.offset(1).b32.s1
pub line_width: Scaled,
// let f = let mut f: libc::c_uint = (*node.offset(4)).b16.s2 as libc::c_uint;
// *font_letter_space.offset(f as usize)
pub letter_space_unit: Scaled,
/// Only used by AAT
pub justify: bool,
}
impl<'a> LayoutRequest<'a> {
/// Unsafety: obviously, dereferences raw node pointer. The lifetime is also pulled out of
/// thin air, so just keep it in scope, ok?
pub(crate) unsafe fn | (node: &'a NativeWord, justify: bool) -> LayoutRequest<'a> {
use crate::xetex_ini::FONT_LETTER_SPACE;
let text = node.text();
let line_width = node.width();
let f = node.font() as usize;
let letter_space_unit = FONT_LETTER_SPACE[f];
LayoutRequest {
text,
line_width,
letter_space_unit,
justify,
}
}
}
pub(crate) struct NodeLayout {
pub lsDelta: Option<Scaled>,
pub width: Scaled,
pub total_glyph_count: u16,
pub glyph_info: *mut FixedPoint,
}
impl NodeLayout {
pub(crate) unsafe fn write_node(&self, node: &mut NativeWord) {
let NodeLayout {
lsDelta,
width,
total_glyph_count,
glyph_info,
} = *self;
node.set_width(width + lsDelta.unwrap_or(Scaled::ZERO));
node.set_glyph_count(total_glyph_count);
node.set_glyph_info_ptr(glyph_info as *mut _);
}
}
/// Stuff that should be added as XeTeXFontInst methods
trait FontInstance {
unsafe fn countGlyphs(font: *mut XeTeXFontInst) -> u32;
unsafe fn getGlyphWidth(font: *mut XeTeXFontInst, gid: u32) -> f32;
unsafe fn setFontLayoutDir(font: *mut XeTeXFontInst, vertical: libc::c_int);
unsafe fn getIndLanguage(font: *mut XeTeXFontInst, script: hb_tag_t, index: u32) -> hb_tag_t;
unsafe fn countFeatures(font: *mut XeTeXFontInst, script: hb_tag_t, language: hb_tag_t) -> u32;
unsafe fn getIndFeature(
font: *mut XeTeXFontInst,
script: hb_tag_t,
language: hb_tag_t,
index: u32,
) -> hb_tag_t;
unsafe fn countScripts(font: *mut XeTeXFontInst) -> u32;
unsafe fn getIndScript(font: *mut XeTeXFontInst, index: u32) -> hb_tag_t;
unsafe fn countLanguages(font: *mut XeTeXFontInst, script: hb_tag_t) -> u32;
unsafe fn getSlant(font: *mut XeTeXFontInst) -> Scaled;
unsafe fn getFontTablePtr(font: *mut XeTeXFontInst, tableTag: u32) -> *mut libc::c_void;
// unsafe fn deleteFont(mut font: *mut XeTeXFontInst);
}
// Not quite layout engine things
// pub unsafe fn createFont(fontRef: PlatformFontRef, pointSize: Fixed) -> *mut XeTeXFontInst;
// pub unsafe fn createFontFromFile(
// filename: &CStr,
// index: libc::c_int,
// pointSize: Fixed,
// ) -> *mut XeTeXFontInst;
// // Misc static dictionary lookups/setters
// pub unsafe fn set_cp_code(fontNum: libc::c_int, code: libc::c_uint, side: libc::c_int, value: libc::c_int);
// pub unsafe fn get_cp_code(
// fontNum: libc::c_int,
// code: libc::c_uint,
// side: libc::c_int,
// ) -> libc::c_int;
/*pub struct GlyphBBoxCache {
//...
}
impl GlyphBBoxCache {
/// getCachedGlyphBBox
pub unsafe fn get(fontID: u16, glyphID: u16) -> Option<GlyphBBox> {
unimplemented!()
}
pub unsafe fn store(fontID: u16, glyphID: u16, bbox: GlyphBBox) {
unimplemented!()
}
}*/
#[repr(u8)]
pub enum GlyphEdge {
Left = 1,
Top = 2,
Right = 3,
Bottom = 4,
}
impl GlyphEdge {
/// If a glyph is left or right
#[inline]
pub fn is_side(&self) -> bool {
match *self {
GlyphEdge::Left | GlyphEdge::Right => true,
_ => false,
}
}
#[inline]
pub fn pick_from(&self, options: &(f32, f32)) -> f32 {
match *self {
GlyphEdge::Left | GlyphEdge::Top => options.0,
GlyphEdge::Right | GlyphEdge::Bottom => options.1,
}
}
pub fn from_int(i: i32) -> Option<Self> {
Some(match i {
1 => GlyphEdge::Left,
2 => GlyphEdge::Top,
3 => GlyphEdge::Right,
4 => GlyphEdge::Bottom,
_ => return None,
})
}
}
#[enum_dispatch::enum_dispatch]
pub(crate) enum NativeFont {
#[cfg(target_os = "macos")]
Aat(crate::xetex_aatfont::AATLayoutEngine),
Otgr(XeTeXLayoutEngine),
}
impl NativeFont {
pub(crate) fn flag(&self) -> u32 {
match self {
#[cfg(target_os = "macos")]
Self::Aat(_) => 0xFFFF,
Self::Otgr(_) => 0xFFFE,
}
}
}
#[enum_dispatch::enum_dispatch(NativeFont)]
pub(crate) trait TextLayoutEngine {
/// The most important trait method. Lay out some text and return its size.
unsafe fn layout_text(&mut self, request: LayoutRequest) -> NodeLayout;
/// getFontFilename
fn font_filename(&self, index: &mut u32) -> String;
//unsafe fn print_font_name(&self, c: i32, arg1: i32, arg2: i32);
/// getFontInst
//fn font_instance(&self) -> &XeTeXFontInst;
// should implement Drop
// unsafe fn deleteLayoutEngine(mut engine: XeTeXLayoutEngine);
unsafe fn glyph_width(&self, gid: u32) -> f64;
// XXX: make a single struct for make_font_def to consume, of all the required values
unsafe fn get_font_metrics(&self) -> (Scaled, Scaled, Scaled, Scaled, Scaled);
/// ot_font_get, aat_font_get
unsafe fn poorly_named_getter(&self, what: XetexExtCmd) -> i32;
/// ot_font_get_1, aat_font_get_1
unsafe fn poorly_named_getter_1(&self, what: XetexExtCmd, param1: i32) -> i32;
/// ot_font_get_2, aat_font_get_2
unsafe fn poorly_named_getter_2(&self, what: XetexExtCmd, param1: i32, param2: i32) -> i32;
unsafe fn poorly_named_getter_3(
&self,
what: XetexExtCmd,
param1: i32,
param2: i32,
param3: i32,
) -> i32;
unsafe fn get_flags(&self, font_number: usize) -> u16;
/// getExtendFactor
fn extend_factor(&self) -> f64;
/// getPointSize
fn point_size(&self) -> f64;
/// getAscentAndDescent
fn ascent_and_descent(&self) -> (f32, f32);
/// getCapAndXHeight
fn cap_and_x_height(&self) -> (f32, f32);
/// getEmboldenFactor
fn embolden_factor(&self) -> f32;
/// as r,g,b,a bytes, in order (careful of endianness maybe at output phase)
fn rgb_value(&self) -> u32;
/// getSlantFactor
unsafe fn slant_factor(&self) -> f64;
/// getGlyphName
unsafe fn glyph_name(&self, gid: GlyphID) -> String;
/// getGlyphBounds (had out param)
unsafe fn glyph_bbox(&self, glyphID: u32) -> Option<GlyphBBox>;
unsafe fn get_glyph_width_from_engine(&self, glyphID: u32) -> f64;
/// getGlyphHeightDepth (had out params height, depth)
unsafe fn glyph_height_depth(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphSidebearings (had out params lsb, rsb)
unsafe fn glyph_sidebearings(&self, glyphID: u32) -> Option<(f32, f32)>;
/// getGlyphItalCorr
unsafe fn glyph_ital_correction(&self, glyphID: u32) -> Option<f64>;
/// mapCharToGlyph
/// Should probably just use engine.font as this just passes on the call
/// This is used for 'fallback in case lacks an OS/2 table', and also for adding accents
/// (get_native_char_sidebearings).
/// Although the shaping engine should probably be doing the latter, not xetex0!
fn map_char_to_glyph(&self, chr: char) -> u32;
/// getFontCharRange
/// Another candidate for using XeTeXFontInst directly
fn font_char_range(&self, reqFirst: i32) -> i32;
/// mapGlyphToIndex
/// Should use engine.font directly
fn map_glyph_to_index(&self, glyph_name: &str) -> i32;
// Provided methods, override if using stuff
/// Default impl is { false }.
/// Only used directly with xetex0.
fn using_graphite(&self) -> bool {
false
}
/// Returns true if "user asked for Graphite line breaking and the font supports it"
/// Only relevant if this engine actually uses graphite, hence default impl of { false }
unsafe fn initGraphiteBreaking(&mut self, _txt: &[u16]) -> bool {
false
}
/// Not sure what AAT should return, since this is only called with random casts to
/// XeTeXLayoutENgine in xetex0.
fn using_open_type(&self) -> bool {
false
}
unsafe fn is_open_type_math_font(&self) -> bool {
false
}
}
/*
trait GraphiteFontSomething {
unsafe fn countGraphiteFeatures(&self) -> u32;
unsafe fn getGraphiteFeatureCode(&self, index: u32) -> u32;
unsafe fn countGraphiteFeatureSettings(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureSettingCode(&self, featureID: u32, index: u32) -> u32;
unsafe fn getGraphiteFeatureDefaultSetting(&self, featureID: u32) -> u32;
unsafe fn getGraphiteFeatureLabel(&self, featureID: u32) -> *mut libc::c_char;
unsafe fn getGraphiteFeatureSettingLabel(
&self,
featureID: u32,
settingID: u32,
) -> *mut libc::c_char;
unsafe fn findGraphiteFeature(
&self,
s: *const libc::c_char,
e: *const libc::c_char,
f: *mut hb_tag_t,
v: *mut libc::c_int,
) -> bool;
unsafe fn findGraphiteFeatureNamed(
&self,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
unsafe fn findGraphiteFeatureSettingNamed(
&self,
id: u32,
name: *const libc::c_char,
namelength: libc::c_int,
) -> libc::c_long;
}
*/
| from_node | identifier_name |
server.rs | use crate::api::pos_grpc_service::PosGrpcService;
use crate::{DEFAULT_BITS_PER_INDEX, DEFAULT_INDEXES_PER_CYCLE, DEFAULT_SALT};
use anyhow::{bail, Result};
use pos_api::api::job::JobStatus;
use pos_api::api::pos_data_service_server::PosDataServiceServer;
use pos_api::api::{
AbortJobRequest, AddJobRequest, Config, Job, JobStatusStreamResponse, Provider,
};
use pos_compute::{get_providers, PosComputeProvider, COMPUTE_API_CLASS_CPU};
use std::collections::HashMap;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Server;
use tonic::Status;
use xactor::*;
/// PosServer is a Spacemesh proof of space data generator service.
/// The service manages a pool of compute providers (gpus) and schedules
/// client-submitted jobs to use these providers to create pos data and to report job
/// progress and errors to clients.
/// todo: support aborting an in-progress job
pub(crate) struct PosServer {
providers: Vec<PosComputeProvider>, // gpu compute providers
pending_jobs: Vec<Job>, // pending
pub(crate) jobs: HashMap<u64, Job>, // in progress
pub(crate) config: Config, // compute config
pub(crate) providers_pool: Vec<u32>, // idle providers
job_status_subscribers: HashMap<u64, Sender<Result<JobStatusStreamResponse, Status>>>,
}
#[async_trait::async_trait]
impl Actor for PosServer {
async fn started(&mut self, _ctx: &mut Context<Self>) -> Result<()> {
info!("PosServer system service starting...");
Ok(())
}
async fn stopped(&mut self, _ctx: &mut Context<Self>) {
info!("PosServer system service stopped");
}
}
impl Service for PosServer {}
impl Default for PosServer {
fn default() -> Self {
PosServer {
providers: vec![],
pending_jobs: vec![],
jobs: Default::default(),
config: Config {
data_dir: "./".to_string(),
indexes_per_compute_cycle: DEFAULT_INDEXES_PER_CYCLE,
bits_per_index: DEFAULT_BITS_PER_INDEX,
salt: hex::decode(DEFAULT_SALT).unwrap(),
n: 512,
r: 1,
p: 1,
},
providers_pool: vec![],
job_status_subscribers: HashMap::default(),
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct Init {
/// server base config - must be set when initializing
pub(crate) use_cpu_provider: bool,
}
/// Init the service
#[async_trait::async_trait]
impl Handler<Init> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: Init) -> Result<()> {
for p in get_providers() {
if!msg.use_cpu_provider && p.compute_api == COMPUTE_API_CLASS_CPU {
info!(
"skipping cpu provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
continue;
}
if msg.use_cpu_provider && p.compute_api!= COMPUTE_API_CLASS_CPU {
info!("Skipping non-cpu provider. {}: {}", p.id, p.model);
continue;
}
info!(
"Adding to pool provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
self.providers_pool.push(p.id);
self.providers.push(p);
}
if self.providers.is_empty() {
bail!("no compatible compute providers are available on the system.")
}
Ok(())
}
}
#[message(result = "Result<Vec<Provider>>")]
pub(crate) struct GetAllProviders;
// Returns all system providers
#[async_trait::async_trait]
impl Handler<GetAllProviders> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: GetAllProviders,
) -> Result<Vec<Provider>> {
let mut res = vec![];
for p in self.providers.iter() {
res.push(Provider {
id: p.id,
model: p.model.clone(),
class: p.compute_api as i32,
})
}
Ok(res)
}
}
#[message(result = "Result<Vec<Job>>")]
pub(crate) struct GetAllJobs;
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetAllJobs> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetAllJobs) -> Result<Vec<Job>> {
let mut res: Vec<Job> = self.jobs.values().cloned().collect();
for job in self.pending_jobs.iter() {
res.push(job.clone())
}
Ok(res)
}
}
#[message(result = "Result<Option<Job>>")]
pub(crate) struct GetJob(pub(crate) u64);
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: GetJob) -> Result<Option<Job>> {
if let Some(job) = self.jobs.get(&msg.0) {
Ok(Some(job.clone()))
} else if let Some(job) = self.pending_jobs.iter().find(|&j| j.id == msg.0) {
Ok(Some(job.clone()))
} else {
Ok(None)
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct UpdateJobStatus(pub(crate) Job);
// Update job status - should only be called from a task which is processing the job
#[async_trait::async_trait]
impl Handler<UpdateJobStatus> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: UpdateJobStatus) -> Result<()> {
let updated_job = msg.0;
if let Some(_) = self.jobs.get(&updated_job.id) {
// job is running or stopped
if updated_job.status!= JobStatus::Started as i32 {
info!(
"job {} finished. Releasing gpu {} pool",
updated_job.id, updated_job.compute_provider_id
);
// Job stopped or completed - release provider id of job to pool
self.providers_pool.push(updated_job.compute_provider_id);
// pick a pending job any start it
if let Some(new_job) = self.pending_jobs.pop() {
info!("starting queued job {}", new_job.id);
self.start_task(&new_job).await?;
} else {
info!("no queued jobs");
}
}
// update job data
self.jobs.insert(updated_job.id, updated_job.clone());
} else if let Some(idx) = self
.pending_jobs
.iter()
.position(|j| j.id == updated_job.id)
{
self.pending_jobs.remove(idx);
self.pending_jobs.insert(idx, updated_job.clone());
} else {
error!("unrecognized job")
}
// update all job status subscribers
for sub in self.job_status_subscribers.clone().iter() {
let res = sub
.1
.send(Ok(JobStatusStreamResponse {
job: Some(updated_job.clone()),
}))
.await;
match res {
Ok(()) => info!("sent updated job status to subscriber"),
Err(e) => {
error!(
"failed to send updated job status to subscriber. deleting it: {}",
e
);
self.job_status_subscribers.remove(sub.0);
}
}
}
Ok(())
}
}
#[message(result = "Result<Job>")]
pub(crate) struct AddJob(pub(crate) AddJobRequest);
#[async_trait::async_trait] | impl Handler<AddJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AddJob) -> Result<Job> {
let data = msg.0;
let job = Job {
id: rand::random(),
bits_written: 0,
size_bits: data.post_size_bits,
started: 0,
submitted: datetime::Instant::now().seconds() as u64,
stopped: 0,
status: JobStatus::Queued as i32,
last_error: None,
friendly_name: data.friendly_name,
client_id: data.client_id,
compute_provider_id: u32::MAX,
pow_difficulty: data.pow_difficulty,
pow_solution_index: u64::MAX,
};
if let Err(e) = job.validate(
self.config.indexes_per_compute_cycle,
self.config.bits_per_index,
) {
error!("job can't be added - validation failed: {}, {}", job, e);
return Err(e);
}
if self.providers_pool.is_empty() {
// all providers busy with in-progress jobs - queue the job
self.pending_jobs.push(job.clone());
info!("all providers are busy - queueing job {}...", job.id);
return Ok(job);
}
let res_job = self.start_task(&job).await?;
Ok(res_job)
}
}
#[message(result = "Result<(Config)>")]
pub(crate) struct GetConfig;
/// Get the current pos compute config
#[async_trait::async_trait]
impl Handler<GetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetConfig) -> Result<Config> {
Ok(self.config.clone())
}
}
#[message(result = "Result<()>")]
pub(crate) struct AbortJob(pub(crate) AbortJobRequest);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<AbortJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AbortJob) -> Result<()> {
let req = msg.0;
if let Some(_job) = self.jobs.get(&req.id) {
// todo: abort on-going job - need to do this via sending a message the blocking task
if req.delete_data {
// todo: attempt to delete all job files in store (best effort)
}
}
if req.delete_job {
// remove job
if let Some(idx) = self.pending_jobs.iter().position(|j| j.id == req.id) {
self.pending_jobs.remove(idx);
}
self.jobs.remove(&req.id);
}
Ok(())
}
}
#[message(result = "Result<()>")]
pub(crate) struct SetConfig(pub(crate) Config);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<SetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: SetConfig) -> Result<()> {
self.config = msg.0;
Ok(())
}
}
/////////////////////////////////////////////
#[message(result = "Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>>")]
pub(crate) struct SubscribeToJobStatuses {}
#[async_trait::async_trait]
impl Handler<SubscribeToJobStatuses> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: SubscribeToJobStatuses,
) -> Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>> {
// create channel for streaming job statuses
let (tx, rx) = mpsc::channel(32);
// store the sender indexed by a new unique id
self.job_status_subscribers.insert(rand::random(), tx);
// return the receiver
Ok(ReceiverStream::new(rx))
}
}
/////////////////////////////////////////////
#[message(result = "Result<()>")]
pub(crate) struct StartGrpcService {
pub(crate) port: u32,
pub(crate) host: String,
}
#[async_trait::async_trait]
impl Handler<StartGrpcService> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: StartGrpcService) -> Result<()> {
let addr = format!("{}:{}", msg.host, msg.port).parse().unwrap();
info!("starting grpc service on: {}...", addr);
// todo: add a grpc health service
tokio::task::spawn(async move {
let res = Server::builder()
.add_service(PosDataServiceServer::new(PosGrpcService::default()))
.serve(addr)
.await;
if res.is_err() {
panic!("grpc server stopped due to error: {:?}", res.err().unwrap());
} else {
info!("grpc server stopped");
}
});
Ok(())
}
} | random_line_split |
|
server.rs | use crate::api::pos_grpc_service::PosGrpcService;
use crate::{DEFAULT_BITS_PER_INDEX, DEFAULT_INDEXES_PER_CYCLE, DEFAULT_SALT};
use anyhow::{bail, Result};
use pos_api::api::job::JobStatus;
use pos_api::api::pos_data_service_server::PosDataServiceServer;
use pos_api::api::{
AbortJobRequest, AddJobRequest, Config, Job, JobStatusStreamResponse, Provider,
};
use pos_compute::{get_providers, PosComputeProvider, COMPUTE_API_CLASS_CPU};
use std::collections::HashMap;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Server;
use tonic::Status;
use xactor::*;
/// PosServer is a Spacemesh proof of space data generator service.
/// The service manages a pool of compute providers (gpus) and schedules
/// client-submitted jobs to use these providers to create pos data and to report job
/// progress and errors to clients.
/// todo: support aborting an in-progress job
pub(crate) struct PosServer {
providers: Vec<PosComputeProvider>, // gpu compute providers
pending_jobs: Vec<Job>, // pending
pub(crate) jobs: HashMap<u64, Job>, // in progress
pub(crate) config: Config, // compute config
pub(crate) providers_pool: Vec<u32>, // idle providers
job_status_subscribers: HashMap<u64, Sender<Result<JobStatusStreamResponse, Status>>>,
}
#[async_trait::async_trait]
impl Actor for PosServer {
async fn started(&mut self, _ctx: &mut Context<Self>) -> Result<()> {
info!("PosServer system service starting...");
Ok(())
}
async fn stopped(&mut self, _ctx: &mut Context<Self>) {
info!("PosServer system service stopped");
}
}
impl Service for PosServer {}
impl Default for PosServer {
fn default() -> Self {
PosServer {
providers: vec![],
pending_jobs: vec![],
jobs: Default::default(),
config: Config {
data_dir: "./".to_string(),
indexes_per_compute_cycle: DEFAULT_INDEXES_PER_CYCLE,
bits_per_index: DEFAULT_BITS_PER_INDEX,
salt: hex::decode(DEFAULT_SALT).unwrap(),
n: 512,
r: 1,
p: 1,
},
providers_pool: vec![],
job_status_subscribers: HashMap::default(),
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct Init {
/// server base config - must be set when initializing
pub(crate) use_cpu_provider: bool,
}
/// Init the service
#[async_trait::async_trait]
impl Handler<Init> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: Init) -> Result<()> {
for p in get_providers() {
if!msg.use_cpu_provider && p.compute_api == COMPUTE_API_CLASS_CPU {
info!(
"skipping cpu provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
continue;
}
if msg.use_cpu_provider && p.compute_api!= COMPUTE_API_CLASS_CPU {
info!("Skipping non-cpu provider. {}: {}", p.id, p.model);
continue;
}
info!(
"Adding to pool provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
self.providers_pool.push(p.id);
self.providers.push(p);
}
if self.providers.is_empty() {
bail!("no compatible compute providers are available on the system.")
}
Ok(())
}
}
#[message(result = "Result<Vec<Provider>>")]
pub(crate) struct GetAllProviders;
// Returns all system providers
#[async_trait::async_trait]
impl Handler<GetAllProviders> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: GetAllProviders,
) -> Result<Vec<Provider>> {
let mut res = vec![];
for p in self.providers.iter() {
res.push(Provider {
id: p.id,
model: p.model.clone(),
class: p.compute_api as i32,
})
}
Ok(res)
}
}
#[message(result = "Result<Vec<Job>>")]
pub(crate) struct GetAllJobs;
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetAllJobs> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetAllJobs) -> Result<Vec<Job>> {
let mut res: Vec<Job> = self.jobs.values().cloned().collect();
for job in self.pending_jobs.iter() {
res.push(job.clone())
}
Ok(res)
}
}
#[message(result = "Result<Option<Job>>")]
pub(crate) struct GetJob(pub(crate) u64);
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: GetJob) -> Result<Option<Job>> {
if let Some(job) = self.jobs.get(&msg.0) {
Ok(Some(job.clone()))
} else if let Some(job) = self.pending_jobs.iter().find(|&j| j.id == msg.0) {
Ok(Some(job.clone()))
} else {
Ok(None)
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct UpdateJobStatus(pub(crate) Job);
// Update job status - should only be called from a task which is processing the job
#[async_trait::async_trait]
impl Handler<UpdateJobStatus> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: UpdateJobStatus) -> Result<()> {
let updated_job = msg.0;
if let Some(_) = self.jobs.get(&updated_job.id) {
// job is running or stopped
if updated_job.status!= JobStatus::Started as i32 {
info!(
"job {} finished. Releasing gpu {} pool",
updated_job.id, updated_job.compute_provider_id
);
// Job stopped or completed - release provider id of job to pool
self.providers_pool.push(updated_job.compute_provider_id);
// pick a pending job any start it
if let Some(new_job) = self.pending_jobs.pop() {
info!("starting queued job {}", new_job.id);
self.start_task(&new_job).await?;
} else {
info!("no queued jobs");
}
}
// update job data
self.jobs.insert(updated_job.id, updated_job.clone());
} else if let Some(idx) = self
.pending_jobs
.iter()
.position(|j| j.id == updated_job.id)
{
self.pending_jobs.remove(idx);
self.pending_jobs.insert(idx, updated_job.clone());
} else {
error!("unrecognized job")
}
// update all job status subscribers
for sub in self.job_status_subscribers.clone().iter() {
let res = sub
.1
.send(Ok(JobStatusStreamResponse {
job: Some(updated_job.clone()),
}))
.await;
match res {
Ok(()) => info!("sent updated job status to subscriber"),
Err(e) => {
error!(
"failed to send updated job status to subscriber. deleting it: {}",
e
);
self.job_status_subscribers.remove(sub.0);
}
}
}
Ok(())
}
}
#[message(result = "Result<Job>")]
pub(crate) struct AddJob(pub(crate) AddJobRequest);
#[async_trait::async_trait]
impl Handler<AddJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AddJob) -> Result<Job> {
let data = msg.0;
let job = Job {
id: rand::random(),
bits_written: 0,
size_bits: data.post_size_bits,
started: 0,
submitted: datetime::Instant::now().seconds() as u64,
stopped: 0,
status: JobStatus::Queued as i32,
last_error: None,
friendly_name: data.friendly_name,
client_id: data.client_id,
compute_provider_id: u32::MAX,
pow_difficulty: data.pow_difficulty,
pow_solution_index: u64::MAX,
};
if let Err(e) = job.validate(
self.config.indexes_per_compute_cycle,
self.config.bits_per_index,
) {
error!("job can't be added - validation failed: {}, {}", job, e);
return Err(e);
}
if self.providers_pool.is_empty() {
// all providers busy with in-progress jobs - queue the job
self.pending_jobs.push(job.clone());
info!("all providers are busy - queueing job {}...", job.id);
return Ok(job);
}
let res_job = self.start_task(&job).await?;
Ok(res_job)
}
}
#[message(result = "Result<(Config)>")]
pub(crate) struct GetConfig;
/// Get the current pos compute config
#[async_trait::async_trait]
impl Handler<GetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetConfig) -> Result<Config> {
Ok(self.config.clone())
}
}
#[message(result = "Result<()>")]
pub(crate) struct AbortJob(pub(crate) AbortJobRequest);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<AbortJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AbortJob) -> Result<()> {
let req = msg.0;
if let Some(_job) = self.jobs.get(&req.id) {
// todo: abort on-going job - need to do this via sending a message the blocking task
if req.delete_data {
// todo: attempt to delete all job files in store (best effort)
}
}
if req.delete_job {
// remove job
if let Some(idx) = self.pending_jobs.iter().position(|j| j.id == req.id) |
self.jobs.remove(&req.id);
}
Ok(())
}
}
#[message(result = "Result<()>")]
pub(crate) struct SetConfig(pub(crate) Config);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<SetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: SetConfig) -> Result<()> {
self.config = msg.0;
Ok(())
}
}
/////////////////////////////////////////////
#[message(result = "Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>>")]
pub(crate) struct SubscribeToJobStatuses {}
#[async_trait::async_trait]
impl Handler<SubscribeToJobStatuses> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: SubscribeToJobStatuses,
) -> Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>> {
// create channel for streaming job statuses
let (tx, rx) = mpsc::channel(32);
// store the sender indexed by a new unique id
self.job_status_subscribers.insert(rand::random(), tx);
// return the receiver
Ok(ReceiverStream::new(rx))
}
}
/////////////////////////////////////////////
#[message(result = "Result<()>")]
pub(crate) struct StartGrpcService {
pub(crate) port: u32,
pub(crate) host: String,
}
#[async_trait::async_trait]
impl Handler<StartGrpcService> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: StartGrpcService) -> Result<()> {
let addr = format!("{}:{}", msg.host, msg.port).parse().unwrap();
info!("starting grpc service on: {}...", addr);
// todo: add a grpc health service
tokio::task::spawn(async move {
let res = Server::builder()
.add_service(PosDataServiceServer::new(PosGrpcService::default()))
.serve(addr)
.await;
if res.is_err() {
panic!("grpc server stopped due to error: {:?}", res.err().unwrap());
} else {
info!("grpc server stopped");
}
});
Ok(())
}
}
| {
self.pending_jobs.remove(idx);
} | conditional_block |
server.rs | use crate::api::pos_grpc_service::PosGrpcService;
use crate::{DEFAULT_BITS_PER_INDEX, DEFAULT_INDEXES_PER_CYCLE, DEFAULT_SALT};
use anyhow::{bail, Result};
use pos_api::api::job::JobStatus;
use pos_api::api::pos_data_service_server::PosDataServiceServer;
use pos_api::api::{
AbortJobRequest, AddJobRequest, Config, Job, JobStatusStreamResponse, Provider,
};
use pos_compute::{get_providers, PosComputeProvider, COMPUTE_API_CLASS_CPU};
use std::collections::HashMap;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Server;
use tonic::Status;
use xactor::*;
/// PosServer is a Spacemesh proof of space data generator service.
/// The service manages a pool of compute providers (gpus) and schedules
/// client-submitted jobs to use these providers to create pos data and to report job
/// progress and errors to clients.
/// todo: support aborting an in-progress job
pub(crate) struct PosServer {
providers: Vec<PosComputeProvider>, // gpu compute providers
pending_jobs: Vec<Job>, // pending
pub(crate) jobs: HashMap<u64, Job>, // in progress
pub(crate) config: Config, // compute config
pub(crate) providers_pool: Vec<u32>, // idle providers
job_status_subscribers: HashMap<u64, Sender<Result<JobStatusStreamResponse, Status>>>,
}
#[async_trait::async_trait]
impl Actor for PosServer {
async fn started(&mut self, _ctx: &mut Context<Self>) -> Result<()> {
info!("PosServer system service starting...");
Ok(())
}
async fn | (&mut self, _ctx: &mut Context<Self>) {
info!("PosServer system service stopped");
}
}
impl Service for PosServer {}
impl Default for PosServer {
fn default() -> Self {
PosServer {
providers: vec![],
pending_jobs: vec![],
jobs: Default::default(),
config: Config {
data_dir: "./".to_string(),
indexes_per_compute_cycle: DEFAULT_INDEXES_PER_CYCLE,
bits_per_index: DEFAULT_BITS_PER_INDEX,
salt: hex::decode(DEFAULT_SALT).unwrap(),
n: 512,
r: 1,
p: 1,
},
providers_pool: vec![],
job_status_subscribers: HashMap::default(),
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct Init {
/// server base config - must be set when initializing
pub(crate) use_cpu_provider: bool,
}
/// Init the service
#[async_trait::async_trait]
impl Handler<Init> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: Init) -> Result<()> {
for p in get_providers() {
if!msg.use_cpu_provider && p.compute_api == COMPUTE_API_CLASS_CPU {
info!(
"skipping cpu provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
continue;
}
if msg.use_cpu_provider && p.compute_api!= COMPUTE_API_CLASS_CPU {
info!("Skipping non-cpu provider. {}: {}", p.id, p.model);
continue;
}
info!(
"Adding to pool provider id: {}, model: {}, compute_api: {}",
p.id,
p.model,
pos_api::api_extensions::get_provider_class_string(p.compute_api)
);
self.providers_pool.push(p.id);
self.providers.push(p);
}
if self.providers.is_empty() {
bail!("no compatible compute providers are available on the system.")
}
Ok(())
}
}
#[message(result = "Result<Vec<Provider>>")]
pub(crate) struct GetAllProviders;
// Returns all system providers
#[async_trait::async_trait]
impl Handler<GetAllProviders> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: GetAllProviders,
) -> Result<Vec<Provider>> {
let mut res = vec![];
for p in self.providers.iter() {
res.push(Provider {
id: p.id,
model: p.model.clone(),
class: p.compute_api as i32,
})
}
Ok(res)
}
}
#[message(result = "Result<Vec<Job>>")]
pub(crate) struct GetAllJobs;
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetAllJobs> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetAllJobs) -> Result<Vec<Job>> {
let mut res: Vec<Job> = self.jobs.values().cloned().collect();
for job in self.pending_jobs.iter() {
res.push(job.clone())
}
Ok(res)
}
}
#[message(result = "Result<Option<Job>>")]
pub(crate) struct GetJob(pub(crate) u64);
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: GetJob) -> Result<Option<Job>> {
if let Some(job) = self.jobs.get(&msg.0) {
Ok(Some(job.clone()))
} else if let Some(job) = self.pending_jobs.iter().find(|&j| j.id == msg.0) {
Ok(Some(job.clone()))
} else {
Ok(None)
}
}
}
#[message(result = "Result<()>")]
pub(crate) struct UpdateJobStatus(pub(crate) Job);
// Update job status - should only be called from a task which is processing the job
#[async_trait::async_trait]
impl Handler<UpdateJobStatus> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: UpdateJobStatus) -> Result<()> {
let updated_job = msg.0;
if let Some(_) = self.jobs.get(&updated_job.id) {
// job is running or stopped
if updated_job.status!= JobStatus::Started as i32 {
info!(
"job {} finished. Releasing gpu {} pool",
updated_job.id, updated_job.compute_provider_id
);
// Job stopped or completed - release provider id of job to pool
self.providers_pool.push(updated_job.compute_provider_id);
// pick a pending job any start it
if let Some(new_job) = self.pending_jobs.pop() {
info!("starting queued job {}", new_job.id);
self.start_task(&new_job).await?;
} else {
info!("no queued jobs");
}
}
// update job data
self.jobs.insert(updated_job.id, updated_job.clone());
} else if let Some(idx) = self
.pending_jobs
.iter()
.position(|j| j.id == updated_job.id)
{
self.pending_jobs.remove(idx);
self.pending_jobs.insert(idx, updated_job.clone());
} else {
error!("unrecognized job")
}
// update all job status subscribers
for sub in self.job_status_subscribers.clone().iter() {
let res = sub
.1
.send(Ok(JobStatusStreamResponse {
job: Some(updated_job.clone()),
}))
.await;
match res {
Ok(()) => info!("sent updated job status to subscriber"),
Err(e) => {
error!(
"failed to send updated job status to subscriber. deleting it: {}",
e
);
self.job_status_subscribers.remove(sub.0);
}
}
}
Ok(())
}
}
#[message(result = "Result<Job>")]
pub(crate) struct AddJob(pub(crate) AddJobRequest);
#[async_trait::async_trait]
impl Handler<AddJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AddJob) -> Result<Job> {
let data = msg.0;
let job = Job {
id: rand::random(),
bits_written: 0,
size_bits: data.post_size_bits,
started: 0,
submitted: datetime::Instant::now().seconds() as u64,
stopped: 0,
status: JobStatus::Queued as i32,
last_error: None,
friendly_name: data.friendly_name,
client_id: data.client_id,
compute_provider_id: u32::MAX,
pow_difficulty: data.pow_difficulty,
pow_solution_index: u64::MAX,
};
if let Err(e) = job.validate(
self.config.indexes_per_compute_cycle,
self.config.bits_per_index,
) {
error!("job can't be added - validation failed: {}, {}", job, e);
return Err(e);
}
if self.providers_pool.is_empty() {
// all providers busy with in-progress jobs - queue the job
self.pending_jobs.push(job.clone());
info!("all providers are busy - queueing job {}...", job.id);
return Ok(job);
}
let res_job = self.start_task(&job).await?;
Ok(res_job)
}
}
#[message(result = "Result<(Config)>")]
pub(crate) struct GetConfig;
/// Get the current pos compute config
#[async_trait::async_trait]
impl Handler<GetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetConfig) -> Result<Config> {
Ok(self.config.clone())
}
}
#[message(result = "Result<()>")]
pub(crate) struct AbortJob(pub(crate) AbortJobRequest);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<AbortJob> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AbortJob) -> Result<()> {
let req = msg.0;
if let Some(_job) = self.jobs.get(&req.id) {
// todo: abort on-going job - need to do this via sending a message the blocking task
if req.delete_data {
// todo: attempt to delete all job files in store (best effort)
}
}
if req.delete_job {
// remove job
if let Some(idx) = self.pending_jobs.iter().position(|j| j.id == req.id) {
self.pending_jobs.remove(idx);
}
self.jobs.remove(&req.id);
}
Ok(())
}
}
#[message(result = "Result<()>")]
pub(crate) struct SetConfig(pub(crate) Config);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<SetConfig> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: SetConfig) -> Result<()> {
self.config = msg.0;
Ok(())
}
}
/////////////////////////////////////////////
#[message(result = "Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>>")]
pub(crate) struct SubscribeToJobStatuses {}
#[async_trait::async_trait]
impl Handler<SubscribeToJobStatuses> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: SubscribeToJobStatuses,
) -> Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>> {
// create channel for streaming job statuses
let (tx, rx) = mpsc::channel(32);
// store the sender indexed by a new unique id
self.job_status_subscribers.insert(rand::random(), tx);
// return the receiver
Ok(ReceiverStream::new(rx))
}
}
/////////////////////////////////////////////
#[message(result = "Result<()>")]
pub(crate) struct StartGrpcService {
pub(crate) port: u32,
pub(crate) host: String,
}
#[async_trait::async_trait]
impl Handler<StartGrpcService> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: StartGrpcService) -> Result<()> {
let addr = format!("{}:{}", msg.host, msg.port).parse().unwrap();
info!("starting grpc service on: {}...", addr);
// todo: add a grpc health service
tokio::task::spawn(async move {
let res = Server::builder()
.add_service(PosDataServiceServer::new(PosGrpcService::default()))
.serve(addr)
.await;
if res.is_err() {
panic!("grpc server stopped due to error: {:?}", res.err().unwrap());
} else {
info!("grpc server stopped");
}
});
Ok(())
}
}
| stopped | identifier_name |
server.rs | use crate::api::pos_grpc_service::PosGrpcService;
use crate::{DEFAULT_BITS_PER_INDEX, DEFAULT_INDEXES_PER_CYCLE, DEFAULT_SALT};
use anyhow::{bail, Result};
use pos_api::api::job::JobStatus;
use pos_api::api::pos_data_service_server::PosDataServiceServer;
use pos_api::api::{
AbortJobRequest, AddJobRequest, Config, Job, JobStatusStreamResponse, Provider,
};
use pos_compute::{get_providers, PosComputeProvider, COMPUTE_API_CLASS_CPU};
use std::collections::HashMap;
use tokio::sync::mpsc;
use tokio::sync::mpsc::Sender;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Server;
use tonic::Status;
use xactor::*;
/// PosServer is a Spacemesh proof of space data generator service.
/// The service manages a pool of compute providers (gpus) and schedules
/// client-submitted jobs to use these providers to create pos data and to report job
/// progress and errors to clients.
/// todo: support aborting an in-progress job
pub(crate) struct PosServer {
providers: Vec<PosComputeProvider>, // gpu compute providers
pending_jobs: Vec<Job>, // pending
pub(crate) jobs: HashMap<u64, Job>, // in progress
pub(crate) config: Config, // compute config
pub(crate) providers_pool: Vec<u32>, // idle providers
job_status_subscribers: HashMap<u64, Sender<Result<JobStatusStreamResponse, Status>>>,
}
#[async_trait::async_trait]
impl Actor for PosServer {
    // Invoked by the xactor runtime when the service actor starts.
    async fn started(&mut self, _ctx: &mut Context<Self>) -> Result<()> {
        info!("PosServer system service starting...");
        Ok(())
    }
    // Invoked by the xactor runtime after the actor has stopped.
    async fn stopped(&mut self, _ctx: &mut Context<Self>) {
        info!("PosServer system service stopped");
    }
}
impl Service for PosServer {}
impl Default for PosServer {
    // Fresh server with no providers/jobs and the built-in compute config.
    fn default() -> Self {
        PosServer {
            providers: vec![],
            pending_jobs: vec![],
            jobs: Default::default(),
            config: Config {
                data_dir: "./".to_string(), // write pos data to the current directory by default
                indexes_per_compute_cycle: DEFAULT_INDEXES_PER_CYCLE,
                bits_per_index: DEFAULT_BITS_PER_INDEX,
                salt: hex::decode(DEFAULT_SALT).unwrap(), // DEFAULT_SALT is a compile-time hex constant
                n: 512, // n/r/p: hashing cost parameters — presumably scrypt-style, confirm against pos_compute
                r: 1,
                p: 1,
            },
            providers_pool: vec![],
            job_status_subscribers: HashMap::default(),
        }
    }
}
#[message(result = "Result<()>")]
pub(crate) struct Init {
    /// server base config - must be set when initializing
    pub(crate) use_cpu_provider: bool,
}
/// Init the service
#[async_trait::async_trait]
impl Handler<Init> for PosServer {
    /// Populates the provider list and idle pool from the system's compute
    /// providers, keeping only the class (cpu vs non-cpu) the caller asked for.
    /// Fails when no compatible provider exists.
    async fn handle(&mut self, _ctx: &mut Context<Self>, msg: Init) -> Result<()> {
        for provider in get_providers() {
            let is_cpu = provider.compute_api == COMPUTE_API_CLASS_CPU;
            // Skip providers of the class we were told not to use.
            if is_cpu && !msg.use_cpu_provider {
                info!(
                    "skipping cpu provider id: {}, model: {}, compute_api: {}",
                    provider.id,
                    provider.model,
                    pos_api::api_extensions::get_provider_class_string(provider.compute_api)
                );
                continue;
            }
            if !is_cpu && msg.use_cpu_provider {
                info!("Skipping non-cpu provider. {}: {}", provider.id, provider.model);
                continue;
            }
            info!(
                "Adding to pool provider id: {}, model: {}, compute_api: {}",
                provider.id,
                provider.model,
                pos_api::api_extensions::get_provider_class_string(provider.compute_api)
            );
            // Accepted: register it and mark it idle.
            self.providers_pool.push(provider.id);
            self.providers.push(provider);
        }
        if self.providers.is_empty() {
            bail!("no compatible compute providers are available on the system.")
        }
        Ok(())
    }
}
#[message(result = "Result<Vec<Provider>>")]
pub(crate) struct GetAllProviders;
// Returns all system providers
#[async_trait::async_trait]
impl Handler<GetAllProviders> for PosServer {
    /// Maps each known compute provider into its grpc `Provider` representation.
    async fn handle(
        &mut self,
        _ctx: &mut Context<Self>,
        _msg: GetAllProviders,
    ) -> Result<Vec<Provider>> {
        let providers = self
            .providers
            .iter()
            .map(|p| Provider {
                id: p.id,
                model: p.model.clone(),
                class: p.compute_api as i32,
            })
            .collect();
        Ok(providers)
    }
}
#[message(result = "Result<Vec<Job>>")]
pub(crate) struct GetAllJobs;
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetAllJobs> for PosServer {
    /// Snapshot of every known job: in-progress entries followed by queued ones.
    async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetAllJobs) -> Result<Vec<Job>> {
        let all_jobs = self
            .jobs
            .values()
            .cloned()
            .chain(self.pending_jobs.iter().cloned())
            .collect();
        Ok(all_jobs)
    }
}
#[message(result = "Result<Option<Job>>")]
pub(crate) struct GetJob(pub(crate) u64);
// Returns job with current status for job id
#[async_trait::async_trait]
impl Handler<GetJob> for PosServer {
    /// Looks the job up in the in-progress map first, then falls back to the
    /// pending queue; `None` when the id is unknown.
    async fn handle(&mut self, _ctx: &mut Context<Self>, msg: GetJob) -> Result<Option<Job>> {
        let found = self
            .jobs
            .get(&msg.0)
            .cloned()
            .or_else(|| self.pending_jobs.iter().find(|j| j.id == msg.0).cloned());
        Ok(found)
    }
}
#[message(result = "Result<()>")]
pub(crate) struct UpdateJobStatus(pub(crate) Job);
// Update job status - should only be called from a task which is processing the job
#[async_trait::async_trait]
impl Handler<UpdateJobStatus> for PosServer {
    /// Records the new state of a job. When a running job leaves the Started
    /// state its provider is returned to the idle pool and the next queued job
    /// (if any) is started on it. All status-stream subscribers are notified;
    /// subscribers whose receiving side is gone are dropped.
    async fn handle(&mut self, _ctx: &mut Context<Self>, msg: UpdateJobStatus) -> Result<()> {
        let updated_job = msg.0;
        if self.jobs.contains_key(&updated_job.id) {
            // job is running or stopped
            if updated_job.status != JobStatus::Started as i32 {
                info!(
                    "job {} finished. Releasing gpu {} pool",
                    updated_job.id, updated_job.compute_provider_id
                );
                // Job stopped or completed - release provider id of job to pool
                self.providers_pool.push(updated_job.compute_provider_id);
                // pick a pending job and start it
                if let Some(new_job) = self.pending_jobs.pop() {
                    info!("starting queued job {}", new_job.id);
                    self.start_task(&new_job).await?;
                } else {
                    info!("no queued jobs");
                }
            }
            // update job data
            self.jobs.insert(updated_job.id, updated_job.clone());
        } else if let Some(slot) = self
            .pending_jobs
            .iter_mut()
            .find(|j| j.id == updated_job.id)
        {
            // Replace the queued entry in place (was remove + insert at the same index).
            *slot = updated_job.clone();
        } else {
            error!("unrecognized job")
        }
        // Update all job status subscribers. Collect the ids of closed channels
        // so they can be removed after iteration (avoids cloning the whole map
        // of senders on every status update, as the previous code did).
        let mut dead_subscribers = vec![];
        for (id, tx) in self.job_status_subscribers.iter() {
            let res = tx
                .send(Ok(JobStatusStreamResponse {
                    job: Some(updated_job.clone()),
                }))
                .await;
            match res {
                Ok(()) => info!("sent updated job status to subscriber"),
                Err(e) => {
                    error!(
                        "failed to send updated job status to subscriber. deleting it: {}",
                        e
                    );
                    dead_subscribers.push(*id);
                }
            }
        }
        for id in dead_subscribers {
            self.job_status_subscribers.remove(&id);
        }
        Ok(())
    }
}
#[message(result = "Result<Job>")]
pub(crate) struct AddJob(pub(crate) AddJobRequest);
#[async_trait::async_trait]
impl Handler<AddJob> for PosServer {
    /// Validates a new job request and either starts it immediately on a free
    /// provider or queues it until one becomes available. Returns the job
    /// record (with a freshly generated random id) in its resulting state.
    async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AddJob) -> Result<Job> {
        let req = msg.0;
        // Build the job record in its initial (queued) state.
        let job = Job {
            id: rand::random(),
            bits_written: 0,
            size_bits: req.post_size_bits,
            started: 0,
            submitted: datetime::Instant::now().seconds() as u64,
            stopped: 0,
            status: JobStatus::Queued as i32,
            last_error: None,
            friendly_name: req.friendly_name,
            client_id: req.client_id,
            compute_provider_id: u32::MAX,
            pow_difficulty: req.pow_difficulty,
            pow_solution_index: u64::MAX,
        };
        // Reject requests that don't fit the current compute configuration.
        if let Err(e) = job.validate(
            self.config.indexes_per_compute_cycle,
            self.config.bits_per_index,
        ) {
            error!("job can't be added - validation failed: {}, {}", job, e);
            return Err(e);
        }
        if self.providers_pool.is_empty() {
            // all providers busy with in-progress jobs - queue the job
            self.pending_jobs.push(job.clone());
            info!("all providers are busy - queueing job {}...", job.id);
            return Ok(job);
        }
        // A provider is idle - run the job right away.
        self.start_task(&job).await
    }
}
// Note: was "Result<(Config)>" - the extra parentheses around the type were redundant.
#[message(result = "Result<Config>")]
pub(crate) struct GetConfig;
/// Get the current pos compute config
#[async_trait::async_trait]
impl Handler<GetConfig> for PosServer {
    /// Returns a copy of the active compute configuration so callers
    /// can't mutate server state through it.
    async fn handle(&mut self, _ctx: &mut Context<Self>, _msg: GetConfig) -> Result<Config> {
        Ok(self.config.clone())
    }
}
#[message(result = "Result<()>")]
pub(crate) struct AbortJob(pub(crate) AbortJobRequest);
/// Abort a job and optionally delete it (and its data) from the server.
/// (Previous doc comment said "Set the pos compute config" - a copy-paste error.)
#[async_trait::async_trait]
impl Handler<AbortJob> for PosServer {
    async fn handle(&mut self, _ctx: &mut Context<Self>, msg: AbortJob) -> Result<()> {
        let req = msg.0;
        if let Some(_job) = self.jobs.get(&req.id) {
            // todo: abort on-going job - need to do this via sending a message the blocking task
            if req.delete_data {
                // todo: attempt to delete all job files in store (best effort)
            }
        }
        if req.delete_job {
            // Remove the job from both the pending queue and the in-progress map.
            self.pending_jobs.retain(|j| j.id != req.id);
            self.jobs.remove(&req.id);
        }
        Ok(())
    }
}
#[message(result = "Result<()>")]
pub(crate) struct SetConfig(pub(crate) Config);
/// Set the pos compute config
#[async_trait::async_trait]
impl Handler<SetConfig> for PosServer {
    // Replaces the active compute configuration wholesale; affects only
    // jobs submitted after this point.
    async fn handle(&mut self, _ctx: &mut Context<Self>, msg: SetConfig) -> Result<()> {
        self.config = msg.0;
        Ok(())
    }
}
/////////////////////////////////////////////
#[message(result = "Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>>")]
pub(crate) struct SubscribeToJobStatuses {}
#[async_trait::async_trait]
impl Handler<SubscribeToJobStatuses> for PosServer {
async fn handle(
&mut self,
_ctx: &mut Context<Self>,
_msg: SubscribeToJobStatuses,
) -> Result<ReceiverStream<Result<JobStatusStreamResponse, Status>>> |
}
/////////////////////////////////////////////
#[message(result = "Result<()>")]
pub(crate) struct StartGrpcService {
    pub(crate) port: u32, // tcp port to bind the grpc server to
    pub(crate) host: String, // host/interface to bind to
}
#[async_trait::async_trait]
impl Handler<StartGrpcService> for PosServer {
async fn handle(&mut self, _ctx: &mut Context<Self>, msg: StartGrpcService) -> Result<()> {
let addr = format!("{}:{}", msg.host, msg.port).parse().unwrap();
info!("starting grpc service on: {}...", addr);
// todo: add a grpc health service
tokio::task::spawn(async move {
let res = Server::builder()
.add_service(PosDataServiceServer::new(PosGrpcService::default()))
.serve(addr)
.await;
if res.is_err() {
panic!("grpc server stopped due to error: {:?}", res.err().unwrap());
} else {
info!("grpc server stopped");
}
});
Ok(())
}
}
| {
// create channel for streaming job statuses
let (tx, rx) = mpsc::channel(32);
// store the sender indexed by a new unique id
self.job_status_subscribers.insert(rand::random(), tx);
// return the receiver
Ok(ReceiverStream::new(rx))
} | identifier_body |
mod.rs | use self::constants::*;
use super::*;
mod creature;
mod rock;
pub use self::creature::*;
pub use self::rock::*;
use std::cell::{Ref, RefMut};
#[cfg(multithreading)]
type ReferenceCounter = std::sync::Arc;
#[cfg(not(multithreading))]
type ReferenceCounter<A> = std::rc::Rc<A>;
#[cfg(multithreading)]
type MutPoint = std::sync::RwLock;
#[cfg(not(multithreading))]
type MutPoint<A> = std::cell::RefCell<A>;
// Scale factor for the velocity impulse applied when two bodies overlap (see `collide`).
const COLLISION_FORCE: f64 = 0.01;
// Number of food pieces a dead body is scattered into (see `return_to_earth`).
const PIECES: usize = 20;
// Multiplier applied to age when computing metabolism cost (see `metabolize`).
const AGE_FACTOR: f64 = 1.0;
// Minimum age before a creature may initiate a birth (see `wants_primary_birth`).
const MATURE_AGE: f64 = 0.01;
/// Higher-Level SoftBody
///
/// This is a wrapper struct providing some useful functions.
/// Internally it is a reference-counted, interior-mutable handle
/// (`Rc<RefCell<_>>` single-threaded, `Arc<RwLock<_>>` under the
/// `multithreading` cfg), so clones share the same underlying `SoftBody`.
///
/// TODO: come up with a better name.
pub struct HLSoftBody<B = Brain>(ReferenceCounter<MutPoint<SoftBody<B>>>);
impl<B> From<SoftBody<B>> for HLSoftBody<B> {
    // Wraps a plain `SoftBody` in a fresh shared, interior-mutable handle.
    fn from(sb: SoftBody<B>) -> HLSoftBody<B> {
        HLSoftBody(ReferenceCounter::new(MutPoint::new(sb)))
    }
}
impl<B> Clone for HLSoftBody<B> {
    // Cheap clone: bumps the reference count; both handles alias one body.
    fn clone(&self) -> Self {
        HLSoftBody(ReferenceCounter::clone(&self.0))
    }
}
impl<B> PartialEq<HLSoftBody<B>> for HLSoftBody<B> {
    // Identity comparison: handles are equal only when they point at the
    // same allocation, not when their contents happen to match.
    fn eq(&self, rhs: &HLSoftBody<B>) -> bool {
        ReferenceCounter::ptr_eq(&self.0, &rhs.0)
    }
}
impl<B> HLSoftBody<B> {
    /// Wrapper function
    ///
    /// Immutably borrows the inner `SoftBody` (read-lock when multithreaded).
    #[cfg(multithreading)]
    pub fn borrow(&self) -> RwLockReadGuard<SoftBody<B>> {
        return self.0.read().unwrap();
    }
    #[cfg(not(multithreading))]
    pub fn borrow(&self) -> Ref<SoftBody<B>> {
        return self.0.borrow();
    }
    /// Wrapper function
    ///
    /// Mutably borrows the inner `SoftBody` (write-lock when multithreaded).
    #[cfg(multithreading)]
    pub fn borrow_mut(&self) -> RwLockWriteGuard<SoftBody<B>> {
        return self.0.write().unwrap();
    }
    #[cfg(not(multithreading))]
    pub fn borrow_mut(&self) -> RefMut<SoftBody<B>> {
        return self.0.borrow_mut();
    }
    /// Returns a boolean indicating whether this `HLSoftBody` is currently borrowed, useful for debugging.
    #[cfg(multithreading)]
    pub fn can_borrow_mut(&self) -> bool {
        return self.0.try_write().is_ok();
    }
    #[cfg(not(multithreading))]
    pub fn can_borrow_mut(&self) -> bool {
        return self.0.try_borrow_mut().is_ok();
    }
    /// Consume this thing and return the value it holds
    ///
    /// Panics when other clones of this handle are still alive.
    #[cfg(multithreading)]
    pub fn into_inner(self) -> SoftBody<B> {
        self.0.into_inner().unwrap()
    }
    #[cfg(not(multithreading))]
    pub fn into_inner(self) -> SoftBody<B> {
        use std::rc::Rc;
        match Rc::try_unwrap(self.0) {
            Ok(n) => n.into_inner(),
            Err(_e) => panic!("Could not unwrap Rc."),
        }
    }
    /// Calls the same function on all types and updates `SoftBodiesInPositions` by calling `set_sbip`.
    pub fn apply_motions(
        &self,
        time_step: f64,
        board_size: BoardSize,
        terrain: &Terrain,
        sbip: &mut SoftBodiesInPositions<B>,
    ) {
        use std::ops::DerefMut;
        self.borrow_mut()
            .deref_mut()
            .apply_motions(time_step, terrain, board_size);
        self.set_sbip(sbip, board_size);
    }
    /// Updates `SoftBodiesInPositions` and updates itself by calling `update_sbip_variables()`.
    pub fn set_sbip(&self, sbip: &mut SoftBodiesInPositions<B>, board_size: BoardSize) {
        // TODO: Look for optimizations here by cleaning and filling sbip more intelligently.
        let mut self_borrow = self.borrow_mut();
        self_borrow.update_sbip_variables(board_size);
        if self_borrow.moved_between_tiles() {
            // Deregister from tiles this body has left...
            for x in self_borrow.previous_x_range() {
                for y in self_borrow.previous_y_range() {
                    // Prevents deleting tiles we are currently in.
                    if!self_borrow.is_in_tile(x, y) {
                        sbip.remove_soft_body_at(x, y, self.clone());
                    }
                }
            }
            // ...and register in the tiles it has entered.
            for x in self_borrow.current_x_range() {
                for y in self_borrow.current_y_range() {
                    // Prevents duplicate entries.
                    if!self_borrow.was_in_tile(x, y) {
                        sbip.add_soft_body_at(x, y, self.clone());
                    }
                }
            }
        }
    }
    /// Completely removes this `HLSoftBody` from `sbip`.
    ///
    /// NOTE: `HLSoftBody` is added again when `set_sbip` is called.
    pub fn remove_from_sbip(&mut self, sbip: &mut SoftBodiesInPositions<B>) {
        for x in self.borrow().current_x_range() {
            for y in self.borrow().current_y_range() {
                sbip.remove_soft_body_at(x, y, self.clone());
            }
        }
    }
    /// Checks for collision and adjusts velocity if that's the case.
    ///
    /// TODO: clean up the many uses of `borrow()`
    pub fn collide(&self, sbip: &SoftBodiesInPositions<B>) {
        let mut self_br = self.borrow_mut();
        let mut colliders = self_br.get_colliders(sbip);
        // Remove self, if you don't do this then the program will crash because you're borrowing self twice.
        colliders.remove_softbody(self.clone());
        let self_px = self_br.get_px();
        let self_py = self_br.get_py();
        let self_radius = self_br.get_radius();
        let self_mass = self_br.get_mass();
        for collider_rc in colliders {
            let collider = collider_rc.borrow();
            let (collider_px, collider_py) = (collider.get_px(), collider.get_py());
            let distance = distance(self_px, self_py, collider_px, collider_py);
            let combined_radius = self_radius + collider.get_radius();
            // Overlapping bodies push this one away along the line between
            // centers, scaled by COLLISION_FORCE and divided by its mass.
            if distance < combined_radius {
                let force = combined_radius * COLLISION_FORCE;
                let add_vx = (self_px - collider_px) / distance * force / self_mass;
                let add_vy = (self_py - collider_py) / distance * force / self_mass;
                // This is where self is needed to be borrowed mutably.
                self_br.add_vx(add_vx);
                self_br.add_vy(add_vy);
            }
        }
    }
    /// This function requires a reference to a `Board`.
    /// This is usually impossible so you'll have to turn to `unsafe`.
    ///
    /// Scatters this body's energy as food over PIECES random covered tiles,
    /// then removes the body from the position index.
    pub fn return_to_earth(
        &mut self,
        time: f64,
        board_size: BoardSize,
        terrain: &mut Terrain,
        climate: &Climate,
        sbip: &mut SoftBodiesInPositions<B>,
    ) {
        // To keep the borrowchecker happy.
        {
            let self_deref = self.borrow_mut();
            for _i in 0..PIECES {
                let tile_pos = self_deref.get_random_covered_tile(board_size);
                terrain.add_food_or_nothing_at(tile_pos, self_deref.get_energy() / PIECES as f64);
                terrain.update_at(tile_pos, time, climate);
            }
        }
        self.remove_from_sbip(sbip);
    }
}
impl<B: Intentions> HLSoftBody<B> {
    // True when this creature is able and willing to initiate a birth:
    // energy above SAFE_SIZE, a positive birth desire from the brain,
    // and age past MATURE_AGE.
    fn wants_primary_birth(&self, time: f64) -> bool {
        let temp = self.borrow();
        temp.get_energy() > SAFE_SIZE
            && temp.brain.wants_birth() > 0.0
            && temp.get_age(time) > MATURE_AGE
    }
}
impl<B: NeuralNet + Intentions + RecombinationInfinite> HLSoftBody<B> {
    /// Returns a new creature if there's a birth, otherwise returns `None`.
    ///
    /// A birth happens when this creature wants one and the willing parents
    /// in range (including itself) can jointly provide `BABY_SIZE` energy;
    /// that cost is split between them proportionally to their contribution.
    pub fn try_reproduce(
        &mut self,
        time: f64,
        sbip: &mut SoftBodiesInPositions<B>,
        board_size: BoardSize,
    ) -> Option<HLSoftBody<B>> {
        if !self.wants_primary_birth(time) {
            // This creature can't give birth because of age, energy or because it doesn't want to.
            return None;
        }
        // Snapshot own position/size and potential partners with one borrow
        // (was four separate self.borrow() calls).
        let (self_px, self_py, self_radius, mut colliders) = {
            let me = self.borrow();
            (me.get_px(), me.get_py(), me.get_radius(), me.get_colliders(sbip))
        };
        // Remove self
        colliders.remove_softbody(self.clone());
        let mut parents: Vec<HLSoftBody<B>> = colliders
            .into_iter()
            .filter(|rc_soft| {
                let c = rc_soft.borrow();
                let dist = distance(self_px, self_py, c.get_px(), c.get_py());
                let combined_radius = self_radius * FIGHT_RANGE + c.get_radius();
                c.brain.wants_help_birth() > -1.0 // must be a willing creature
                    && dist < combined_radius // must be close enough
                // TODO: find out if this addition to the Processing code works
                // && c.get_age(time) >= MATURE_AGE // creature must be old enough
                // && c.base.get_energy() > SAFE_SIZE
            })
            .collect();
        parents.push(self.clone());
        let available_energy = parents
            .iter()
            .fold(0.0, |acc, c| acc + c.borrow().get_baby_energy());
        if available_energy <= BABY_SIZE {
            // There isn't enough energy available
            return None;
        }
        let energy = BABY_SIZE;
        // Giving birth costs energy, shared proportionally to contribution.
        parents.iter_mut().for_each(|c| {
            let mut c = c.borrow_mut();
            let energy_to_lose = energy * (c.get_baby_energy() / available_energy);
            c.lose_energy(energy_to_lose);
        });
        let sb = HLSoftBody::from(Creature::new_baby(parents, energy, time));
        // Register the newborn in the position index.
        // (Was called twice in a row; one call is sufficient.)
        sb.set_sbip(sbip, board_size);
        // Hooray! Return the little baby!
        Some(sb)
    }
}
pub type SoftBody<B = Brain> = Creature<B>;
// Here are all the functions only applicable to `Creature`s.
impl<B> SoftBody<B> {
/// Performs the energy requirement to keep living.
pub fn metabolize(&mut self, time_step: f64, time: f64) {
// TODO: fix ugly code.
let age = AGE_FACTOR * (time - self.get_birth_time());
let creature = self;
let energy_to_lose = creature.get_energy() * METABOLISM_ENERGY * age * time_step;
creature.lose_energy(energy_to_lose);
// Creature should die if it doesn't have enough energy, this is done by `Board`.
}
} | random_line_split |
|
mod.rs | use self::constants::*;
use super::*;
mod creature;
mod rock;
pub use self::creature::*;
pub use self::rock::*;
use std::cell::{Ref, RefMut};
#[cfg(multithreading)]
type ReferenceCounter = std::sync::Arc;
#[cfg(not(multithreading))]
type ReferenceCounter<A> = std::rc::Rc<A>;
#[cfg(multithreading)]
type MutPoint = std::sync::RwLock;
#[cfg(not(multithreading))]
type MutPoint<A> = std::cell::RefCell<A>;
const COLLISION_FORCE: f64 = 0.01;
const PIECES: usize = 20;
const AGE_FACTOR: f64 = 1.0;
const MATURE_AGE: f64 = 0.01;
/// Higher-Level SoftBody
///
/// This is a wrapper struct providing some useful functions.
///
/// TODO: come up with a better name.
pub struct HLSoftBody<B = Brain>(ReferenceCounter<MutPoint<SoftBody<B>>>);
impl<B> From<SoftBody<B>> for HLSoftBody<B> {
fn from(sb: SoftBody<B>) -> HLSoftBody<B> |
}
impl<B> Clone for HLSoftBody<B> {
fn clone(&self) -> Self {
HLSoftBody(ReferenceCounter::clone(&self.0))
}
}
impl<B> PartialEq<HLSoftBody<B>> for HLSoftBody<B> {
fn eq(&self, rhs: &HLSoftBody<B>) -> bool {
ReferenceCounter::ptr_eq(&self.0, &rhs.0)
}
}
impl<B> HLSoftBody<B> {
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow(&self) -> RwLockReadGuard<SoftBody<B>> {
return self.0.read().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow(&self) -> Ref<SoftBody<B>> {
return self.0.borrow();
}
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow_mut(&self) -> RwLockWriteGuard<SoftBody<B>> {
return self.0.write().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow_mut(&self) -> RefMut<SoftBody<B>> {
return self.0.borrow_mut();
}
/// Returns a boolean indicating whether this `HLSoftBody` is currently borrowed, useful for debugging.
#[cfg(multithreading)]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_write().is_ok();
}
#[cfg(not(multithreading))]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_borrow_mut().is_ok();
}
/// Consume this thing and return the value it holds
#[cfg(multithreading)]
pub fn into_inner(self) -> SoftBody<B> {
self.0.into_inner().unwrap()
}
#[cfg(not(multithreading))]
pub fn into_inner(self) -> SoftBody<B> {
use std::rc::Rc;
match Rc::try_unwrap(self.0) {
Ok(n) => n.into_inner(),
Err(_e) => panic!("Could not unwrap Rc."),
}
}
/// Calls the same function on all types and updates `SoftBodiesInPositions` by calling `set_sbip`.
pub fn apply_motions(
&self,
time_step: f64,
board_size: BoardSize,
terrain: &Terrain,
sbip: &mut SoftBodiesInPositions<B>,
) {
use std::ops::DerefMut;
self.borrow_mut()
.deref_mut()
.apply_motions(time_step, terrain, board_size);
self.set_sbip(sbip, board_size);
}
/// Updates `SoftBodiesInPositions` and updates itself by calling `update_sbip_variables()`.
pub fn set_sbip(&self, sbip: &mut SoftBodiesInPositions<B>, board_size: BoardSize) {
// TODO: Look for optimizations here by cleaning and filling sbip more intelligently.
let mut self_borrow = self.borrow_mut();
self_borrow.update_sbip_variables(board_size);
if self_borrow.moved_between_tiles() {
for x in self_borrow.previous_x_range() {
for y in self_borrow.previous_y_range() {
// Prevents deleting tiles we are currently in.
if!self_borrow.is_in_tile(x, y) {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
for x in self_borrow.current_x_range() {
for y in self_borrow.current_y_range() {
// Prevents duplicate entries.
if!self_borrow.was_in_tile(x, y) {
sbip.add_soft_body_at(x, y, self.clone());
}
}
}
}
}
/// Completely removes this `HLSoftBody` from `sbip`.
///
/// NOTE: `HLSoftBody` is added again when `set_sbip` is called.
pub fn remove_from_sbip(&mut self, sbip: &mut SoftBodiesInPositions<B>) {
for x in self.borrow().current_x_range() {
for y in self.borrow().current_y_range() {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
/// Checks for collision and adjusts velocity if that's the case.
///
/// TODO: clean up the many uses of `borrow()`
pub fn collide(&self, sbip: &SoftBodiesInPositions<B>) {
let mut self_br = self.borrow_mut();
let mut colliders = self_br.get_colliders(sbip);
// Remove self, if you don't do this then the program will crash because you're borrowing self twice.
colliders.remove_softbody(self.clone());
let self_px = self_br.get_px();
let self_py = self_br.get_py();
let self_radius = self_br.get_radius();
let self_mass = self_br.get_mass();
for collider_rc in colliders {
let collider = collider_rc.borrow();
let (collider_px, collider_py) = (collider.get_px(), collider.get_py());
let distance = distance(self_px, self_py, collider_px, collider_py);
let combined_radius = self_radius + collider.get_radius();
if distance < combined_radius {
let force = combined_radius * COLLISION_FORCE;
let add_vx = (self_px - collider_px) / distance * force / self_mass;
let add_vy = (self_py - collider_py) / distance * force / self_mass;
// This is where self is needed to be borrowed mutably.
self_br.add_vx(add_vx);
self_br.add_vy(add_vy);
}
}
}
/// This function requires a reference to a `Board`.
/// This is usually impossible so you'll have to turn to `unsafe`.
pub fn return_to_earth(
&mut self,
time: f64,
board_size: BoardSize,
terrain: &mut Terrain,
climate: &Climate,
sbip: &mut SoftBodiesInPositions<B>,
) {
// To keep the borrowchecker happy.
{
let self_deref = self.borrow_mut();
for _i in 0..PIECES {
let tile_pos = self_deref.get_random_covered_tile(board_size);
terrain.add_food_or_nothing_at(tile_pos, self_deref.get_energy() / PIECES as f64);
terrain.update_at(tile_pos, time, climate);
}
}
self.remove_from_sbip(sbip);
}
}
impl<B: Intentions> HLSoftBody<B> {
fn wants_primary_birth(&self, time: f64) -> bool {
let temp = self.borrow();
temp.get_energy() > SAFE_SIZE
&& temp.brain.wants_birth() > 0.0
&& temp.get_age(time) > MATURE_AGE
}
}
impl<B: NeuralNet + Intentions + RecombinationInfinite> HLSoftBody<B> {
/// Returns a new creature if there's a birth, otherwise returns `None`
// TODO: cleanup
pub fn try_reproduce(
&mut self,
time: f64,
sbip: &mut SoftBodiesInPositions<B>,
board_size: BoardSize,
) -> Option<HLSoftBody<B>> {
if self.wants_primary_birth(time) {
let self_px = self.borrow().get_px();
let self_py = self.borrow().get_py();
let self_radius = self.borrow().get_radius();
let mut colliders = self.borrow().get_colliders(sbip);
// Remove self
colliders.remove_softbody(self.clone());
let mut parents: Vec<HLSoftBody<B>> = colliders
.into_iter()
.filter(|rc_soft| {
let c = rc_soft.borrow();
let dist = distance(self_px, self_py, c.get_px(), c.get_py());
let combined_radius = self_radius * FIGHT_RANGE + c.get_radius();
c.brain.wants_help_birth() > -1.0 // must be a willing creature
&& dist < combined_radius // must be close enough
// TODO: find out if this addition to the Processing code works
// && c.get_age(time) >= MATURE_AGE // creature must be old enough
// && c.base.get_energy() > SAFE_SIZE
})
.collect();
parents.push(self.clone());
let available_energy = parents
.iter()
.fold(0.0, |acc, c| acc + c.borrow().get_baby_energy());
if available_energy > BABY_SIZE {
let energy = BABY_SIZE;
// Giving birth costs energy
parents.iter_mut().for_each(|c| {
let mut c = c.borrow_mut();
let energy_to_lose = energy * (c.get_baby_energy() / available_energy);
c.lose_energy(energy_to_lose);
});
let sb = HLSoftBody::from(Creature::new_baby(parents, energy, time));
sb.set_sbip(sbip, board_size);
sb.set_sbip(sbip, board_size);
// Hooray! Return the little baby!
Some(sb)
} else {
// There isn't enough energy available
None
}
} else {
// This creature can't give birth because of age, energy or because it doesn't want to.
return None;
}
}
}
pub type SoftBody<B = Brain> = Creature<B>;
// Here are all the functions only applicable to `Creature`s.
impl<B> SoftBody<B> {
/// Performs the energy requirement to keep living.
pub fn metabolize(&mut self, time_step: f64, time: f64) {
// TODO: fix ugly code.
let age = AGE_FACTOR * (time - self.get_birth_time());
let creature = self;
let energy_to_lose = creature.get_energy() * METABOLISM_ENERGY * age * time_step;
creature.lose_energy(energy_to_lose);
// Creature should die if it doesn't have enough energy, this is done by `Board`.
}
}
| {
HLSoftBody(ReferenceCounter::new(MutPoint::new(sb)))
} | identifier_body |
mod.rs | use self::constants::*;
use super::*;
mod creature;
mod rock;
pub use self::creature::*;
pub use self::rock::*;
use std::cell::{Ref, RefMut};
#[cfg(multithreading)]
type ReferenceCounter = std::sync::Arc;
#[cfg(not(multithreading))]
type ReferenceCounter<A> = std::rc::Rc<A>;
#[cfg(multithreading)]
type MutPoint = std::sync::RwLock;
#[cfg(not(multithreading))]
type MutPoint<A> = std::cell::RefCell<A>;
const COLLISION_FORCE: f64 = 0.01;
const PIECES: usize = 20;
const AGE_FACTOR: f64 = 1.0;
const MATURE_AGE: f64 = 0.01;
/// Higher-Level SoftBody
///
/// This is a wrapper struct providing some useful functions.
///
/// TODO: come up with a better name.
pub struct HLSoftBody<B = Brain>(ReferenceCounter<MutPoint<SoftBody<B>>>);
impl<B> From<SoftBody<B>> for HLSoftBody<B> {
fn from(sb: SoftBody<B>) -> HLSoftBody<B> {
HLSoftBody(ReferenceCounter::new(MutPoint::new(sb)))
}
}
impl<B> Clone for HLSoftBody<B> {
fn clone(&self) -> Self {
HLSoftBody(ReferenceCounter::clone(&self.0))
}
}
impl<B> PartialEq<HLSoftBody<B>> for HLSoftBody<B> {
fn eq(&self, rhs: &HLSoftBody<B>) -> bool {
ReferenceCounter::ptr_eq(&self.0, &rhs.0)
}
}
impl<B> HLSoftBody<B> {
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow(&self) -> RwLockReadGuard<SoftBody<B>> {
return self.0.read().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow(&self) -> Ref<SoftBody<B>> {
return self.0.borrow();
}
/// Wrapper function
#[cfg(multithreading)]
pub fn borrow_mut(&self) -> RwLockWriteGuard<SoftBody<B>> {
return self.0.write().unwrap();
}
#[cfg(not(multithreading))]
pub fn borrow_mut(&self) -> RefMut<SoftBody<B>> {
return self.0.borrow_mut();
}
/// Returns a boolean indicating whether this `HLSoftBody` is currently borrowed, useful for debugging.
#[cfg(multithreading)]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_write().is_ok();
}
#[cfg(not(multithreading))]
pub fn can_borrow_mut(&self) -> bool {
return self.0.try_borrow_mut().is_ok();
}
/// Consume this thing and return the value it holds
#[cfg(multithreading)]
pub fn into_inner(self) -> SoftBody<B> {
self.0.into_inner().unwrap()
}
#[cfg(not(multithreading))]
pub fn into_inner(self) -> SoftBody<B> {
use std::rc::Rc;
match Rc::try_unwrap(self.0) {
Ok(n) => n.into_inner(),
Err(_e) => panic!("Could not unwrap Rc."),
}
}
/// Calls the same function on all types and updates `SoftBodiesInPositions` by calling `set_sbip`.
pub fn apply_motions(
&self,
time_step: f64,
board_size: BoardSize,
terrain: &Terrain,
sbip: &mut SoftBodiesInPositions<B>,
) {
use std::ops::DerefMut;
self.borrow_mut()
.deref_mut()
.apply_motions(time_step, terrain, board_size);
self.set_sbip(sbip, board_size);
}
/// Updates `SoftBodiesInPositions` and updates itself by calling `update_sbip_variables()`.
pub fn set_sbip(&self, sbip: &mut SoftBodiesInPositions<B>, board_size: BoardSize) {
// TODO: Look for optimizations here by cleaning and filling sbip more intelligently.
let mut self_borrow = self.borrow_mut();
self_borrow.update_sbip_variables(board_size);
if self_borrow.moved_between_tiles() {
for x in self_borrow.previous_x_range() {
for y in self_borrow.previous_y_range() {
// Prevents deleting tiles we are currently in.
if!self_borrow.is_in_tile(x, y) {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
for x in self_borrow.current_x_range() {
for y in self_borrow.current_y_range() {
// Prevents duplicate entries.
if!self_borrow.was_in_tile(x, y) {
sbip.add_soft_body_at(x, y, self.clone());
}
}
}
}
}
/// Completely removes this `HLSoftBody` from `sbip`.
///
/// NOTE: `HLSoftBody` is added again when `set_sbip` is called.
pub fn remove_from_sbip(&mut self, sbip: &mut SoftBodiesInPositions<B>) {
for x in self.borrow().current_x_range() {
for y in self.borrow().current_y_range() {
sbip.remove_soft_body_at(x, y, self.clone());
}
}
}
/// Checks for collision and adjusts velocity if that's the case.
///
/// TODO: clean up the many uses of `borrow()`
pub fn collide(&self, sbip: &SoftBodiesInPositions<B>) {
let mut self_br = self.borrow_mut();
let mut colliders = self_br.get_colliders(sbip);
// Remove self, if you don't do this then the program will crash because you're borrowing self twice.
colliders.remove_softbody(self.clone());
let self_px = self_br.get_px();
let self_py = self_br.get_py();
let self_radius = self_br.get_radius();
let self_mass = self_br.get_mass();
for collider_rc in colliders {
let collider = collider_rc.borrow();
let (collider_px, collider_py) = (collider.get_px(), collider.get_py());
let distance = distance(self_px, self_py, collider_px, collider_py);
let combined_radius = self_radius + collider.get_radius();
if distance < combined_radius |
}
}
/// This function requires a reference to a `Board`.
/// This is usually impossible so you'll have to turn to `unsafe`.
pub fn return_to_earth(
&mut self,
time: f64,
board_size: BoardSize,
terrain: &mut Terrain,
climate: &Climate,
sbip: &mut SoftBodiesInPositions<B>,
) {
// To keep the borrowchecker happy.
{
let self_deref = self.borrow_mut();
for _i in 0..PIECES {
let tile_pos = self_deref.get_random_covered_tile(board_size);
terrain.add_food_or_nothing_at(tile_pos, self_deref.get_energy() / PIECES as f64);
terrain.update_at(tile_pos, time, climate);
}
}
self.remove_from_sbip(sbip);
}
}
impl<B: Intentions> HLSoftBody<B> {
fn wants_primary_birth(&self, time: f64) -> bool {
let temp = self.borrow();
temp.get_energy() > SAFE_SIZE
&& temp.brain.wants_birth() > 0.0
&& temp.get_age(time) > MATURE_AGE
}
}
impl<B: NeuralNet + Intentions + RecombinationInfinite> HLSoftBody<B> {
/// Returns a new creature if there's a birth, otherwise returns `None`
// TODO: cleanup
pub fn try_reproduce(
&mut self,
time: f64,
sbip: &mut SoftBodiesInPositions<B>,
board_size: BoardSize,
) -> Option<HLSoftBody<B>> {
if self.wants_primary_birth(time) {
let self_px = self.borrow().get_px();
let self_py = self.borrow().get_py();
let self_radius = self.borrow().get_radius();
let mut colliders = self.borrow().get_colliders(sbip);
// Remove self
colliders.remove_softbody(self.clone());
let mut parents: Vec<HLSoftBody<B>> = colliders
.into_iter()
.filter(|rc_soft| {
let c = rc_soft.borrow();
let dist = distance(self_px, self_py, c.get_px(), c.get_py());
let combined_radius = self_radius * FIGHT_RANGE + c.get_radius();
c.brain.wants_help_birth() > -1.0 // must be a willing creature
&& dist < combined_radius // must be close enough
// TODO: find out if this addition to the Processing code works
// && c.get_age(time) >= MATURE_AGE // creature must be old enough
// && c.base.get_energy() > SAFE_SIZE
})
.collect();
parents.push(self.clone());
let available_energy = parents
.iter()
.fold(0.0, |acc, c| acc + c.borrow().get_baby_energy());
if available_energy > BABY_SIZE {
let energy = BABY_SIZE;
// Giving birth costs energy
parents.iter_mut().for_each(|c| {
let mut c = c.borrow_mut();
let energy_to_lose = energy * (c.get_baby_energy() / available_energy);
c.lose_energy(energy_to_lose);
});
let sb = HLSoftBody::from(Creature::new_baby(parents, energy, time));
sb.set_sbip(sbip, board_size);
sb.set_sbip(sbip, board_size);
// Hooray! Return the little baby!
Some(sb)
} else {
// There isn't enough energy available
None
}
} else {
// This creature can't give birth because of age, energy or because it doesn't want to.
return None;
}
}
}
pub type SoftBody<B = Brain> = Creature<B>;
// Here are all the functions only applicable to `Creature`s.
impl<B> SoftBody<B> {
    /// Performs the energy requirement to keep living.
    ///
    /// The drain grows with the creature's current energy, its age
    /// (`AGE_FACTOR * (time - birth_time)`) and the elapsed `time_step`.
    pub fn metabolize(&mut self, time_step: f64, time: f64) {
        // TODO: fix ugly code.
        let age = AGE_FACTOR * (time - self.get_birth_time());
        let creature = self;
        let energy_to_lose = creature.get_energy() * METABOLISM_ENERGY * age * time_step;
        creature.lose_energy(energy_to_lose);
        // Creature should die if it doesn't have enough energy, this is done by `Board`.
    }
}
| {
let force = combined_radius * COLLISION_FORCE;
let add_vx = (self_px - collider_px) / distance * force / self_mass;
let add_vy = (self_py - collider_py) / distance * force / self_mass;
// This is where self is needed to be borrowed mutably.
self_br.add_vx(add_vx);
self_br.add_vy(add_vy);
} | conditional_block |
mod.rs | use self::constants::*;
use super::*;
mod creature;
mod rock;
pub use self::creature::*;
pub use self::rock::*;
use std::cell::{Ref, RefMut};
// Thread-safe wrappers for the `multithreading` build; cheap single-threaded
// equivalents otherwise.
// BUGFIX: the multithreaded aliases were missing their generic parameter
// (`type ReferenceCounter = std::sync::Arc;`), which cannot compile when
// built with `--cfg multithreading`.
#[cfg(multithreading)]
type ReferenceCounter<A> = std::sync::Arc<A>;
#[cfg(not(multithreading))]
type ReferenceCounter<A> = std::rc::Rc<A>;
#[cfg(multithreading)]
type MutPoint<A> = std::sync::RwLock<A>;
#[cfg(not(multithreading))]
type MutPoint<A> = std::cell::RefCell<A>;
/// Strength of the repulsion applied when two bodies overlap (see `collide`).
const COLLISION_FORCE: f64 = 0.01;
/// Number of food pieces a corpse is split into by `return_to_earth`.
const PIECES: usize = 20;
/// Scales the age term in `metabolize`.
const AGE_FACTOR: f64 = 1.0;
/// Minimum age before a creature can reproduce (see `wants_primary_birth`).
const MATURE_AGE: f64 = 0.01;
/// Higher-Level SoftBody
///
/// This is a wrapper struct providing some useful functions.
///
/// TODO: come up with a better name.
pub struct | <B = Brain>(ReferenceCounter<MutPoint<SoftBody<B>>>);
impl<B> From<SoftBody<B>> for HLSoftBody<B> {
    /// Wraps a plain `SoftBody` in a fresh shared, mutable handle.
    fn from(sb: SoftBody<B>) -> HLSoftBody<B> {
        HLSoftBody(ReferenceCounter::new(MutPoint::new(sb)))
    }
}
impl<B> Clone for HLSoftBody<B> {
    /// Clones the handle only; both clones point to the same `SoftBody`.
    fn clone(&self) -> Self {
        HLSoftBody(ReferenceCounter::clone(&self.0))
    }
}
impl<B> PartialEq<HLSoftBody<B>> for HLSoftBody<B> {
    /// Pointer equality: two handles are equal iff they share the same allocation.
    fn eq(&self, rhs: &HLSoftBody<B>) -> bool {
        ReferenceCounter::ptr_eq(&self.0, &rhs.0)
    }
}
impl<B> HLSoftBody<B> {
    /// Wrapper function
    #[cfg(multithreading)]
    pub fn borrow(&self) -> RwLockReadGuard<SoftBody<B>> {
        return self.0.read().unwrap();
    }
    /// Immutably borrows the wrapped `SoftBody` (single-threaded build).
    /// Panics if a mutable borrow is active.
    #[cfg(not(multithreading))]
    pub fn borrow(&self) -> Ref<SoftBody<B>> {
        return self.0.borrow();
    }
    /// Wrapper function
    #[cfg(multithreading)]
    pub fn borrow_mut(&self) -> RwLockWriteGuard<SoftBody<B>> {
        return self.0.write().unwrap();
    }
    /// Mutably borrows the wrapped `SoftBody` (single-threaded build).
    /// Panics if any other borrow is active.
    #[cfg(not(multithreading))]
    pub fn borrow_mut(&self) -> RefMut<SoftBody<B>> {
        return self.0.borrow_mut();
    }
    /// Returns a boolean indicating whether this `HLSoftBody` is currently borrowed, useful for debugging.
    #[cfg(multithreading)]
    pub fn can_borrow_mut(&self) -> bool {
        return self.0.try_write().is_ok();
    }
    /// Returns `true` if a mutable borrow is currently possible, useful for debugging.
    #[cfg(not(multithreading))]
    pub fn can_borrow_mut(&self) -> bool {
        return self.0.try_borrow_mut().is_ok();
    }
    /// Consume this thing and return the value it holds
    #[cfg(multithreading)]
    pub fn into_inner(self) -> SoftBody<B> {
        self.0.into_inner().unwrap()
    }
    /// Consumes this wrapper and returns the inner `SoftBody`.
    /// Panics if other clones of the handle still exist.
    #[cfg(not(multithreading))]
    pub fn into_inner(self) -> SoftBody<B> {
        use std::rc::Rc;
        match Rc::try_unwrap(self.0) {
            Ok(n) => n.into_inner(),
            Err(_e) => panic!("Could not unwrap Rc."),
        }
    }
    /// Calls the same function on all types and updates `SoftBodiesInPositions` by calling `set_sbip`.
    pub fn apply_motions(
        &self,
        time_step: f64,
        board_size: BoardSize,
        terrain: &Terrain,
        sbip: &mut SoftBodiesInPositions<B>,
    ) {
        use std::ops::DerefMut;
        self.borrow_mut()
            .deref_mut()
            .apply_motions(time_step, terrain, board_size);
        self.set_sbip(sbip, board_size);
    }
    /// Updates `SoftBodiesInPositions` and updates itself by calling `update_sbip_variables()`.
    pub fn set_sbip(&self, sbip: &mut SoftBodiesInPositions<B>, board_size: BoardSize) {
        // TODO: Look for optimizations here by cleaning and filling sbip more intelligently.
        let mut self_borrow = self.borrow_mut();
        self_borrow.update_sbip_variables(board_size);
        if self_borrow.moved_between_tiles() {
            // Deregister from the tiles we left...
            for x in self_borrow.previous_x_range() {
                for y in self_borrow.previous_y_range() {
                    // Prevents deleting tiles we are currently in.
                    if !self_borrow.is_in_tile(x, y) {
                        sbip.remove_soft_body_at(x, y, self.clone());
                    }
                }
            }
            // ...and register in the tiles we entered.
            for x in self_borrow.current_x_range() {
                for y in self_borrow.current_y_range() {
                    // Prevents duplicate entries.
                    if !self_borrow.was_in_tile(x, y) {
                        sbip.add_soft_body_at(x, y, self.clone());
                    }
                }
            }
        }
    }
    /// Completely removes this `HLSoftBody` from `sbip`.
    ///
    /// NOTE: `HLSoftBody` is added again when `set_sbip` is called.
    pub fn remove_from_sbip(&mut self, sbip: &mut SoftBodiesInPositions<B>) {
        for x in self.borrow().current_x_range() {
            for y in self.borrow().current_y_range() {
                sbip.remove_soft_body_at(x, y, self.clone());
            }
        }
    }
    /// Checks for collision and adjusts velocity if that's the case.
    ///
    /// TODO: clean up the many uses of `borrow()`
    pub fn collide(&self, sbip: &SoftBodiesInPositions<B>) {
        let mut self_br = self.borrow_mut();
        let mut colliders = self_br.get_colliders(sbip);
        // Remove self, if you don't do this then the program will crash because you're borrowing self twice.
        colliders.remove_softbody(self.clone());
        let self_px = self_br.get_px();
        let self_py = self_br.get_py();
        let self_radius = self_br.get_radius();
        let self_mass = self_br.get_mass();
        for collider_rc in colliders {
            let collider = collider_rc.borrow();
            let (collider_px, collider_py) = (collider.get_px(), collider.get_py());
            let distance = distance(self_px, self_py, collider_px, collider_py);
            let combined_radius = self_radius + collider.get_radius();
            if distance < combined_radius {
                // Push this body away along the line between the two centres.
                // NOTE(review): if the centres coincide, `distance` is 0 and the
                // divisions below produce NaN/inf velocities — confirm this
                // cannot happen in practice.
                let force = combined_radius * COLLISION_FORCE;
                let add_vx = (self_px - collider_px) / distance * force / self_mass;
                let add_vy = (self_py - collider_py) / distance * force / self_mass;
                // This is where self is needed to be borrowed mutably.
                self_br.add_vx(add_vx);
                self_br.add_vy(add_vy);
            }
        }
    }
    /// This function requires a reference to a `Board`.
    /// This is usually impossible so you'll have to turn to `unsafe`.
    pub fn return_to_earth(
        &mut self,
        time: f64,
        board_size: BoardSize,
        terrain: &mut Terrain,
        climate: &Climate,
        sbip: &mut SoftBodiesInPositions<B>,
    ) {
        // To keep the borrowchecker happy.
        {
            let self_deref = self.borrow_mut();
            // Scatter this body's remaining energy over the terrain as food.
            for _i in 0..PIECES {
                let tile_pos = self_deref.get_random_covered_tile(board_size);
                terrain.add_food_or_nothing_at(tile_pos, self_deref.get_energy() / PIECES as f64);
                terrain.update_at(tile_pos, time, climate);
            }
        }
        self.remove_from_sbip(sbip);
    }
}
impl<B: Intentions> HLSoftBody<B> {
    /// Whether this creature is currently able and willing to start a birth:
    /// enough energy (> `SAFE_SIZE`), a brain that wants a birth, and an age
    /// above `MATURE_AGE`. Checks are evaluated in that order and
    /// short-circuit.
    fn wants_primary_birth(&self, time: f64) -> bool {
        let body = self.borrow();
        if body.get_energy() <= SAFE_SIZE {
            return false;
        }
        if body.brain.wants_birth() <= 0.0 {
            return false;
        }
        body.get_age(time) > MATURE_AGE
    }
}
impl<B: NeuralNet + Intentions + RecombinationInfinite> HLSoftBody<B> {
    /// Returns a new creature if there's a birth, otherwise returns `None`.
    ///
    /// A birth happens when this creature wants one (see `wants_primary_birth`)
    /// and the combined "baby energy" of all willing nearby parents exceeds
    /// `BABY_SIZE`; each parent then pays a share proportional to its
    /// contribution.
    // TODO: cleanup
    pub fn try_reproduce(
        &mut self,
        time: f64,
        sbip: &mut SoftBodiesInPositions<B>,
        board_size: BoardSize,
    ) -> Option<HLSoftBody<B>> {
        if !self.wants_primary_birth(time) {
            // This creature can't give birth because of age, energy or because it doesn't want to.
            return None;
        }
        let self_px = self.borrow().get_px();
        let self_py = self.borrow().get_py();
        let self_radius = self.borrow().get_radius();
        let mut colliders = self.borrow().get_colliders(sbip);
        // Remove self
        colliders.remove_softbody(self.clone());
        let mut parents: Vec<HLSoftBody<B>> = colliders
            .into_iter()
            .filter(|rc_soft| {
                let c = rc_soft.borrow();
                let dist = distance(self_px, self_py, c.get_px(), c.get_py());
                let combined_radius = self_radius * FIGHT_RANGE + c.get_radius();
                c.brain.wants_help_birth() > -1.0 // must be a willing creature
                    && dist < combined_radius // must be close enough
                // TODO: find out if this addition to the Processing code works
                // && c.get_age(time) >= MATURE_AGE // creature must be old enough
                // && c.base.get_energy() > SAFE_SIZE
            })
            .collect();
        parents.push(self.clone());
        let available_energy = parents
            .iter()
            .fold(0.0, |acc, c| acc + c.borrow().get_baby_energy());
        if available_energy > BABY_SIZE {
            let energy = BABY_SIZE;
            // Giving birth costs energy, proportional to each parent's contribution.
            parents.iter_mut().for_each(|c| {
                let mut c = c.borrow_mut();
                let energy_to_lose = energy * (c.get_baby_energy() / available_energy);
                c.lose_energy(energy_to_lose);
            });
            let sb = HLSoftBody::from(Creature::new_baby(parents, energy, time));
            // BUGFIX: `set_sbip` used to be called twice in a row; registering
            // the newborn in `SoftBodiesInPositions` once is enough.
            sb.set_sbip(sbip, board_size);
            // Hooray! Return the little baby!
            Some(sb)
        } else {
            // There isn't enough energy available.
            None
        }
    }
}
pub type SoftBody<B = Brain> = Creature<B>;
// Here are all the functions only applicable to `Creature`s.
impl<B> SoftBody<B> {
    /// Performs the energy requirement to keep living: drains an amount of
    /// energy proportional to the creature's current energy, its age and the
    /// elapsed `time_step`.
    pub fn metabolize(&mut self, time_step: f64, time: f64) {
        let age = AGE_FACTOR * (time - self.get_birth_time());
        let loss = self.get_energy() * METABOLISM_ENERGY * age * time_step;
        self.lose_energy(loss);
        // A creature that runs out of energy is removed by `Board`, not here.
    }
}
| HLSoftBody | identifier_name |
calendar.rs | use crate::{FloatNum, Int};
use std::ops::Rem;
#[derive(Clone)]
pub struct Calendar {
    /// Days in each month, January..December (index 0 unused).
    pub mondays: [Int; 13],
    /// Accumulated days at the end of each month in a 365-day year.
    pub mona365: [Int; 13],
    /// Accumulated days per month; filled in by `step2cal`/`cal2step`
    /// (leap-year aware).
    pub monaccu: [Int; 13],
    /// Days in a 400-year Gregorian cycle (97 leap years).
    pub ny400d: Int,
    /// Days in a 100-year cycle (24 leap years).
    pub ny100d: Int,
    /// Days in a 4-year cycle (1 leap year).
    pub ny004d: Int,
    /// Days in a regular year.
    pub ny001d: Int,
    // NOTE(review): purpose of `nud` is not evident from this file — confirm.
    pub nud: Int,
    // TODO?
    // These values are copied from pumamod in subroutine calini
    pub n_days_per_month: Int,
    pub n_days_per_year: Int,
    /// First time step of the simulation.
    pub n_start_step: Int,
    /// Time steps per day.
    pub ntspd: Int,
    /// Length of a solar day in seconds.
    pub solar_day: FloatNum,
}
impl Default for Calendar {
    /// Defaults to the simplified 360-day calendar (30-day months);
    /// the Gregorian cycle lengths are precomputed for the real calendar.
    fn default() -> Self {
        Self {
            // Days per month, January..December (index 0 unused).
            mondays: [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
            // Accumulated days at each month's end in a 365-day year.
            mona365: [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
            // Filled in later by `step2cal`/`cal2step`.
            monaccu: [0; 13],
            ny400d: 400 * 365 + 97, // days per 400-year Gregorian cycle
            ny100d: 100 * 365 + 24, // days per century
            ny004d: 4 * 365 + 1,    // days per tetrade (4-year cycle)
            ny001d: 365,            // days per regular year
            nud: 6,
            n_days_per_month: 30,
            n_days_per_year: 360,
            n_start_step: 0,
            ntspd: 1,
            solar_day: 86400.0, // sec
        }
    }
}
/// Converts a day-of-year (`kyday`) into month (`kmon`) and day-of-month (`kday`).
///
/// NOTE(review): for the 365-day calendar the month search uses `mona365`, but
/// the day-of-month uses `monaccu`, which is only filled by
/// `step2cal`/`cal2step` — confirm `monaccu` is initialized before this runs.
/// NOTE(review): in the simplified-calendar branch `kmon` comes out 0-based
/// (`kyday == 1` gives `kmon == 0`), while the rest of this module uses
/// 1-based months — looks like a missing `+ 1`; confirm against the Fortran
/// original.
fn yday2mmdd(cal: &Calendar, mut kyday: &mut Int, mut kmon: &mut Int, mut kday: &mut Int) {
    if cal.n_days_per_year == 365 {
        *kmon = 1;
        while *kyday > cal.mona365[*kmon as usize] {
            *kmon = *kmon + 1;
        }
        *kday = *kyday - cal.monaccu[*kmon as usize - 1];
    } else {
        *kmon = (*kyday - 1) / cal.n_days_per_month;
        *kday = *kyday - cal.n_days_per_month * *kmon;
    }
}
fn nweekday(kday: Int) -> Int {
(kday + 5).rem(7) as Int
}
pub fn ndayofyear(kstep: Int, cal: &mut Calendar) -> Int |
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7)! year,month,day,hour,min,weekday,leapyear
/// Converts a time step into a Gregorian calendar date, writing
/// year, month, day, hour, minute, weekday and a leap-year flag to `kdatim`.
fn step2cal(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], cal: &mut Calendar) {
    let iy100: Int; // century in segment [0..3]
    let id100: Int; // day within the century
    let iy004: Int; // tetrade in century [0..24]
    let id004: Int; // day within the tetrade
    let iy001: Int; // year in tetrade [0..3]
    let mut id001: Int; // day within the year
    let leap: bool;
    let idall = kstep / ktspd; // whole days since simulation start
    let iy400 = idall / cal.ny400d; // segment (of 400 years)
    let id400 = idall.rem(cal.ny400d);
    if id400 <= cal.ny100d {
        // century year is leap year
        iy100 = 0; // century in segment [0]
        id100 = id400;
        iy004 = id100 / cal.ny004d; // tetrade in century [0..24]
        id004 = id100.rem(cal.ny004d);
        leap = id004 <= cal.ny001d;
        if leap {
            iy001 = 0; // year in tetrade [0]
            id001 = id004;
        } else {
            iy001 = (id004 - 1) / cal.ny001d; // year in tetrade [1,2,3]
            id001 = (id004 - 1).rem(cal.ny001d);
        }
    } else {
        // century year is not leap year
        iy100 = (id400 - 1) / cal.ny100d; // century in segment [1,2,3]
        id100 = (id400 - 1).rem(cal.ny100d);
        if id100 < cal.ny004d - 1 {
            iy004 = 0; // tetrade in century [0]
            id004 = id100;
            leap = false;
            iy001 = id004 / cal.ny001d; // year in tetrade [1,2,3]
            id001 = id004.rem(cal.ny001d);
        } else {
            iy004 = (id100 + 1) / cal.ny004d; // tetrade in century [1..24]
            id004 = (id100 + 1).rem(cal.ny004d);
            leap = id004 <= cal.ny001d;
            if leap {
                iy001 = 0; // year in tetrade [0]
                id001 = id004;
            } else {
                iy001 = (id004 - 1) / cal.ny001d;
                id001 = (id004 - 1).rem(cal.ny001d);
            }
        }
    }
    // BUGFIX: the tetrade index contributes `iy004 * 4` years, not `iy004 + 4`
    // (this is the inverse of `iy004 = id100 / 4` in `cal2step`).
    let iyea = iy400 * 400 + iy100 * 100 + iy004 * 4 + iy001;
    // Build the accumulated-days table, adding one day to February on leap years.
    cal.monaccu[0] = cal.mondays[0];
    cal.monaccu[1] = cal.mondays[1];
    cal.monaccu[2] = cal.mondays[1] + cal.mondays[2];
    if leap {
        cal.monaccu[2] = cal.monaccu[2] + 1;
    }
    // BUGFIX: everything from here on used to sit inside `if leap { ... }`,
    // so month/day/time were never computed in non-leap years; the leap
    // adjustment now covers only the February increment, matching `cal2step`.
    for jmon in 3..13 {
        cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
    }
    let mut imon = 1;
    id001 = id001 + 1;
    while id001 > cal.monaccu[imon as usize] {
        imon = imon + 1;
    }
    let iday = id001 - cal.monaccu[imon as usize - 1];
    let istp = kstep.rem(ktspd);
    let mut imin = (istp * 1440) / ktspd;
    let ihou = imin / 60;
    imin = imin.rem(60);
    kdatim[0] = iyea;
    kdatim[1] = imon;
    kdatim[2] = iday;
    kdatim[3] = ihou;
    kdatim[4] = imin;
    kdatim[5] = (kstep / ktspd + 5).rem(7); // day of week
    kdatim[6] = if leap { 1 } else { 0 };
}
fn cal2step(
ktspd: Int, // time steps per day
kyea: Int, // current year of simulation
kmon: Int, // current month of simulation
kday: Int, // current day of simulation
khou: Int, // current hour of simulation
kmin: Int, // current minute of simulation
mut kstep: &mut Int, //time step since simulation start
mut cal: &mut Calendar,
) {
let mut idall: Int;
let mut ilp: Int;
let mut iy400: Int;
let mut id400: Int;
let mut iy100: Int;
let mut id100: Int;
let mut iy004: Int;
let mut id004: Int;
let mut jmon: Int;
let leap: bool;
// simplified calendar
if cal.n_days_per_year!= 365 {
*kstep =
ktspd * (kyea * cal.n_days_per_year + (kmon - 1) * cal.n_days_per_month + kday - 1);
return;
}
iy400 = kyea / 400; // segment [400]
id400 = kyea.rem(400); // year in segment [0..399]
iy100 = id400 / 100; // century [0,1,2,3]
id100 = id400.rem(100); // year in century [0..99]
iy004 = id100 / 4; // tetrade [0..24]
id004 = id100.rem(4); // year in tetrade [0,1,2,3]
leap = (id004 == 0 && (id100!= 0 || id400 == 0));
ilp = -1;
if id004 > 0 {
ilp = ilp + 1;
}
if iy100 > 0 && id100 == 0 {
ilp = ilp + 1;
}
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
}
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
idall = iy400 * cal.ny400d
+ iy100 * cal.ny100d
+ iy004 * cal.ny004d
+ id004 * cal.ny001d
+ cal.monaccu[kmon as usize - 1]
+ kday
+ ilp;
*kstep = ktspd * idall + (ktspd * (khou * 60 + kmin)) / 1440;
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7)! year,month,day,hour,min,weekday,leapyear
/// Converts a time step into a date for the simplified calendar with
/// fixed-length months; the weekday and leap-year slots are always 0.
fn step2cal30(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], cal: &mut Calendar) {
    let total_days = kstep / ktspd;
    let year = total_days / cal.n_days_per_year;
    let day_of_year = total_days.rem(cal.n_days_per_year);
    let month = day_of_year / cal.n_days_per_month + 1;
    let day = day_of_year.rem(cal.n_days_per_month) + 1;
    // Convert the sub-day step fraction into minutes of the solar day.
    let step_in_day = kstep.rem(ktspd);
    let mut minute =
        ((step_in_day as FloatNum * cal.solar_day) / (ktspd * 60) as FloatNum) as Int;
    let hour = minute / 60;
    minute = minute.rem(60);
    kdatim[0] = year;
    kdatim[1] = month;
    kdatim[2] = day;
    kdatim[3] = hour;
    kdatim[4] = minute;
    kdatim[5] = 0; // day of week (not tracked in the simple calendar)
    kdatim[6] = 0; // leap year (never, in the simple calendar)
}
/// Decomposes time step `kstep` into minute, hour, day, month and year,
/// dispatching to the real calendar when `n_days_per_year == 365`, otherwise
/// to the simplified one.
pub fn ntomin(
    kstep: Int,
    kmin: &mut Int,
    khou: &mut Int,
    kday: &mut Int,
    kmon: &mut Int,
    kyea: &mut Int,
    cal: &mut Calendar,
) {
    let mut datim = [0; 7];
    match cal.n_days_per_year {
        365 => step2cal(kstep, cal.ntspd, &mut datim, cal),
        _ => step2cal30(kstep, cal.ntspd, &mut datim, cal),
    }
    // datim layout: year, month, day, hour, minute, weekday, leap flag.
    *kyea = datim[0];
    *kmon = datim[1];
    *kday = datim[2];
    *khou = datim[3];
    *kmin = datim[4];
}
/// Formats time step `istep` as a date string, e.g. `" 1-Jan-0001  12:05"`,
/// matching the Fortran format `(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)` quoted
/// in the comment block below this function.
fn ntodat(istep: Int, datch: &mut String, cal: &mut Calendar) {
    const MONA: [&str; 12] = [
        "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
    ];
    let mut imin: Int = 1;
    let mut ihou: Int = 1;
    let mut iday: Int = 1;
    let mut imon: Int = 1;
    let mut iyea: Int = 1;
    ntomin(
        istep, &mut imin, &mut ihou, &mut iday, &mut imon, &mut iyea, cal,
    );
    // BUGFIX: pad the fields like the Fortran original (zero-padded year and
    // minute, width-2 day/hour) instead of unpadded `{}` placeholders.
    *datch = format!(
        "{:2}-{}-{:04}  {:2}:{:02}",
        iday,
        MONA[imon as usize - 1],
        iyea,
        ihou,
        imin
    )
}
//! =================
//! SUBROUTINE NTODAT
//! =================
//
// subroutine ntodat(istep,datch)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// call ntomin(istep,imin,ihou,iday,imon,iyea)
// write (datch,20030) iday,mona(imon),iyea,ihou,imin
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
//! =================
//! SUBROUTINE DTODAT
//! =================
//
// subroutine dtodat(id,datch)
// integer :: id(6)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// write (datch,20030) id(3),mona(id(2)),id(1),id(4),id(5)
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
//! =================
//! SUBROUTINE MOMINT
//! =================
//
//! Compute month indices and weights for time interpolation from
//! monthly mean data to current timestep
//
// subroutine momint(kperp,kstep,kmona,kmonb,pweight)
// use calmod
// implicit none
// integer, intent(in ) :: kperp ! perpetual mode?
// integer, intent(in ) :: kstep ! target step
// integer, intent(out) :: kmona ! current month (1-12)
// integer, intent(out) :: kmonb ! next or previous month (0-13)
// real , intent(out) :: pweight! interpolation weight
//
// integer :: idatim(7)! date time array
// integer :: iday
// integer :: ihour
// integer :: imin
// integer :: jmonb ! next or previous month (1-12)
//
// real :: zday ! fractional day (including hour & minute)
// real :: zdpma ! days per month a
// real :: zdpmb ! days per month b
// real :: zmeda ! median day of the month a
// real :: zmedb ! median day of the month b
//
//! convert time step to date / time
//
// idatim(:) = 0
// if (kperp > 0) then ! perpetual date
// call yday2mmdd(kperp,idatim(2),idatim(3))
// else if (n_days_per_year == 365) then ! real calendar
// call step2cal(kstep,ntspd,idatim)
// else ! simple calendar
// call step2cal30(kstep,ntspd,idatim)
// endif
//
// kmona = idatim(2)
// iday = idatim(3)
// ihour = idatim(4)
// imin = idatim(5)
//
//! set fractional day
//
// zday = iday + ((ihour * 60.0 + imin) * 60.0) / solar_day
//
//! compute median of month a
//
// zdpma = n_days_per_month
// if (n_days_per_year == 365) then
// zdpma = mondays(kmona)
// if (kmona == 2) zdpma = zdpma + idatim(7)! leap year
// endif
// zmeda = 0.5 * (zdpma + 1.0)! median day a
//
//! define neighbour month
//
// if (zday > zmeda) then
// kmonb = kmona + 1! next month (maybe 13)
// else
// kmonb = kmona - 1! previous month (maybe 0)
// endif
//
//! compute median of month b
//
// zdpmb = n_days_per_month
// if (n_days_per_year == 365) then
// jmonb = mod(kmonb+11,12) + 1! convert month (0-13) -> (1-12)
// zdpmb = mondays(jmonb)
// if (jmonb == 2) zdpmb = zdpmb + idatim(7)! leap year
// endif
// zmedb = 0.5 * (zdpmb + 1.0)! median day b
//
//! compute weight
//
// pweight = abs(zday - zmeda) / (zmeda + zmedb - 1.0)
//
// return
// end subroutine momint
//
| {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.monaccu[idatim[1] as usize - 1];
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.n_days_per_month * (idatim[1] - 1);
}
0
} | identifier_body |
calendar.rs | use crate::{FloatNum, Int};
use std::ops::Rem;
#[derive(Clone)]
pub struct Calendar {
pub mondays: [Int; 13],
pub mona365: [Int; 13],
pub monaccu: [Int; 13],
pub ny400d: Int,
pub ny100d: Int,
pub ny004d: Int,
pub ny001d: Int,
pub nud: Int,
// TODO?
// These values are copied from pumamod in subroutine calini
pub n_days_per_month: Int,
pub n_days_per_year: Int,
pub n_start_step: Int,
pub ntspd: Int,
pub solar_day: FloatNum,
}
impl Default for Calendar {
    /// Defaults to the simplified 360-day calendar (30-day months);
    /// the Gregorian cycle lengths are precomputed for the real calendar.
    fn default() -> Self {
        Self {
            // Days per month, January..December (index 0 unused).
            mondays: [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
            // Accumulated days at each month's end in a 365-day year.
            mona365: [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
            // Filled in later by `step2cal`/`cal2step`.
            monaccu: [0; 13],
            ny400d: 400 * 365 + 97, // days per 400-year Gregorian cycle
            ny100d: 100 * 365 + 24, // days per century
            ny004d: 4 * 365 + 1,    // days per tetrade (4-year cycle)
            ny001d: 365,            // days per regular year
            nud: 6,
            n_days_per_month: 30,
            n_days_per_year: 360,
            n_start_step: 0,
            ntspd: 1,
            solar_day: 86400.0, // sec
        }
    }
}
/// Converts a day-of-year (`kyday`) into month (`kmon`) and day-of-month (`kday`).
///
/// NOTE(review): for the 365-day calendar the month search uses `mona365`, but
/// the day-of-month uses `monaccu`, which is only filled by
/// `step2cal`/`cal2step` — confirm `monaccu` is initialized before this runs.
/// NOTE(review): in the simplified-calendar branch `kmon` comes out 0-based
/// (`kyday == 1` gives `kmon == 0`), while the rest of this module uses
/// 1-based months — looks like a missing `+ 1`; confirm against the Fortran
/// original.
fn yday2mmdd(cal: &Calendar, mut kyday: &mut Int, mut kmon: &mut Int, mut kday: &mut Int) {
    if cal.n_days_per_year == 365 {
        *kmon = 1;
        while *kyday > cal.mona365[*kmon as usize] {
            *kmon = *kmon + 1;
        }
        *kday = *kyday - cal.monaccu[*kmon as usize - 1];
    } else {
        *kmon = (*kyday - 1) / cal.n_days_per_month;
        *kday = *kyday - cal.n_days_per_month * *kmon;
    }
}
/// Maps an absolute day count to a weekday index in `0..7`
/// (offset by 5 so day 0 lands on the intended weekday).
fn nweekday(kday: Int) -> Int {
    (kday + 5).rem(7) as Int
}
pub fn ndayofyear(kstep: Int, cal: &mut Calendar) -> Int {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.monaccu[idatim[1] as usize - 1];
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.n_days_per_month * (idatim[1] - 1);
}
0
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7)! year,month,day,hour,min,weekday,leapyear
/// Converts a time step into a Gregorian calendar date, writing
/// year, month, day, hour, minute, weekday and a leap-year flag to `kdatim`.
// BUGFIX: the first branch of the 400-year-segment split was truncated in
// this copy; restored from the intact copy of this function.
fn step2cal(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], cal: &mut Calendar) {
    let iy100: Int; // century in segment [0..3]
    let id100: Int; // day within the century
    let iy004: Int; // tetrade in century [0..24]
    let id004: Int; // day within the tetrade
    let iy001: Int; // year in tetrade [0..3]
    let mut id001: Int; // day within the year
    let leap: bool;
    let idall = kstep / ktspd; // whole days since simulation start
    let iy400 = idall / cal.ny400d; // segment (of 400 years)
    let id400 = idall.rem(cal.ny400d);
    if id400 <= cal.ny100d {
        // century year is leap year
        iy100 = 0; // century in segment [0]
        id100 = id400;
        iy004 = id100 / cal.ny004d; // tetrade in century [0..24]
        id004 = id100.rem(cal.ny004d);
        leap = id004 <= cal.ny001d;
        if leap {
            iy001 = 0; // year in tetrade [0]
            id001 = id004;
        } else {
            iy001 = (id004 - 1) / cal.ny001d; // year in tetrade [1,2,3]
            id001 = (id004 - 1).rem(cal.ny001d);
        }
    } else {
        // century year is not leap year
        iy100 = (id400 - 1) / cal.ny100d; // century in segment [1,2,3]
        id100 = (id400 - 1).rem(cal.ny100d);
        if id100 < cal.ny004d - 1 {
            iy004 = 0; // tetrade in century [0]
            id004 = id100;
            leap = false;
            iy001 = id004 / cal.ny001d; // year in tetrade [1,2,3]
            id001 = id004.rem(cal.ny001d);
        } else {
            iy004 = (id100 + 1) / cal.ny004d; // tetrade in century [1..24]
            id004 = (id100 + 1).rem(cal.ny004d);
            leap = id004 <= cal.ny001d;
            if leap {
                iy001 = 0; // year in tetrade [0]
                id001 = id004;
            } else {
                iy001 = (id004 - 1) / cal.ny001d;
                id001 = (id004 - 1).rem(cal.ny001d);
            }
        }
    }
    // BUGFIX: the tetrade index contributes `iy004 * 4` years, not `iy004 + 4`
    // (this is the inverse of `iy004 = id100 / 4` in `cal2step`).
    let iyea = iy400 * 400 + iy100 * 100 + iy004 * 4 + iy001;
    // Build the accumulated-days table, adding one day to February on leap years.
    cal.monaccu[0] = cal.mondays[0];
    cal.monaccu[1] = cal.mondays[1];
    cal.monaccu[2] = cal.mondays[1] + cal.mondays[2];
    if leap {
        cal.monaccu[2] = cal.monaccu[2] + 1;
    }
    // BUGFIX: everything from here on used to sit inside `if leap { ... }`,
    // so month/day/time were never computed in non-leap years; the leap
    // adjustment now covers only the February increment, matching `cal2step`.
    for jmon in 3..13 {
        cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
    }
    let mut imon = 1;
    id001 = id001 + 1;
    while id001 > cal.monaccu[imon as usize] {
        imon = imon + 1;
    }
    let iday = id001 - cal.monaccu[imon as usize - 1];
    let istp = kstep.rem(ktspd);
    let mut imin = (istp * 1440) / ktspd;
    let ihou = imin / 60;
    imin = imin.rem(60);
    kdatim[0] = iyea;
    kdatim[1] = imon;
    kdatim[2] = iday;
    kdatim[3] = ihou;
    kdatim[4] = imin;
    kdatim[5] = (kstep / ktspd + 5).rem(7); // day of week
    kdatim[6] = if leap { 1 } else { 0 };
}
fn cal2step(
ktspd: Int, // time steps per day
kyea: Int, // current year of simulation
kmon: Int, // current month of simulation
kday: Int, // current day of simulation
khou: Int, // current hour of simulation
kmin: Int, // current minute of simulation
mut kstep: &mut Int, //time step since simulation start
mut cal: &mut Calendar,
) {
let mut idall: Int;
let mut ilp: Int;
let mut iy400: Int;
let mut id400: Int;
let mut iy100: Int;
let mut id100: Int;
let mut iy004: Int;
let mut id004: Int;
let mut jmon: Int;
let leap: bool;
// simplified calendar
if cal.n_days_per_year!= 365 {
*kstep =
ktspd * (kyea * cal.n_days_per_year + (kmon - 1) * cal.n_days_per_month + kday - 1);
return;
}
iy400 = kyea / 400; // segment [400]
id400 = kyea.rem(400); // year in segment [0..399]
iy100 = id400 / 100; // century [0,1,2,3]
id100 = id400.rem(100); // year in century [0..99]
iy004 = id100 / 4; // tetrade [0..24]
id004 = id100.rem(4); // year in tetrade [0,1,2,3]
leap = (id004 == 0 && (id100!= 0 || id400 == 0));
ilp = -1;
if id004 > 0 {
ilp = ilp + 1;
}
if iy100 > 0 && id100 == 0 {
ilp = ilp + 1;
}
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
}
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
idall = iy400 * cal.ny400d
+ iy100 * cal.ny100d
+ iy004 * cal.ny004d
+ id004 * cal.ny001d
+ cal.monaccu[kmon as usize - 1]
+ kday
+ ilp;
*kstep = ktspd * idall + (ktspd * (khou * 60 + kmin)) / 1440;
}
// kstep ! time step since simulation start
// ktspd ! time steps per day
// kdatim(7)! year,month,day,hour,min,weekday,leapyear
/// Converts a time step into a date for the simplified calendar with
/// fixed-length months; the weekday and leap-year slots are always 0.
fn step2cal30(kstep: Int, ktspd: Int, mut kdatim: &mut [Int; 7], cal: &mut Calendar) {
    let mut iyea: Int; // current year of simulation
    let mut imon: Int; // current month of simulation
    let mut iday: Int; // current day of simulation
    let mut ihou: Int; // current hour of simulation
    let mut imin: Int; // current minute of simulation
    let mut idall: Int;
    let mut istp: Int;
    idall = kstep / ktspd;
    iyea = idall / cal.n_days_per_year;
    idall = idall.rem(cal.n_days_per_year);
    imon = idall / cal.n_days_per_month + 1;
    iday = idall.rem(cal.n_days_per_month) + 1;
    istp = kstep.rem(ktspd);
    // Convert the sub-day step fraction into minutes of the solar day.
    imin = ((istp as FloatNum * cal.solar_day) / (ktspd * 60) as FloatNum) as Int;
    ihou = imin / 60;
    imin = imin.rem(60);
    kdatim[0] = iyea;
    kdatim[1] = imon;
    kdatim[2] = iday;
    kdatim[3] = ihou;
    kdatim[4] = imin;
    kdatim[5] = 0; // day of week
    kdatim[6] = 0; // leap year
}
/// Decomposes time step `kstep` into minute, hour, day, month and year,
/// using the real calendar when `n_days_per_year == 365`, otherwise the
/// simplified one.
pub fn ntomin(
    kstep: Int,
    mut kmin: &mut Int,
    mut khou: &mut Int,
    mut kday: &mut Int,
    mut kmon: &mut Int,
    mut kyea: &mut Int,
    mut cal: &mut Calendar,
) {
    // idatim layout: year, month, day, hour, minute, weekday, leap flag.
    let mut idatim = [0; 7];
    if cal.n_days_per_year == 365 {
        step2cal(kstep, cal.ntspd, &mut idatim, cal);
    } else {
        step2cal30(kstep, cal.ntspd, &mut idatim, cal);
    }
    *kyea = idatim[0];
    *kmon = idatim[1];
    *kday = idatim[2];
    *khou = idatim[3];
    *kmin = idatim[4];
}
/// Formats time step `istep` as a date string, e.g. `" 1-Jan-0001  12:05"`,
/// matching the Fortran format `(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)` quoted
/// in the comment block below this function.
fn ntodat(istep: Int, datch: &mut String, cal: &mut Calendar) {
    const MONA: [&str; 12] = [
        "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
    ];
    let mut imin: Int = 1;
    let mut ihou: Int = 1;
    let mut iday: Int = 1;
    let mut imon: Int = 1;
    let mut iyea: Int = 1;
    ntomin(
        istep, &mut imin, &mut ihou, &mut iday, &mut imon, &mut iyea, cal,
    );
    // BUGFIX: pad the fields like the Fortran original (zero-padded year and
    // minute, width-2 day/hour) instead of unpadded `{}` placeholders.
    *datch = format!(
        "{:2}-{}-{:04}  {:2}:{:02}",
        iday,
        MONA[imon as usize - 1],
        iyea,
        ihou,
        imin
    )
}
//! =================
//! SUBROUTINE NTODAT
//! =================
//
// subroutine ntodat(istep,datch)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// call ntomin(istep,imin,ihou,iday,imon,iyea)
// write (datch,20030) iday,mona(imon),iyea,ihou,imin
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
//! =================
//! SUBROUTINE DTODAT
//! =================
//
// subroutine dtodat(id,datch)
// integer :: id(6)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// write (datch,20030) id(3),mona(id(2)),id(1),id(4),id(5)
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
//! =================
//! SUBROUTINE MOMINT
//! =================
//
//! Compute month indices and weights for time interpolation from
//! monthly mean data to current timestep
//
// subroutine momint(kperp,kstep,kmona,kmonb,pweight)
// use calmod
// implicit none
// integer, intent(in ) :: kperp ! perpetual mode?
// integer, intent(in ) :: kstep ! target step
// integer, intent(out) :: kmona ! current month (1-12)
// integer, intent(out) :: kmonb ! next or previous month (0-13)
// real , intent(out) :: pweight! interpolation weight
//
// integer :: idatim(7)! date time array
// integer :: iday
// integer :: ihour
// integer :: imin
// integer :: jmonb ! next or previous month (1-12)
//
// real :: zday ! fractional day (including hour & minute)
// real :: zdpma ! days per month a
// real :: zdpmb ! days per month b
// real :: zmeda ! median day of the month a
// real :: zmedb ! median day of the month b
//
//! convert time step to date / time
//
// idatim(:) = 0
// if (kperp > 0) then ! perpetual date
// call yday2mmdd(kperp,idatim(2),idatim(3))
// else if (n_days_per_year == 365) then ! real calendar
// call step2cal(kstep,ntspd,idatim)
// else ! simple calendar
// call step2cal30(kstep,ntspd,idatim)
// endif
//
// kmona = idatim(2)
// iday = idatim(3)
// ihour = idatim(4)
// imin = idatim(5)
//
//! set fractional day
//
// zday = iday + ((ihour * 60.0 + imin) * 60.0) / solar_day
//
//! compute median of month a
//
// zdpma = n_days_per_month
// if (n_days_per_year == 365) then
// zdpma = mondays(kmona)
// if (kmona == 2) zdpma = zdpma + idatim(7)! leap year
// endif
// zmeda = 0.5 * (zdpma + 1.0)! median day a
//
//! define neighbour month
//
// if (zday > zmeda) then
// kmonb = kmona + 1! next month (maybe 13)
// else
// kmonb = kmona - 1! previous month (maybe 0)
// endif
//
//! compute median of month b
//
// zdpmb = n_days_per_month
// if (n_days_per_year == 365) then
// jmonb = mod(kmonb+11,12) + 1! convert month (0-13) -> (1-12)
// zdpmb = mondays(jmonb)
// if (jmonb == 2) zdpmb = zdpmb + idatim(7)! leap year
// endif
// zmedb = 0.5 * (zdpmb + 1.0)! median day b
//
//! compute weight
//
// pweight = abs(zday - zmeda) / (zmeda + zmedb - 1.0)
//
// return
// end subroutine momint
//
| {
// century year is leap year
iy100 = 0; // century in segment [0]
id100 = id400;
iy004 = id100 / cal.ny004d; // tetrade in century [0..24]
id004 = id100.rem(cal.ny004d);
leap = id004 <= cal.ny001d;
if leap {
iy001 = 0; // year in tetrade [0]
id001 = id004;
} else {
iy001 = (id004 - 1) / cal.ny001d; // year in tetrade [1,2,3]
id001 = (id004 - 1).rem(cal.ny001d);
}
} | conditional_block |
calendar.rs | use crate::{FloatNum, Int};
use std::ops::Rem;
#[derive(Clone)]
pub struct Calendar {
    /// Days in each month, January..December (index 0 unused).
    pub mondays: [Int; 13],
    /// Accumulated days at the end of each month in a 365-day year.
    pub mona365: [Int; 13],
    /// Accumulated days per month; filled in by `step2cal`/`cal2step`
    /// (leap-year aware).
    pub monaccu: [Int; 13],
    /// Days in a 400-year Gregorian cycle (97 leap years).
    pub ny400d: Int,
    /// Days in a 100-year cycle (24 leap years).
    pub ny100d: Int,
    /// Days in a 4-year cycle (1 leap year).
    pub ny004d: Int,
    /// Days in a regular year.
    pub ny001d: Int,
    // NOTE(review): purpose of `nud` is not evident from this file — confirm.
    pub nud: Int,
    // TODO?
    // These values are copied from pumamod in subroutine calini
    pub n_days_per_month: Int,
    pub n_days_per_year: Int,
    /// First time step of the simulation.
    pub n_start_step: Int,
    /// Time steps per day.
    pub ntspd: Int,
    /// Length of a solar day in seconds.
    pub solar_day: FloatNum,
}
impl Default for Calendar {
    /// Defaults to the simplified 360-day calendar (30-day months);
    /// the Gregorian cycle lengths are precomputed for the real calendar.
    fn default() -> Self {
        Self {
            // Days per month, January..December (index 0 unused).
            mondays: [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
            // Accumulated days at each month's end in a 365-day year.
            mona365: [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
            // Filled in later by `step2cal`/`cal2step`.
            monaccu: [0; 13],
            ny400d: 400 * 365 + 97, // days per 400-year Gregorian cycle
            ny100d: 100 * 365 + 24, // days per century
            ny004d: 4 * 365 + 1,    // days per tetrade (4-year cycle)
            ny001d: 365,            // days per regular year
            nud: 6,
            n_days_per_month: 30,
            n_days_per_year: 360,
            n_start_step: 0,
            ntspd: 1,
            solar_day: 86400.0, // sec
        }
    }
}
/// Converts a day-of-year (`kyday`) into month (`kmon`) and day-of-month (`kday`).
///
/// NOTE(review): for the 365-day calendar the month search uses `mona365`, but
/// the day-of-month uses `monaccu`, which is only filled by
/// `step2cal`/`cal2step` — confirm `monaccu` is initialized before this runs.
/// NOTE(review): in the simplified-calendar branch `kmon` comes out 0-based
/// (`kyday == 1` gives `kmon == 0`), while the rest of this module uses
/// 1-based months — looks like a missing `+ 1`; confirm against the Fortran
/// original.
fn yday2mmdd(cal: &Calendar, mut kyday: &mut Int, mut kmon: &mut Int, mut kday: &mut Int) {
    if cal.n_days_per_year == 365 {
        *kmon = 1;
        while *kyday > cal.mona365[*kmon as usize] {
            *kmon = *kmon + 1;
        }
        *kday = *kyday - cal.monaccu[*kmon as usize - 1];
    } else {
        *kmon = (*kyday - 1) / cal.n_days_per_month;
        *kday = *kyday - cal.n_days_per_month * *kmon;
    }
}
/// Maps an absolute day count to a weekday index in `0..7`
/// (offset by 5 so day 0 lands on the intended weekday).
fn nweekday(kday: Int) -> Int {
    (kday + 5).rem(7) as Int
}
pub fn ndayofyear(kstep: Int, cal: &mut Calendar) -> Int {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.monaccu[idatim[1] as usize - 1];
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.n_days_per_month * (idatim[1] - 1);
}
0
}
/// Convert a time step since simulation start into a calendar date/time.
///
/// * `kstep` – time step since simulation start
/// * `ktspd` – time steps per day
/// * `kdatim` – filled as `[year, month, day, hour, min, weekday, leapyear]`
///
/// Walks the Gregorian 400/100/4/1-year cycle using the precomputed day
/// counts in `cal`, and rebuilds `cal.monaccu` for the resolved year.
fn step2cal(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], cal: &mut Calendar) {
    let iy001: Int; // year within tetrad [0..3]
    let mut id001: Int; // day within year
    let leap: bool;

    let idall = kstep / ktspd; // whole days since simulation start
    let iy400 = idall / cal.ny400d; // segment (of 400 years)
    let id400 = idall.rem(cal.ny400d); // day within segment
    let iy100: Int;
    let id100: Int;
    let iy004: Int;
    let id004: Int;
    if id400 <= cal.ny100d {
        // century year is leap year
        iy100 = 0; // century in segment [0]
        id100 = id400;
        iy004 = id100 / cal.ny004d; // tetrade in century [0..24]
        id004 = id100.rem(cal.ny004d);
        leap = id004 <= cal.ny001d;
        if leap {
            iy001 = 0; // year in tetrade [0]
            id001 = id004;
        } else {
            iy001 = (id004 - 1) / cal.ny001d; // year in tetrade [1,2,3]
            id001 = (id004 - 1).rem(cal.ny001d);
        }
    } else {
        // century year is not leap year
        iy100 = (id400 - 1) / cal.ny100d; // century in segment [1,2,3]
        id100 = (id400 - 1).rem(cal.ny100d);
        if id100 < cal.ny004d - 1 {
            iy004 = 0; // tetrade in century [0]
            id004 = id100;
            leap = false;
            iy001 = id004 / cal.ny001d; // year in tetrade [1,2,3]
            id001 = id004.rem(cal.ny001d);
        } else {
            iy004 = (id100 + 1) / cal.ny004d; // tetrade in century [1..24]
            id004 = (id100 + 1).rem(cal.ny004d);
            leap = id004 <= cal.ny001d;
            if leap {
                iy001 = 0; // year in tetrade [0]
                id001 = id004;
            } else {
                iy001 = (id004 - 1) / cal.ny001d;
                id001 = (id004 - 1).rem(cal.ny001d);
            }
        }
    }
    // BUGFIX: year = segment*400 + century*100 + tetrad*4 + year-in-tetrad;
    // the tetrad term was previously written `iy004 + 4`.
    let iyea = iy400 * 400 + iy100 * 100 + iy004 * 4 + iy001;
    // Accumulated days at the end of each month, leap-adjusted.
    cal.monaccu[0] = cal.mondays[0];
    cal.monaccu[1] = cal.mondays[1];
    cal.monaccu[2] = cal.mondays[1] + cal.mondays[2];
    if leap {
        cal.monaccu[2] = cal.monaccu[2] + 1;
    }
    // BUGFIX: the month accumulation and everything below used to be nested
    // inside the `if leap` block, so kdatim was never filled for common years
    // (compare `cal2step`, which performs this loop unconditionally).
    for jmon in 3..13 {
        cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
    }
    let mut imon: Int = 1;
    id001 = id001 + 1;
    while id001 > cal.monaccu[imon as usize] {
        imon = imon + 1;
    }
    let iday = id001 - cal.monaccu[imon as usize - 1];
    let istp = kstep.rem(ktspd); // step within the current day
    let mut imin = (istp * 1440) / ktspd;
    let ihou = imin / 60;
    imin = imin.rem(60);
    kdatim[0] = iyea;
    kdatim[1] = imon;
    kdatim[2] = iday;
    kdatim[3] = ihou;
    kdatim[4] = imin;
    kdatim[5] = (kstep / ktspd + 5).rem(7); // day of week
    kdatim[6] = if leap { 1 } else { 0 };
}
fn | (
ktspd: Int, // time steps per day
kyea: Int, // current year of simulation
kmon: Int, // current month of simulation
kday: Int, // current day of simulation
khou: Int, // current hour of simulation
kmin: Int, // current minute of simulation
mut kstep: &mut Int, //time step since simulation start
mut cal: &mut Calendar,
) {
let mut idall: Int;
let mut ilp: Int;
let mut iy400: Int;
let mut id400: Int;
let mut iy100: Int;
let mut id100: Int;
let mut iy004: Int;
let mut id004: Int;
let mut jmon: Int;
let leap: bool;
// simplified calendar
if cal.n_days_per_year!= 365 {
*kstep =
ktspd * (kyea * cal.n_days_per_year + (kmon - 1) * cal.n_days_per_month + kday - 1);
return;
}
iy400 = kyea / 400; // segment [400]
id400 = kyea.rem(400); // year in segment [0..399]
iy100 = id400 / 100; // century [0,1,2,3]
id100 = id400.rem(100); // year in century [0..99]
iy004 = id100 / 4; // tetrade [0..24]
id004 = id100.rem(4); // year in tetrade [0,1,2,3]
leap = (id004 == 0 && (id100!= 0 || id400 == 0));
ilp = -1;
if id004 > 0 {
ilp = ilp + 1;
}
if iy100 > 0 && id100 == 0 {
ilp = ilp + 1;
}
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
}
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
idall = iy400 * cal.ny400d
+ iy100 * cal.ny100d
+ iy004 * cal.ny004d
+ id004 * cal.ny001d
+ cal.monaccu[kmon as usize - 1]
+ kday
+ ilp;
*kstep = ktspd * idall + (ktspd * (khou * 60 + kmin)) / 1440;
}
/// Fill `kdatim` = `[year, month, day, hour, min, weekday, leapyear]` for the
/// simplified calendar with fixed-length months. Weekday and leap-year slots
/// are always zero here.
fn step2cal30(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], cal: &mut Calendar) {
    let days = kstep / ktspd; // whole days since simulation start
    let year = days / cal.n_days_per_year;
    let day_of_year = days.rem(cal.n_days_per_year);
    let month = day_of_year / cal.n_days_per_month + 1;
    let day = day_of_year.rem(cal.n_days_per_month) + 1;
    let step_in_day = kstep.rem(ktspd);
    // Minute of the day derived from the configured solar-day length.
    let minute_of_day =
        ((step_in_day as FloatNum * cal.solar_day) / (ktspd * 60) as FloatNum) as Int;
    let hour = minute_of_day / 60;
    let minute = minute_of_day.rem(60);
    *kdatim = [year, month, day, hour, minute, 0, 0];
}
/// Decompose a time step into minute, hour, day, month and year via the
/// calendar matching `cal.n_days_per_year` (out parameters, Fortran style).
pub fn ntomin(
    kstep: Int,
    kmin: &mut Int,
    khou: &mut Int,
    kday: &mut Int,
    kmon: &mut Int,
    kyea: &mut Int,
    cal: &mut Calendar,
) {
    let mut idatim = [0; 7];
    if cal.n_days_per_year == 365 {
        step2cal(kstep, cal.ntspd, &mut idatim, cal);
    } else {
        step2cal30(kstep, cal.ntspd, &mut idatim, cal);
    }
    let [year, month, day, hour, minute, _, _] = idatim;
    *kyea = year;
    *kmon = month;
    *kday = day;
    *khou = hour;
    *kmin = minute;
}
/// Format a time step as a date string, e.g. `"12-Mar-0005  06:09"`.
///
/// Mirrors the Fortran format `(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)` documented
/// in the comment block below this function.
fn ntodat(istep: Int, datch: &mut String, cal: &mut Calendar) {
    const MONA: [&str; 12] = [
        "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
    ];
    let mut imin: Int = 1;
    let mut ihou: Int = 1;
    let mut iday: Int = 1;
    let mut imon: Int = 1;
    let mut iyea: Int = 1;
    ntomin(
        istep, &mut imin, &mut ihou, &mut iday, &mut imon, &mut iyea, cal,
    );
    // BUGFIX: pad the year to 4 digits and the minute to 2 digits as in the
    // Fortran format; the old "{}:{}" produced e.g. "6:5" instead of " 6:05".
    *datch = format!(
        "{:2}-{}-{:04}  {:2}:{:02}",
        iday,
        MONA[imon as usize - 1],
        iyea,
        ihou,
        imin
    );
}
//! =================
//! SUBROUTINE NTODAT
//! =================
//
// subroutine ntodat(istep,datch)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// call ntomin(istep,imin,ihou,iday,imon,iyea)
// write (datch,20030) iday,mona(imon),iyea,ihou,imin
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
//! =================
//! SUBROUTINE DTODAT
//! =================
//
// subroutine dtodat(id,datch)
// integer :: id(6)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// write (datch,20030) id(3),mona(id(2)),id(1),id(4),id(5)
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
//! =================
//! SUBROUTINE MOMINT
//! =================
//
//! Compute month indices and weights for time interpolation from
//! monthly mean data to current timestep
//
// subroutine momint(kperp,kstep,kmona,kmonb,pweight)
// use calmod
// implicit none
// integer, intent(in ) :: kperp ! perpetual mode?
// integer, intent(in ) :: kstep ! target step
// integer, intent(out) :: kmona ! current month (1-12)
// integer, intent(out) :: kmonb ! next or previous month (0-13)
// real , intent(out) :: pweight! interpolation weight
//
// integer :: idatim(7)! date time array
// integer :: iday
// integer :: ihour
// integer :: imin
// integer :: jmonb ! next or previous month (1-12)
//
// real :: zday ! fractional day (including hour & minute)
// real :: zdpma ! days per month a
// real :: zdpmb ! days per month b
// real :: zmeda ! median day of the month a
// real :: zmedb ! median day of the month b
//
//! convert time step to date / time
//
// idatim(:) = 0
// if (kperp > 0) then ! perpetual date
// call yday2mmdd(kperp,idatim(2),idatim(3))
// else if (n_days_per_year == 365) then ! real calendar
// call step2cal(kstep,ntspd,idatim)
// else ! simple calendar
// call step2cal30(kstep,ntspd,idatim)
// endif
//
// kmona = idatim(2)
// iday = idatim(3)
// ihour = idatim(4)
// imin = idatim(5)
//
//! set fractional day
//
// zday = iday + ((ihour * 60.0 + imin) * 60.0) / solar_day
//
//! compute median of month a
//
// zdpma = n_days_per_month
// if (n_days_per_year == 365) then
// zdpma = mondays(kmona)
// if (kmona == 2) zdpma = zdpma + idatim(7)! leap year
// endif
// zmeda = 0.5 * (zdpma + 1.0)! median day a
//
//! define neighbour month
//
// if (zday > zmeda) then
// kmonb = kmona + 1! next month (maybe 13)
// else
// kmonb = kmona - 1! previous month (maybe 0)
// endif
//
//! compute median of month b
//
// zdpmb = n_days_per_month
// if (n_days_per_year == 365) then
// jmonb = mod(kmonb+11,12) + 1! convert month (0-13) -> (1-12)
// zdpmb = mondays(jmonb)
// if (jmonb == 2) zdpmb = zdpmb + idatim(7)! leap year
// endif
// zmedb = 0.5 * (zdpmb + 1.0)! median day b
//
//! compute weight
//
// pweight = abs(zday - zmeda) / (zmeda + zmedb - 1.0)
//
// return
// end subroutine momint
//
| cal2step | identifier_name |
calendar.rs | use crate::{FloatNum, Int};
use std::ops::Rem;
/// Model calendar state and configuration (port of the Fortran `calmod`
/// documented in the comment blocks at the end of this file).
#[derive(Clone)]
pub struct Calendar {
    // Days per month; index 1..=12, index 0 unused.
    pub mondays: [Int; 13],
    // Accumulated days at the end of each month for a 365-day year.
    pub mona365: [Int; 13],
    // Accumulated days per month for the current year; rebuilt (leap-adjusted)
    // by `step2cal` / `cal2step`.
    pub monaccu: [Int; 13],
    // Days in a 400-year Gregorian cycle (97 leap days).
    pub ny400d: Int,
    // Days in a century with 24 leap days.
    pub ny100d: Int,
    // Days in a 4-year tetrad with one leap day.
    pub ny004d: Int,
    // Days in a common year.
    pub ny001d: Int,
    pub nud: Int,
    // TODO?
    // These values are copied from pumamod in subroutine calini
    pub n_days_per_month: Int,
    pub n_days_per_year: Int,
    pub n_start_step: Int,
    // Time steps per day.
    pub ntspd: Int,
    // Length of the solar day in seconds.
    pub solar_day: FloatNum,
}
impl Default for Calendar {
fn default() -> Self {
Self {
mondays: [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
mona365: [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365],
monaccu: [0; 13],
ny400d: 400 * 365 + 97,
ny100d: 100 * 365 + 24,
ny004d: 4 * 365 + 1, | n_days_per_year: 360,
n_start_step: 0,
ntspd: 1,
solar_day: 86400.0, // sec
}
}
}
/// Convert a 1-based day-of-year into month (1-12) and day-of-month.
///
/// For the 365-day calendar `cal.monaccu` must already hold the accumulated
/// month lengths (filled by `step2cal`/`cal2step`).
fn yday2mmdd(cal: &Calendar, kyday: &mut Int, kmon: &mut Int, kday: &mut Int) {
    if cal.n_days_per_year == 365 {
        *kmon = 1;
        while *kyday > cal.mona365[*kmon as usize] {
            *kmon = *kmon + 1;
        }
        *kday = *kyday - cal.monaccu[*kmon as usize - 1];
    } else {
        // BUGFIX: month must be 1-based like in the 365-day branch (and like
        // the Fortran caller `momint`, which expects month 1-12); the old code
        // produced a 0-based month and a day offset by one whole month.
        *kmon = (*kyday - 1) / cal.n_days_per_month + 1;
        *kday = *kyday - cal.n_days_per_month * (*kmon - 1);
    }
}
/// Weekday index in `[0, 6]` for an absolute day number.
fn nweekday(kday: Int) -> Int {
    (kday + 5) % 7
}
pub fn ndayofyear(kstep: Int, cal: &mut Calendar) -> Int {
let mut idatim = [0; 7];
if cal.n_days_per_year == 365 {
step2cal(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.monaccu[idatim[1] as usize - 1];
} else {
step2cal30(kstep, cal.ntspd, &mut idatim, cal);
return idatim[2] + cal.n_days_per_month * (idatim[1] - 1);
}
0
}
/// Convert a time step since simulation start into a calendar date/time.
///
/// * `kstep` – time step since simulation start
/// * `ktspd` – time steps per day
/// * `kdatim` – filled as `[year, month, day, hour, min, weekday, leapyear]`
///
/// Walks the Gregorian 400/100/4/1-year cycle using the precomputed day
/// counts in `cal`, and rebuilds `cal.monaccu` for the resolved year.
fn step2cal(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], cal: &mut Calendar) {
    let iy001: Int; // year within tetrad [0..3]
    let mut id001: Int; // day within year
    let leap: bool;

    let idall = kstep / ktspd; // whole days since simulation start
    let iy400 = idall / cal.ny400d; // segment (of 400 years)
    let id400 = idall.rem(cal.ny400d); // day within segment
    let iy100: Int;
    let id100: Int;
    let iy004: Int;
    let id004: Int;
    if id400 <= cal.ny100d {
        // century year is leap year
        iy100 = 0; // century in segment [0]
        id100 = id400;
        iy004 = id100 / cal.ny004d; // tetrade in century [0..24]
        id004 = id100.rem(cal.ny004d);
        leap = id004 <= cal.ny001d;
        if leap {
            iy001 = 0; // year in tetrade [0]
            id001 = id004;
        } else {
            iy001 = (id004 - 1) / cal.ny001d; // year in tetrade [1,2,3]
            id001 = (id004 - 1).rem(cal.ny001d);
        }
    } else {
        // century year is not leap year
        iy100 = (id400 - 1) / cal.ny100d; // century in segment [1,2,3]
        id100 = (id400 - 1).rem(cal.ny100d);
        if id100 < cal.ny004d - 1 {
            iy004 = 0; // tetrade in century [0]
            id004 = id100;
            leap = false;
            iy001 = id004 / cal.ny001d; // year in tetrade [1,2,3]
            id001 = id004.rem(cal.ny001d);
        } else {
            iy004 = (id100 + 1) / cal.ny004d; // tetrade in century [1..24]
            id004 = (id100 + 1).rem(cal.ny004d);
            leap = id004 <= cal.ny001d;
            if leap {
                iy001 = 0; // year in tetrade [0]
                id001 = id004;
            } else {
                iy001 = (id004 - 1) / cal.ny001d;
                id001 = (id004 - 1).rem(cal.ny001d);
            }
        }
    }
    // BUGFIX: year = segment*400 + century*100 + tetrad*4 + year-in-tetrad;
    // the tetrad term was previously written `iy004 + 4`.
    let iyea = iy400 * 400 + iy100 * 100 + iy004 * 4 + iy001;
    // Accumulated days at the end of each month, leap-adjusted.
    cal.monaccu[0] = cal.mondays[0];
    cal.monaccu[1] = cal.mondays[1];
    cal.monaccu[2] = cal.mondays[1] + cal.mondays[2];
    if leap {
        cal.monaccu[2] = cal.monaccu[2] + 1;
    }
    // BUGFIX: the month accumulation and everything below used to be nested
    // inside the `if leap` block, so kdatim was never filled for common years
    // (compare `cal2step`, which performs this loop unconditionally).
    for jmon in 3..13 {
        cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
    }
    let mut imon: Int = 1;
    id001 = id001 + 1;
    while id001 > cal.monaccu[imon as usize] {
        imon = imon + 1;
    }
    let iday = id001 - cal.monaccu[imon as usize - 1];
    let istp = kstep.rem(ktspd); // step within the current day
    let mut imin = (istp * 1440) / ktspd;
    let ihou = imin / 60;
    imin = imin.rem(60);
    kdatim[0] = iyea;
    kdatim[1] = imon;
    kdatim[2] = iday;
    kdatim[3] = ihou;
    kdatim[4] = imin;
    kdatim[5] = (kstep / ktspd + 5).rem(7); // day of week
    kdatim[6] = if leap { 1 } else { 0 };
}
fn cal2step(
ktspd: Int, // time steps per day
kyea: Int, // current year of simulation
kmon: Int, // current month of simulation
kday: Int, // current day of simulation
khou: Int, // current hour of simulation
kmin: Int, // current minute of simulation
mut kstep: &mut Int, //time step since simulation start
mut cal: &mut Calendar,
) {
let mut idall: Int;
let mut ilp: Int;
let mut iy400: Int;
let mut id400: Int;
let mut iy100: Int;
let mut id100: Int;
let mut iy004: Int;
let mut id004: Int;
let mut jmon: Int;
let leap: bool;
// simplified calendar
if cal.n_days_per_year!= 365 {
*kstep =
ktspd * (kyea * cal.n_days_per_year + (kmon - 1) * cal.n_days_per_month + kday - 1);
return;
}
iy400 = kyea / 400; // segment [400]
id400 = kyea.rem(400); // year in segment [0..399]
iy100 = id400 / 100; // century [0,1,2,3]
id100 = id400.rem(100); // year in century [0..99]
iy004 = id100 / 4; // tetrade [0..24]
id004 = id100.rem(4); // year in tetrade [0,1,2,3]
leap = (id004 == 0 && (id100!= 0 || id400 == 0));
ilp = -1;
if id004 > 0 {
ilp = ilp + 1;
}
if iy100 > 0 && id100 == 0 {
ilp = ilp + 1;
}
cal.monaccu[0] = cal.mondays[0];
cal.monaccu[1] = cal.mondays[1];
cal.monaccu[2] = cal.mondays[1];
if leap {
cal.monaccu[2] = cal.monaccu[2] + 1;
}
for jmon in 3..13 {
cal.monaccu[jmon] = cal.monaccu[jmon - 1] + cal.mondays[jmon];
}
idall = iy400 * cal.ny400d
+ iy100 * cal.ny100d
+ iy004 * cal.ny004d
+ id004 * cal.ny001d
+ cal.monaccu[kmon as usize - 1]
+ kday
+ ilp;
*kstep = ktspd * idall + (ktspd * (khou * 60 + kmin)) / 1440;
}
/// Fill `kdatim` = `[year, month, day, hour, min, weekday, leapyear]` for the
/// simplified calendar with fixed-length months. Weekday and leap-year slots
/// are always zero here.
fn step2cal30(kstep: Int, ktspd: Int, kdatim: &mut [Int; 7], cal: &mut Calendar) {
    let days = kstep / ktspd; // whole days since simulation start
    let year = days / cal.n_days_per_year;
    let day_of_year = days.rem(cal.n_days_per_year);
    let month = day_of_year / cal.n_days_per_month + 1;
    let day = day_of_year.rem(cal.n_days_per_month) + 1;
    let step_in_day = kstep.rem(ktspd);
    // Minute of the day derived from the configured solar-day length.
    let minute_of_day =
        ((step_in_day as FloatNum * cal.solar_day) / (ktspd * 60) as FloatNum) as Int;
    let hour = minute_of_day / 60;
    let minute = minute_of_day.rem(60);
    *kdatim = [year, month, day, hour, minute, 0, 0];
}
/// Decompose a time step into minute, hour, day, month and year via the
/// calendar matching `cal.n_days_per_year` (out parameters, Fortran style).
pub fn ntomin(
    kstep: Int,
    kmin: &mut Int,
    khou: &mut Int,
    kday: &mut Int,
    kmon: &mut Int,
    kyea: &mut Int,
    cal: &mut Calendar,
) {
    let mut idatim = [0; 7];
    if cal.n_days_per_year == 365 {
        step2cal(kstep, cal.ntspd, &mut idatim, cal);
    } else {
        step2cal30(kstep, cal.ntspd, &mut idatim, cal);
    }
    let [year, month, day, hour, minute, _, _] = idatim;
    *kyea = year;
    *kmon = month;
    *kday = day;
    *khou = hour;
    *kmin = minute;
}
/// Format a time step as a date string, e.g. `"12-Mar-0005  06:09"`.
///
/// Mirrors the Fortran format `(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)` documented
/// in the comment block below this function.
fn ntodat(istep: Int, datch: &mut String, cal: &mut Calendar) {
    const MONA: [&str; 12] = [
        "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
    ];
    let mut imin: Int = 1;
    let mut ihou: Int = 1;
    let mut iday: Int = 1;
    let mut imon: Int = 1;
    let mut iyea: Int = 1;
    ntomin(
        istep, &mut imin, &mut ihou, &mut iday, &mut imon, &mut iyea, cal,
    );
    // BUGFIX: pad the year to 4 digits and the minute to 2 digits as in the
    // Fortran format; the old "{}:{}" produced e.g. "6:5" instead of " 6:05".
    *datch = format!(
        "{:2}-{}-{:04}  {:2}:{:02}",
        iday,
        MONA[imon as usize - 1],
        iyea,
        ihou,
        imin
    );
}
//! =================
//! SUBROUTINE NTODAT
//! =================
//
// subroutine ntodat(istep,datch)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// call ntomin(istep,imin,ihou,iday,imon,iyea)
// write (datch,20030) iday,mona(imon),iyea,ihou,imin
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
//! =================
//! SUBROUTINE DTODAT
//! =================
//
// subroutine dtodat(id,datch)
// integer :: id(6)
// character(len=18) datch
// character(len=3) mona(12)
// data mona /'Jan','Feb','Mar','Apr','May','Jun', &
// & 'Jul','Aug','Sep','Oct','Nov','Dec'/
// write (datch,20030) id(3),mona(id(2)),id(1),id(4),id(5)
// 20030 format(i2,'-',a3,'-',i4.4,2x,i2,':',i2.2)
// end
//
//
//! =================
//! SUBROUTINE MOMINT
//! =================
//
//! Compute month indices and weights for time interpolation from
//! monthly mean data to current timestep
//
// subroutine momint(kperp,kstep,kmona,kmonb,pweight)
// use calmod
// implicit none
// integer, intent(in ) :: kperp ! perpetual mode?
// integer, intent(in ) :: kstep ! target step
// integer, intent(out) :: kmona ! current month (1-12)
// integer, intent(out) :: kmonb ! next or previous month (0-13)
// real , intent(out) :: pweight! interpolation weight
//
// integer :: idatim(7)! date time array
// integer :: iday
// integer :: ihour
// integer :: imin
// integer :: jmonb ! next or previous month (1-12)
//
// real :: zday ! fractional day (including hour & minute)
// real :: zdpma ! days per month a
// real :: zdpmb ! days per month b
// real :: zmeda ! median day of the month a
// real :: zmedb ! median day of the month b
//
//! convert time step to date / time
//
// idatim(:) = 0
// if (kperp > 0) then ! perpetual date
// call yday2mmdd(kperp,idatim(2),idatim(3))
// else if (n_days_per_year == 365) then ! real calendar
// call step2cal(kstep,ntspd,idatim)
// else ! simple calendar
// call step2cal30(kstep,ntspd,idatim)
// endif
//
// kmona = idatim(2)
// iday = idatim(3)
// ihour = idatim(4)
// imin = idatim(5)
//
//! set fractional day
//
// zday = iday + ((ihour * 60.0 + imin) * 60.0) / solar_day
//
//! compute median of month a
//
// zdpma = n_days_per_month
// if (n_days_per_year == 365) then
// zdpma = mondays(kmona)
// if (kmona == 2) zdpma = zdpma + idatim(7)! leap year
// endif
// zmeda = 0.5 * (zdpma + 1.0)! median day a
//
//! define neighbour month
//
// if (zday > zmeda) then
// kmonb = kmona + 1! next month (maybe 13)
// else
// kmonb = kmona - 1! previous month (maybe 0)
// endif
//
//! compute median of month b
//
// zdpmb = n_days_per_month
// if (n_days_per_year == 365) then
// jmonb = mod(kmonb+11,12) + 1! convert month (0-13) -> (1-12)
// zdpmb = mondays(jmonb)
// if (jmonb == 2) zdpmb = zdpmb + idatim(7)! leap year
// endif
// zmedb = 0.5 * (zdpmb + 1.0)! median day b
//
//! compute weight
//
// pweight = abs(zday - zmeda) / (zmeda + zmedb - 1.0)
//
// return
// end subroutine momint
// | ny001d: 365,
nud: 6,
n_days_per_month: 30, | random_line_split |
wsr98d_reader.rs | use crate::MetError;
use crate::STRadialData;
use binread::prelude::*;
use chrono::NaiveDateTime;
use encoding_rs::*;
use std::cmp::PartialEq;
use std::collections::HashMap;
use std::io::Cursor;
// Moment (data-type) names indexed by the on-file type code minus one;
// "Reserved" marks codes not assigned by the format.
const DATA_TYPE: [&'static str; 37] = [
    "dBT", "dBZ", "V", "W", "SQI", "CPA", "ZDR", "LDR", "CC", "PDP", "KDP", "CP", "Reserved",
    "HCL", "CF", "SNR", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
    "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
    "Reserved", "Zc", "Vc", "Wc", "ZDRc", "FFT", "VIL",
];
/// File-level common header of a WSR-98D standard-format radar file.
#[derive(Debug, BinRead)]
struct CommonBlock {
    magic_num: i32,     // magic word: fixed marker identifying a radar data file
    major_version: u16, // major format version
    minor_version: u16, // minor format version
    generic_type: i32,  // file type: 1 = base data, 2 = weather product, 3 = spectrum data
    product_type: i32,  // product type; unused when the file type is 1
    #[br(count = 16)]
    reserved: Vec<u8>,
}
/// Radar site description block (station identity, location, hardware).
#[derive(Debug, BinRead)]
struct SiteInfo {
    #[br(count = 8)]
    site_code_: Vec<u8>,
    // Station code decoded from GBK with trailing NULs stripped.
    #[br(calc=GBK.decode(&site_code_).0.trim_end_matches('\u{0}').to_string())]
    site_code: String,
    #[br(count = 32)]
    site_name_: Vec<u8>,
    // Station name decoded from GBK with trailing NULs stripped.
    #[br(calc=GBK.decode(&site_name_).0.trim_end_matches('\u{0}').to_string())]
    site_name: String,
    latitude: f32,
    longtitude: f32,
    antena_height: i32, // antenna height
    ground_height: i32, // ground elevation of the radar tower above sea level
    frequency: f32,
    beam_width_h: f32, // horizontal beam width
    beam_width_v: f32, // vertical beam width
    radar_version: i32, // version of the radar data acquisition software
    radar_type: u16, // 1–SA
    // 2–SB
    // 3–SC
    // 33–CA
    // 34–CB
    // 35–CC
    // 36–CCJ
    // 37–CD
    // 65–XA
    // 66–KA
    // 67–W
    antenna_gain: i16,
    trans_loss: i16,
    recv_loss: i16,
    other_loss: i16,
    #[br(count = 46)]
    reserved: Vec<u8>,
}
/// Task (scan-strategy) header of the volume.
#[derive(Debug, BinRead)]
struct TaskInfo {
    #[br(count = 32)]
    task_name: Vec<u8>,
    #[br(count = 128)]
    task_des: Vec<u8>,
    // Polarization: 1 = horizontal, 2 = vertical, 3 = simultaneous H/V, 4 = alternating H/V.
    polarization_type: i32,
    // Scan type: 0 = volume, 1 = single PPI, 2 = single RHI, 3 = single sector,
    // 4 = sector volume, 5 = multi-layer RHI, 6 = manual, 7 = vertical scan.
    scan_type: i32,
    pulse_width: i32,
    // Volume start time as a Unix timestamp; the two derived fields below
    // render it as "YYYYMMDD" and "HHMMSS".
    start_time_: i32,
    #[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%Y%m%d").to_string()})]
    start_date: String,
    #[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%H%M%S").to_string()})]
    start_time: String,
    cut_num: i32, // number of cuts (elevations); one CutInfo is read per cut
    noise_h: f32,
    noise_v: f32,
    cali_h: f32,
    cali_v: f32,
    h_noise_t: f32,
    v_noise_t: f32,
    zdr_cali: f32,
    phidp_cali: f32,
    ldr_cali: f32,
    // pulse_width2: f32,
    // pulse_width3: f32,
    // pulse_width4: f32,
    #[br(count = 40)]
    reserved: Vec<u8>,
}
/// Per-cut (per-elevation) scan parameters.
#[derive(Debug, BinRead)]
struct CutInfo {
    process_mode: i32,
    // Waveform: 0 = CS continuous surveillance, 1 = CD continuous Doppler,
    // 2 = CDX Doppler extended, 3 = Rx test, 4 = BATCH mode, 5 = dual PRF,
    // 6 = staggered PRT, 7 = single PRF, 8 = linear FM, 9 = phase encoding.
    wave_form: i32,
    prf1: f32,
    prf2: f32,
    deal_mod: i32,
    az: f32,
    elev: f32, // elevation angle of this cut; also the equality key (PartialEq)
    start_az: f32,
    end_az: f32,
    ang_res: f32,
    scan_speed: f32,
    log_res: i32, // gate length of the surveillance channel; used as radial resolution in `new`
    dop_res: i32, // gate length of the Doppler channel — presumably; confirm against format spec
    max_range1: i32,
    max_range2: i32,
    start_range: i32,
    sample1: i32,
    sample2: i32,
    phase_mod: i32,
    at_loss: f32,
    ny_speed: f32,
    moments_mask: i64,
    moments_size_mask: i64,
    mis_filter_mask: i32,
    sqi: f32,
    sig: f32,
    csr: f32,
    log: f32,
    cpa: f32,
    pmi: f32,
    dplog: f32,
    #[br(count = 4)]
    r: Vec<u8>,
    dbt_mask: i32,
    dbz_mask: i32,
    v_mask: i32,
    w_mask: i32,
    dp_mask: i32,
    #[br(count = 12)]
    mask_reserved: Vec<u8>,
    scan_sync: i32,
    direction: i32,
    ground_clutter_type: u16,
    ground_clutter_filter_type: u16,
    ground_clutter_width: u16,
    ground_clutter_filter_win: i16,
    pulse_width: u16,
    pulse_width1: u16,
    pulse_width2: u16,
    pulse_width3: u16,
    pulse_width4: u16,
    #[br(count = 62)]
    reserved: Vec<u8>,
}
// Cuts compare equal when their elevation angles match (enables dedup by elevation).
impl PartialEq for CutInfo {
    fn eq(&self, other: &Self) -> bool {
        self.elev.eq(&other.elev)
    }
}
/// One radial (ray) header plus its moment data blocks.
#[derive(Debug, BinRead, Clone)]
struct DataInfo {
    // Radial status: 0/3 = start of sweep, 2/4 = end of sweep (see
    // `convert2radial`); 4 or 6 also terminates the read loop in `new`.
    radial_state: i32,
    spot_blank: i32,
    seq_num: i32,
    rad_num: i32,
    elev_num: i32, // 1-based elevation (cut) index
    az: f32,       // azimuth angle
    el: f32,       // elevation angle
    sec: i32,
    micro_sec: i32,
    data_len: i32,
    moment_num: i32, // number of moment blocks that follow
    last_sec: i32,
    fft_point: i16,
    acc_power: i16,
    #[br(count = 12)]
    reserved: Vec<u8>,
    #[br(count=moment_num)]
    data_block: Vec<DataBlock>,
}
// Radials compare equal when their elevation angles match.
impl PartialEq for DataInfo {
    fn eq(&self, other: &Self) -> bool {
        self.el.eq(&other.el)
    }
}
/// One moment (data-type) block within a radial.
#[derive(Debug, BinRead, Clone)]
struct DataBlock {
    data_type_: i32, // on-file moment code (1..=37)
    // Moment name resolved from DATA_TYPE; "UNKNOWN" for out-of-range codes.
    // BUGFIX: the upper bound was `< 37`, which wrongly excluded code 37 ("VIL")
    // even though DATA_TYPE has 37 entries.
    #[br(calc=if data_type_ > 0 && data_type_ <= 37 {String::from(DATA_TYPE[data_type_ as usize - 1])} else {String::from("UNKNOWN")})]
    data_type: String,
    scale: i32, // decode: physical = (raw - offset) / scale
    offset: i32,
    pub bin_len: u16, // bytes per range bin (2 = two-byte samples)
    flag: u16,
    pub len: i32, // data length in bytes (len / bin_len = number of gates)
    #[br(count = 12)]
    reserved: Vec<u8>,
    #[br(count = len)]
    data: Vec<u8>,
}
/// Reader for WSR-98D standard-format base-data files.
pub struct WSR98DReader;
impl WSR98DReader {
    /// Parse a WSR-98D standard-format base-data file from `buf`.
    ///
    /// Reads the common block, site info, task info, one `CutInfo` per cut,
    /// then radial blocks until an end-of-volume flag, and assembles the
    /// result into an `STRadialData`.
    ///
    /// # Errors
    /// Returns an error when any header or radial block fails to decode.
    pub fn new(buf: &[u8]) -> Result<STRadialData, MetError> {
        println!("parse standard radar");
        let mut cursor = Cursor::new(buf);
        let _common: CommonBlock = cursor.read_le()?;
        let site: SiteInfo = cursor.read_le()?;
        let site_code = site.site_code.clone();
        let site_name = site.site_name.clone();
        let latitude = site.latitude;
        let longtitude = site.longtitude;
        let antena_height = site.antena_height;
        let ground_height = site.ground_height;
        let task: TaskInfo = cursor.read_le()?;
        let start_date = task.start_date.clone();
        let start_time = task.start_time.clone();
        let cut_num = task.cut_num;
        // Capacity mirrors the original (cut_num * 256) — presumably sized for
        // per-pulse-width repeats; TODO confirm against the format spec.
        let mut cut_infos = Vec::with_capacity(cut_num as usize * 256);
        let mut idx_el = Vec::new();
        for i in 0..cut_num {
            let cut: CutInfo = cursor.read_le()?;
            idx_el.push((i + 1, cut.elev));
            cut_infos.push(cut);
        }
        println!("{:?}", idx_el);
        let log_res = cut_infos[0].log_res;
        // BUGFIX: this was copied from `log_res`; use the Doppler-channel field.
        let dop_res = cut_infos[0].dop_res;
        let max_range1 = cut_infos[0].max_range1;
        let max_range2 = cut_infos[0].max_range2;
        println!(
            "log_rs {} dop_res {} max_range1 {} max_range2 {}",
            log_res, dop_res, max_range1, max_range2
        );
        // Read radials until the end-of-volume flag (4 or 6) or EOF.
        let mut data_infos = Vec::new();
        while let Ok(d) = cursor.read_le::<DataInfo>() {
            let radial_state = d.radial_state;
            data_infos.push(d);
            if radial_state == 4 || radial_state == 6 {
                println!("sweep end");
                break;
            }
        }
        let bin_num = data_infos[0].data_block[0].len;
        let data = convert2radial(data_infos, &cut_infos);
        // Maximum range (gate count * gate length), used for plot extents.
        let dist = bin_num as f32 * log_res as f32;
        dbg!(dist);
        Ok(STRadialData {
            _extents: (-dist, dist, -dist, dist),
            site_code,
            site_name,
            latitude,
            longtitude,
            antena_height,
            ground_height,
            start_date,
            start_time,
            log_res,
            dop_res,
            idx_el,
            data,
            bin_num,
        })
    }
}
fn convert2radial(
data_infos: Vec<DataInfo>,
cut_infos: &Vec<CutInfo>,
) -> HashMap<i32, Vec<(f32, f32, HashMap<String, Vec<f32>>)>> {
let mut sweep_start_ray_index = Vec::new();
let mut sweep_end_ray_index = Vec::new();
for (i, d) in data_infos.iter().enumerate() {
let state = d.radial_state;
if state == 0 || state == 3 {
sweep_start_ray_index.push(i)
}
if state == 2 || state == 4 {
sweep_end_ray_index.push(i);
}
// println!("{:#?}", d);
}
// println!("start_index {:?}", sweep_start_ray_index);
// println!("end_index {:?}", sweep_end_ray_index);
let start_end = sweep_start_ray_index.iter().zip(sweep_end_ray_index.iter());
let mut data_infos = data_infos;
//elv index from 1-> az ->data_type->data
let mut el_az_dt_data = HashMap::new();
let mut sorted_data = Vec::new();
for (s, e) in start_end {
let d = &mut data_infos[*s..=*e];
d.sort_by(|a, b| a.az.partial_cmp(&b.az).unwrap());
sorted_data.extend_from_slice(d);
}
for dd in sorted_data.iter() {
// println!(
// "el {:?} {} az {:?} {} ",
// dd.el, dd.elev_num, dd.az, dd.moment_num
// );
let mut dt_data = HashMap::new();
for ddd in &dd.data_block {
// println!(" {} {}", ddd.data_type, ddd.len / ddd.bin_len as i32);
let mut own_data: Vec<f32>; // = Vec::with_capacity(ddd.len as usize);
let dt_slice = &ddd.data;
let offset = ddd.offset;
let scale = ddd.scale;
if ddd.bin_len == 2 {
own_data = dt_slice
.chunks_exact(2)
.map(|v| {
let vv = v.as_ref();
let vv = i16::from_le_bytes([vv[0], vv[1]]);
// vv as f32
if vv < 5 {
return crate::MISSING;
}
(vv - offset as i16) as f32 / scale as f32
})
.collect();
} else {
own_data = dt_slice
.iter()
.map(|v| {
if *v < 5 {
return crate::MISSING;
}
(*v as f32 - offset as f32) / scale as f32
// *v as f32
})
.collect();
}
// if &ddd.data_type == "dBT" {
/ | // println!("{:?}",own_data);
dt_data.insert(ddd.data_type.clone(), own_data);
}
let key = dd.elev_num;
let el = cut_infos[dd.elev_num as usize - 1].elev;
if!el_az_dt_data.contains_key(&key) {
el_az_dt_data.insert(key, vec![(el, dd.az, dt_data)]);
} else {
let v = el_az_dt_data.get_mut(&key).unwrap();
v.push((el, dd.az, dt_data));
}
}
// let d = &el_az_dt_data[&11];
// for dd in d.iter() {
// println!("{} {} {} ", dd.0, dd.1, dd.2["dBZ"].len());
// let tmp = &dd.2["dBZ"];
// for (i, ddd) in tmp.iter().enumerate() {
// // if *ddd!= crate::MISSING {
// print!("{}_{} ", i as f32 * 0.25, ddd);
// // }
// if i > 200 {
// println!("");
// break;
// }
// }
// }
// println!("keys {:?}", el_az_dt_data.keys());
// for i in 1..=11 {
// println!("keys {:?}", el_az_dt_data[&i][0].0);
// }
el_az_dt_data
}
| / // let print_data: Vec<&f32> =
// // own_data.iter().filter(|d| d != &&crate::MISSING).collect();
// println!(
// "{:?} {:?} {:?} {:?} ",
// dd.el,
// dd.az,
// ddd.data_type.clone(),
// own_data
// );
// }
| conditional_block |
wsr98d_reader.rs | use crate::MetError;
use crate::STRadialData;
use binread::prelude::*;
use chrono::NaiveDateTime;
use encoding_rs::*;
use std::cmp::PartialEq;
use std::collections::HashMap;
use std::io::Cursor;
// Moment (data-type) names indexed by the on-file type code minus one;
// "Reserved" marks codes not assigned by the format.
const DATA_TYPE: [&'static str; 37] = [
    "dBT", "dBZ", "V", "W", "SQI", "CPA", "ZDR", "LDR", "CC", "PDP", "KDP", "CP", "Reserved",
    "HCL", "CF", "SNR", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
    "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
    "Reserved", "Zc", "Vc", "Wc", "ZDRc", "FFT", "VIL",
];
/// File-level common header of a WSR-98D standard-format radar file.
#[derive(Debug, BinRead)]
struct CommonBlock {
    magic_num: i32,     // magic word: fixed marker identifying a radar data file
    major_version: u16, // major format version
    minor_version: u16, // minor format version
    generic_type: i32,  // file type: 1 = base data, 2 = weather product, 3 = spectrum data
    product_type: i32,  // product type; unused when the file type is 1
    #[br(count = 16)]
    reserved: Vec<u8>,
}
/// Radar site description block (station identity, location, hardware).
#[derive(Debug, BinRead)]
struct SiteInfo {
    #[br(count = 8)]
    site_code_: Vec<u8>,
    // Station code decoded from GBK with trailing NULs stripped.
    #[br(calc=GBK.decode(&site_code_).0.trim_end_matches('\u{0}').to_string())]
    site_code: String,
    #[br(count = 32)]
    site_name_: Vec<u8>,
    // Station name decoded from GBK with trailing NULs stripped.
    #[br(calc=GBK.decode(&site_name_).0.trim_end_matches('\u{0}').to_string())]
    site_name: String,
    latitude: f32,
    longtitude: f32,
    antena_height: i32, // antenna height
    ground_height: i32, // ground elevation of the radar tower above sea level
    frequency: f32,
    beam_width_h: f32, // horizontal beam width
    beam_width_v: f32, // vertical beam width
    radar_version: i32, // version of the radar data acquisition software
    radar_type: u16, // 1–SA
    // 2–SB
    // 3–SC
    // 33–CA
    // 34–CB
    // 35–CC
    // 36–CCJ
    // 37–CD
    // 65–XA
    // 66–KA
    // 67–W
    antenna_gain: i16,
    trans_loss: i16,
    recv_loss: i16,
    other_loss: i16,
    #[br(count = 46)]
    reserved: Vec<u8>,
}
/// Task (scan-strategy) header of the volume.
#[derive(Debug, BinRead)]
struct TaskInfo {
    #[br(count = 32)]
    task_name: Vec<u8>,
    #[br(count = 128)]
    task_des: Vec<u8>,
    // Polarization: 1 = horizontal, 2 = vertical, 3 = simultaneous H/V, 4 = alternating H/V.
    polarization_type: i32,
    // Scan type: 0 = volume, 1 = single PPI, 2 = single RHI, 3 = single sector,
    // 4 = sector volume, 5 = multi-layer RHI, 6 = manual, 7 = vertical scan.
    scan_type: i32,
    pulse_width: i32,
    // Volume start time as a Unix timestamp; the two derived fields below
    // render it as "YYYYMMDD" and "HHMMSS".
    start_time_: i32,
    #[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%Y%m%d").to_string()})]
    start_date: String,
    #[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%H%M%S").to_string()})]
    start_time: String,
    cut_num: i32, // number of cuts (elevations); one CutInfo is read per cut
    noise_h: f32,
    noise_v: f32,
    cali_h: f32,
    cali_v: f32,
    h_noise_t: f32,
    v_noise_t: f32,
    zdr_cali: f32,
    phidp_cali: f32,
    ldr_cali: f32,
    // pulse_width2: f32,
    // pulse_width3: f32,
    // pulse_width4: f32,
    #[br(count = 40)]
    reserved: Vec<u8>,
}
/// Per-cut (per-elevation) scan parameters.
#[derive(Debug, BinRead)]
struct CutInfo {
    process_mode: i32,
    // Waveform: 0 = CS continuous surveillance, 1 = CD continuous Doppler,
    // 2 = CDX Doppler extended, 3 = Rx test, 4 = BATCH mode, 5 = dual PRF,
    // 6 = staggered PRT, 7 = single PRF, 8 = linear FM, 9 = phase encoding.
    wave_form: i32,
    prf1: f32,
    prf2: f32,
    deal_mod: i32,
    az: f32,
    elev: f32, // elevation angle of this cut; also the equality key (PartialEq)
    start_az: f32,
    end_az: f32,
    ang_res: f32,
    scan_speed: f32,
    log_res: i32, // gate length of the surveillance channel; used as radial resolution in `new`
    dop_res: i32, // gate length of the Doppler channel — presumably; confirm against format spec
    max_range1: i32,
    max_range2: i32,
    start_range: i32,
    sample1: i32,
    sample2: i32,
    phase_mod: i32,
    at_loss: f32,
    ny_speed: f32,
    moments_mask: i64,
    moments_size_mask: i64,
    mis_filter_mask: i32,
    sqi: f32,
    sig: f32,
    csr: f32,
    log: f32,
    cpa: f32,
    pmi: f32,
    dplog: f32,
    #[br(count = 4)]
    r: Vec<u8>,
    dbt_mask: i32,
    dbz_mask: i32,
    v_mask: i32,
    w_mask: i32,
    dp_mask: i32,
    #[br(count = 12)]
    mask_reserved: Vec<u8>,
    scan_sync: i32,
    direction: i32,
    ground_clutter_type: u16,
    ground_clutter_filter_type: u16,
    ground_clutter_width: u16,
    ground_clutter_filter_win: i16,
    pulse_width: u16,
    pulse_width1: u16,
    pulse_width2: u16,
    pulse_width3: u16,
    pulse_width4: u16,
    #[br(count = 62)]
    reserved: Vec<u8>,
}
// Cuts compare equal when their elevation angles match (enables dedup by elevation).
impl PartialEq for CutInfo {
    fn eq(&self, other: &Self) -> bool {
        self.elev.eq(&other.elev)
    }
}
/// One radial (ray) header plus its moment data blocks.
#[derive(Debug, BinRead, Clone)]
struct DataInfo {
    // Radial status: 0/3 = start of sweep, 2/4 = end of sweep (see
    // `convert2radial`); 4 or 6 also terminates the read loop in `new`.
    radial_state: i32,
    spot_blank: i32,
    seq_num: i32,
    rad_num: i32,
    elev_num: i32, // 1-based elevation (cut) index
    az: f32,       // azimuth angle
    el: f32,       // elevation angle
    sec: i32,
    micro_sec: i32,
    data_len: i32,
    moment_num: i32, // number of moment blocks that follow
    last_sec: i32,
    fft_point: i16,
    acc_power: i16,
    #[br(count = 12)]
    reserved: Vec<u8>,
    #[br(count=moment_num)]
    data_block: Vec<DataBlock>,
}
impl PartialEq for DataInfo {
fn eq(&self, other: &Self) -> bool {
self.el == other.el
}
}
#[derive(Debug, BinRead, Clone)]
struct DataBlock {
data_type_: i32,
#[br(calc=if data_type_>0 && data_type_ <37 {String::from(DATA_TYPE[data_type_ as usize-1])}else {String::from("UNKNOWN")})]
data_type: String,
scale: i32,
offset: i32,
pub bin_len: u16, //一个库的字节数 。2为两个字节
flag: u16,
pub len: i32, //库长
#[br(count = 12)]
reserved: Vec<u8>,
#[br(count= len)]
data: Vec<u8>,
}
pub struct WSR98DReader;
impl WSR98DReader {
pub fn new(buf: &[u8]) -> Result<STRadialData, MetError> {
println!("parse standard radar");
let mut cursor = Cursor::new(buf);
let h: CommonBlock = cursor.read_le()?;
// dbg!(&h);
let h: SiteInfo = cursor.read_le()?;
// dbg!(&h);
let site_code = h.site_code.clone();
let site_name = h.site_name.clone();
let latitude = h.latit | let longtitude = h.longtitude;
let antena_height = h.antena_height;
let ground_height = h.ground_height;
let h: TaskInfo = cursor.read_le()?;
let start_date = h.start_date.clone();
let start_time = h.start_time.clone();
// dbg!(&h);
let cut_num = h.cut_num;
let mut cut_infos = Vec::with_capacity(cut_num as usize * 256);
let mut idx_el = Vec::new();
for i in 0..cut_num {
let h: CutInfo = cursor.read_le()?;
idx_el.push((i + 1, h.elev));
// println!("{:?}", h);
cut_infos.push(h);
}
println!("{:?}", idx_el);
// cut_infos.dedup();
// for c in cut_infos.iter() {
// println!("{:?}", c);
// }
// println!("{:?}", cut_infos.len());
let log_res = cut_infos[0].log_res;
let dop_res = cut_infos[0].log_res;
let max_range1 = cut_infos[0].max_range1;
let max_range2 = cut_infos[0].max_range2;
println!(
"log_rs {} dop_res {} max_range1 {} max_range2 {}",
log_res, dop_res, max_range1, max_range2
);
let mut data_infos = Vec::new();
loop {
if let Ok(d) = cursor.read_le::<DataInfo>() {
let radial_state = d.radial_state;
data_infos.push(d);
if radial_state == 4 || radial_state == 6 {
println!("sweep end");
break;
}
} else {
break;
}
}
let bin_num = data_infos[0].data_block[0].len;
// dbg!(data_infos.len(), bin_num);
let data = convert2radial(data_infos, &cut_infos);
let dist = bin_num as f32 * log_res as f32;
dbg!(dist);
Ok(STRadialData {
_extents: (-dist, dist, -dist, dist),
site_code,
site_name,
latitude,
longtitude,
antena_height,
ground_height,
start_date,
start_time,
log_res,
dop_res,
idx_el,
data,
bin_num,
})
}
}
fn convert2radial(
data_infos: Vec<DataInfo>,
cut_infos: &Vec<CutInfo>,
) -> HashMap<i32, Vec<(f32, f32, HashMap<String, Vec<f32>>)>> {
let mut sweep_start_ray_index = Vec::new();
let mut sweep_end_ray_index = Vec::new();
for (i, d) in data_infos.iter().enumerate() {
let state = d.radial_state;
if state == 0 || state == 3 {
sweep_start_ray_index.push(i)
}
if state == 2 || state == 4 {
sweep_end_ray_index.push(i);
}
// println!("{:#?}", d);
}
// println!("start_index {:?}", sweep_start_ray_index);
// println!("end_index {:?}", sweep_end_ray_index);
let start_end = sweep_start_ray_index.iter().zip(sweep_end_ray_index.iter());
let mut data_infos = data_infos;
//elv index from 1-> az ->data_type->data
let mut el_az_dt_data = HashMap::new();
let mut sorted_data = Vec::new();
for (s, e) in start_end {
let d = &mut data_infos[*s..=*e];
d.sort_by(|a, b| a.az.partial_cmp(&b.az).unwrap());
sorted_data.extend_from_slice(d);
}
for dd in sorted_data.iter() {
// println!(
// "el {:?} {} az {:?} {} ",
// dd.el, dd.elev_num, dd.az, dd.moment_num
// );
let mut dt_data = HashMap::new();
for ddd in &dd.data_block {
// println!(" {} {}", ddd.data_type, ddd.len / ddd.bin_len as i32);
let mut own_data: Vec<f32>; // = Vec::with_capacity(ddd.len as usize);
let dt_slice = &ddd.data;
let offset = ddd.offset;
let scale = ddd.scale;
if ddd.bin_len == 2 {
own_data = dt_slice
.chunks_exact(2)
.map(|v| {
let vv = v.as_ref();
let vv = i16::from_le_bytes([vv[0], vv[1]]);
// vv as f32
if vv < 5 {
return crate::MISSING;
}
(vv - offset as i16) as f32 / scale as f32
})
.collect();
} else {
own_data = dt_slice
.iter()
.map(|v| {
if *v < 5 {
return crate::MISSING;
}
(*v as f32 - offset as f32) / scale as f32
// *v as f32
})
.collect();
}
// if &ddd.data_type == "dBT" {
// // let print_data: Vec<&f32> =
// // own_data.iter().filter(|d| d!= &&crate::MISSING).collect();
// println!(
// "{:?} {:?} {:?} {:?} ",
// dd.el,
// dd.az,
// ddd.data_type.clone(),
// own_data
// );
// }
// println!("{:?}",own_data);
dt_data.insert(ddd.data_type.clone(), own_data);
}
let key = dd.elev_num;
let el = cut_infos[dd.elev_num as usize - 1].elev;
if!el_az_dt_data.contains_key(&key) {
el_az_dt_data.insert(key, vec![(el, dd.az, dt_data)]);
} else {
let v = el_az_dt_data.get_mut(&key).unwrap();
v.push((el, dd.az, dt_data));
}
}
// let d = &el_az_dt_data[&11];
// for dd in d.iter() {
// println!("{} {} {} ", dd.0, dd.1, dd.2["dBZ"].len());
// let tmp = &dd.2["dBZ"];
// for (i, ddd) in tmp.iter().enumerate() {
// // if *ddd!= crate::MISSING {
// print!("{}_{} ", i as f32 * 0.25, ddd);
// // }
// if i > 200 {
// println!("");
// break;
// }
// }
// }
// println!("keys {:?}", el_az_dt_data.keys());
// for i in 1..=11 {
// println!("keys {:?}", el_az_dt_data[&i][0].0);
// }
el_az_dt_data
}
| ude;
| identifier_name |
wsr98d_reader.rs | use crate::MetError;
use crate::STRadialData;
use binread::prelude::*;
use chrono::NaiveDateTime;
use encoding_rs::*;
use std::cmp::PartialEq;
use std::collections::HashMap;
use std::io::Cursor;
const DATA_TYPE: [&'static str; 37] = [
"dBT", "dBZ", "V", "W", "SQI", "CPA", "ZDR", "LDR", "CC", "PDP", "KDP", "CP", "Reserved",
"HCL", "CF", "SNR", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
"Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
"Reserved", "Zc", "Vc", "Wc", "ZDRc", "FFT", "VIL",
];
#[derive(Debug, BinRead)]
struct CommonBlock {
magic_num: i32, //魔术字 固定标志,用来指示雷达数据文件。
major_version: u16, //主版本号
minor_version: u16, //次版本号
generic_type: i32, //文件类型 1–基数据文件; 2–气象产品文件; 3–谱数据文件;
product_type: i32, //产品类型 文件类型为1时此字段无效。
#[br(count = 16)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct SiteInfo {
#[br(count = 8)]
site_code_: Vec<u8>,
#[br(calc=GBK.decode(&site_code_).0.trim_end_matches('\u{0}').to_string())]
site_code: String,
#[br(count = 32)]
site_name_: Vec<u8>,
#[br(calc=GBK.decode(&site_name_).0.trim_end_matches('\u{0}').to_string())]
site_name: String,
latitude: f32,
longtitude: f32,
antena_height: i32, //天线高
ground_height: i32, //雷达塔楼地面海拔高度
frequency: f32,
beam_width_h: f32, //水平波束宽
beam_width_v: f32, //垂直波束宽
radar_version: i32, //雷达数据采集软件版本号
radar_type: u16, //1–SA
// 2–SB
// 3–SC
// 33–CA
// 34–CB
// 35–CC
// 36–CCJ
// 37–CD
// 65–XA
// 66–KA
// 67–W
antenna_gain: i16,
trans_loss: i16,
recv_loss: i16,
other_loss: i16,
#[br(count = 46)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct TaskInfo {
#[br(count = 32)]
task_name: Vec<u8>,
#[br(count = 128)]
task_des: Vec<u8>,
polarization_type: i32, //1 – 水平极化 2 – 垂直极化 3 – 水平/垂直同时 4 – 水平/垂直交替
scan_type: i32, //0 – 体扫 1–单层PPI 2 – 单层RHI 3 – 单层扇扫 4 – 扇体扫 5 – 多层RHI 6 – 手工扫描 7 – 垂直扫描
pulse_width: i32,
start_time_: i32,
#[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%Y%m%d").to_string()})]
start_date: String,
#[br(calc={let t = NaiveDateTime::from_timestamp(start_time_ as i64,0);t.format("%H%M%S").to_string()})]
start_time: String,
cut_num: i32,
noise_h: f32,
noise_v: f32,
cali_h: f32,
cali_v: f32,
h_noise_t: f32,
v_noise_t: f32,
zdr_cali: f32,
phidp_cali: f32,
ldr_cali: f32,
// pulse_width2: f32,
// pulse_width3: f32,
// pulse_width4: f32,
#[br(count = 40)]
reserved: Vec<u8>,
}
#[derive(Debug, BinRead)]
struct CutInfo {
process_mode: i32,
wave_form: i32, //0 – CS连续监测
// 1 – CD连续多普勒
// 2 – CDX多普勒扩展
// 3 – Rx Test
// 4 – BATCH批模式
// 5 – Dual PRF双PRF
// 6 - Staggered PRT 参差PRT
// 7 - single PRF 单PRF
// 8 –linear 线性调频
// 9 - phase encoding 相位编码
prf1: f32,
prf2: f32,
deal_mod: i32,
az: f32,
elev: f32,
start_az: f32,
end_az: f32,
ang_res: f32,
scan_speed: f32,
log_res: i32,
dop_res: i32,
max_range1: i32,
max_range2: i32,
start_range: i32,
sample1: i32,
sample2: i32,
phase_mod: i32,
at_loss: f32,
ny_speed: f32,
moments_mask: i64,
moments_size_mask: i64,
mis_filter_mask: i32,
sqi: f32,
sig: f32,
csr: f32,
log: f32,
cpa: f32,
pmi: f32,
dplog: f32,
#[br(count = 4)]
r: Vec<u8>,
dbt_mask: i32,
dbz_mask: i32,
v_mask: i32,
w_mask: i32,
dp_mask: i32,
#[br(count = 12)]
mask_reserved: Vec<u8>,
scan_sync: i32,
direction: i32,
ground_clutter_type: u16,
ground_clutter_filter_type: u16,
ground_clutter_width: u16,
ground_clutter_filter_win: i16,
pulse_width: u16,
pulse_width1: u16,
pulse_width2: u16,
pulse_width3: u16,
pulse_width4: u16,
#[br(count = 62)]
reserved: Vec<u8>,
}
impl PartialEq for CutInfo {
fn eq(&self, other: &Self) -> bool {
self.elev == other.elev
}
}
#[derive(Debug, BinRead, Clone)]
struct DataInfo {
radial_state: i32,
spot_blank: i32,
seq_num: i32,
rad_num: i32,
elev_num: i32,
az: f32,
el: f32,
sec: i32,
micro_sec: i32,
data_len: i32,
moment_num: i32,
last_sec: i32,
fft_point: i16,
acc_power: i16,
#[br(count = 12)]
reserved: Vec<u8>,
#[br(count=moment_num)]
data_block: Vec<DataBlock>,
}
impl PartialEq for DataInfo {
fn eq(&self, other: &Self) -> bool {
self.el == other.el
}
}
#[derive(Debug, BinRead, Clone)]
struct DataBlock {
data_type_: i32,
#[br(calc=if data_type_>0 && data_type_ <37 {String::from(DATA_TYPE[data_type_ as usize-1])}else {String::from("UNKNOWN")})]
data_type: String,
scale: i32,
offset: i32,
pub bin_len: u16, //一个库的字节数 。2为两个字节
flag: u16,
pub len: i32, //库长
#[br(count = 12)]
reserved: Vec<u8>,
#[br(count= len)]
data: Vec<u8>,
}
pub struct WSR98DReader;
impl WSR98DReader {
pub fn new(buf: &[u8]) -> Result<STRadialData, MetError> {
println!("parse standard radar");
let mut cursor = Cursor::new(buf);
let h: CommonBlock = cursor.read_le()?;
// dbg!(&h);
let h: SiteInfo = cursor.read_le()?;
// dbg!(&h);
let site_code = h.site_code.clone();
let site_name = h.site_name.clone();
let latitude = h.latitude;
let longtitude = h.longtitude;
let antena_height = h.antena_height;
let ground_height = h.ground_height;
let h: TaskInfo = cursor.read_le()?;
let start_date = h.start_date.clone();
let start_time = h.start_time.clone();
// dbg!(&h);
let cut_num = h.cut_num;
let mut cut_infos = Vec::with_capacity(cut_num as usize * 256);
let mut idx_el = Vec::new();
for i in 0..cut_num {
let h: CutInfo = cursor.read_le()?;
idx_el.push((i + 1, h.elev));
// println!("{:?}", h);
cut_infos.push(h);
}
println!("{:?}", idx_el);
// cut_infos.dedup();
// for c in cut_infos.iter() {
// println!("{:?}", c);
// }
// println!("{:?}", cut_infos.len());
let log_res = cut_infos[0].log_res;
let dop_res = cut_infos[0].log_res;
let max_range1 = cut_infos[0].max_range1;
let max_range2 = cut_infos[0].max_range2;
println!(
"log_rs {} dop_res {} max_range1 {} max_range2 {}",
log_res, dop_res, max_range1, max_range2
);
let mut data_infos = Vec::new();
loop {
if let Ok(d) = cursor.read_le::<DataInfo>() {
let radial_state = d.radial_state;
data_infos.push(d);
if radial_state == 4 || radial_state == 6 {
println!("sweep end");
break;
}
} else {
break;
}
}
let bin_num = data_infos[0].data_block[0].len;
// dbg!(data_infos.len(), bin_num);
let data = convert2radial(data_infos, &cut_infos);
let dist = bin_num as f32 * log_res as f32;
dbg!(dist);
Ok(STRadialData {
_extents: (-dist, dist, -dist, dist),
site_code,
site_name,
latitude,
longtitude,
antena_height,
ground_height,
start_date,
start_time,
log_res,
dop_res,
idx_el,
data,
bin_num,
})
}
}
fn convert2radial(
data_infos: Vec<DataInfo>,
cut_infos: &Vec<CutInfo>,
) -> HashMap<i32, Vec<(f32, f32, HashMap<String, Vec<f32>>)>> {
let mut sweep_start_ray_index = Vec::new();
let mut sweep_end_ray_index = Vec::new();
for (i, d) in data_infos.iter().enumerate() {
let state = d.radial_state;
if state == 0 || state == 3 {
sweep_start_ray_index.push(i)
}
if state == 2 || state == 4 {
sweep_end_ray_index.push(i);
}
// println!("{:#?}", d);
}
// println!("start_index {:?}", sweep_start_ray_index);
// println!("end_index {:?}", sweep_end_ray_index);
let start_end = sweep_start_ray_index.iter().zip(sweep_end_ray_index.iter());
let mut data_infos = data_infos;
//elv index from 1-> az ->data_type->data
let mut el_az_dt_data = HashMap::new();
let mut sorted_data = Vec::new();
for (s, e) in start_end {
let d = &mut data_infos[*s..=*e];
d.sort_by(|a, b| a.az.partial_cmp(&b.az).unwrap());
sorted_data.extend_from_slice(d);
}
for dd in sorted_data.iter() {
// println!(
// "el {:?} {} az {:?} {} ",
// dd.el, dd.elev_num, dd.az, dd.moment_num
// );
let mut dt_data = HashMap::new();
for ddd in &dd.data_block {
// println!(" {} {}", ddd.data_type, ddd.len / ddd.bin_len as i32);
let mut own_data: Vec<f32>; // = Vec::with_capacity(ddd.len as usize);
let dt_slice = &ddd.data;
let offset = ddd.offset;
let scale = ddd.scale;
if ddd.bin_len == 2 {
own_data = dt_slice
.chunks_exact(2)
.map(|v| {
let vv = v.as_ref();
let vv = i16::from_le_bytes([vv[0], vv[1]]);
// vv as f32
if vv < 5 {
return crate::MISSING;
}
(vv - offset as i16) as f32 / scale as f32 | .iter()
.map(|v| {
if *v < 5 {
return crate::MISSING;
}
(*v as f32 - offset as f32) / scale as f32
// *v as f32
})
.collect();
}
// if &ddd.data_type == "dBT" {
// // let print_data: Vec<&f32> =
// // own_data.iter().filter(|d| d!= &&crate::MISSING).collect();
// println!(
// "{:?} {:?} {:?} {:?} ",
// dd.el,
// dd.az,
// ddd.data_type.clone(),
// own_data
// );
// }
// println!("{:?}",own_data);
dt_data.insert(ddd.data_type.clone(), own_data);
}
let key = dd.elev_num;
let el = cut_infos[dd.elev_num as usize - 1].elev;
if!el_az_dt_data.contains_key(&key) {
el_az_dt_data.insert(key, vec![(el, dd.az, dt_data)]);
} else {
let v = el_az_dt_data.get_mut(&key).unwrap();
v.push((el, dd.az, dt_data));
}
}
// let d = &el_az_dt_data[&11];
// for dd in d.iter() {
// println!("{} {} {} ", dd.0, dd.1, dd.2["dBZ"].len());
// let tmp = &dd.2["dBZ"];
// for (i, ddd) in tmp.iter().enumerate() {
// // if *ddd!= crate::MISSING {
// print!("{}_{} ", i as f32 * 0.25, ddd);
// // }
// if i > 200 {
// println!("");
// break;
// }
// }
// }
// println!("keys {:?}", el_az_dt_data.keys());
// for i in 1..=11 {
// println!("keys {:?}", el_az_dt_data[&i][0].0);
// }
el_az_dt_data
} | })
.collect();
} else {
own_data = dt_slice | random_line_split |
buffer_geometry.rs | extern crate uuid;
extern crate heck;
extern crate specs;
use self::uuid::Uuid;
use self::heck::ShoutySnakeCase;
use std::vec::Vec;
use std::fmt;
use std::sync::{Arc,Mutex, LockResult, MutexGuard};
use std::mem;
use std::error::Error;
use self::specs::{Component, VecStorage};
use math::{
Vector,
Vector2,
Vector3,
Vector4,
Matrix2,
Matrix3,
Matrix4,
};
use core::{
BBox3,
};
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub enum BufferData {
Matrix2(Vec<Matrix2<f32>>),
Matrix3(Vec<Matrix3<f32>>),
Matrix4(Vec<Matrix4<f32>>),
Vector2(Vec<Vector2<f32>>),
Vector3(Vec<Vector3<f32>>),
Vector4(Vec<Vector4<f32>>),
F32(Vec<f32>),
I32(Vec<i32>),
U32(Vec<u32>),
I16(Vec<i16>),
U16(Vec<u16>),
I8(Vec<i8>),
U8(Vec<u8>),
}
impl BufferData {
pub fn item_size(&self) -> usize {
match self {
BufferData::Matrix2(_) => 4,
BufferData::Matrix3(_) => 9,
BufferData::Matrix4(_) => 16,
BufferData::Vector2(_) => 2,
BufferData::Vector3(_) => 3,
BufferData::Vector4(_) => 4,
BufferData::F32(_) => 1,
BufferData::I32(_) => 1,
BufferData::U32(_) => 1,
BufferData::I16(_) => 1,
BufferData::U16(_) => 1,
BufferData::I8(_) => 1,
BufferData::U8(_) => 1,
}
}
pub fn len(&self) -> usize {
match self {
BufferData::Matrix2(a) => a.len(),
BufferData::Matrix3(a) => a.len(),
BufferData::Matrix4(a) => a.len(),
BufferData::Vector2(a) => a.len(),
BufferData::Vector3(a) => a.len(),
BufferData::Vector4(a) => a.len(),
BufferData::F32(a) => a.len(),
BufferData::I32(a) => a.len(),
BufferData::U32(a) => a.len(),
BufferData::I16(a) => a.len(),
BufferData::U16(a) => a.len(),
BufferData::I8(a) => a.len(),
BufferData::U8(a) => a.len(),
}
}
pub fn elem_byte_len(&self) -> usize {
let bytes = match self {
BufferData::Matrix2(_) => mem::size_of::<f32>(),
BufferData::Matrix3(_) => mem::size_of::<f32>(),
BufferData::Matrix4(_) => mem::size_of::<f32>(),
BufferData::Vector2(_) => mem::size_of::<f32>(),
BufferData::Vector3(_) => mem::size_of::<f32>(),
BufferData::Vector4(_) => mem::size_of::<f32>(),
BufferData::F32(_) => mem::size_of::<f32>(),
BufferData::I32(_) => mem::size_of::<i32>(),
BufferData::U32(_) => mem::size_of::<u32>(),
BufferData::I16(_) => mem::size_of::<i16>(),
BufferData::U16(_) => mem::size_of::<u16>(),
BufferData::I8(_) => mem::size_of::<i8>(),
BufferData::U8(_) => mem::size_of::<u8>(),
};
self.item_size() * bytes
}
pub fn definition(&self) -> String {
match self {
BufferData::Matrix2(_) => "MAT2".to_string(),
BufferData::Matrix3(_) => "MAT3".to_string(),
BufferData::Matrix4(_) => "MAT4".to_string(),
BufferData::Vector2(_) => "VEC2".to_string(),
BufferData::Vector3(_) => "VEC3".to_string(),
BufferData::Vector4(_) => "VEC4".to_string(),
BufferData::F32(_) => "F32".to_string(),
BufferData::I32(_) => "I32".to_string(),
BufferData::U32(_) => "U32".to_string(),
BufferData::I16(_) => "I16".to_string(),
BufferData::U16(_) => "U16".to_string(),
BufferData::I8(_) => "I8".to_string(),
BufferData::U8(_) => "U8".to_string(),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum BufferType {
Position,
Normal,
Tangent,
UV(usize),
Color(usize),
Joint(usize),
Weight(usize),
Other(String),
}
impl BufferType {
pub fn definition(&self) -> String {
match self {
BufferType::Position => "POSITION".to_string(),
BufferType::Normal => "NORMAL".to_string(),
BufferType::Tangent => "TANGENT".to_string(),
BufferType::UV(n) => format!("UV_{}", n),
BufferType::Color(n) => format!("COLOR_{}", n),
BufferType::Joint(n) => format!("JOINT_{}", n),
BufferType::Weight(n) => format!("WEIGHT_{}", n),
BufferType::Other(string) => string.to_shouty_snake_case(),
}
}
}
#[derive(Clone, Debug)]
pub struct BufferAttribute {
pub data: BufferData,
pub buffer_type: BufferType,
pub dynamic: bool,
pub normalized: bool,
// pub version: usize,
}
impl BufferAttribute {
pub fn | (&self) -> usize {
let l = self.len();
l / self.item_size()
}
pub fn item_size(&self) -> usize {
self.data.item_size()
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn set_normalized(&mut self, normalized: bool) -> &mut Self {
self.normalized = normalized;
self
}
pub fn set_dynamic(&mut self, dynamic: bool) -> &mut Self {
self.dynamic = dynamic;
self
}
pub fn definition(&self) ->String {
format!("VERTEX_{}_{}", self.buffer_type.definition(), self.data.definition())
}
}
#[allow(dead_code)]
#[derive(Hash, Eq, PartialEq, Debug, Clone)]
pub struct BufferGroup {
pub start: usize,
pub material_index: usize,
pub count: usize,
pub name: Option<String>,
}
#[allow(dead_code)]
#[derive(Clone)]
pub struct BufferGeometry {
pub uuid: Uuid,
pub name: String,
pub groups: Vec<BufferGroup>,
pub indices: Vec<u32>,
pub attributes: Vec<BufferAttribute>,
pub buffer_order: Vec<BufferType>,
pub b_box: Option<BBox3<f32>>,
callbacks: Vec<fn(&mut BufferGeometry)>,
}
impl fmt::Debug for BufferGeometry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "=====================
BufferGeometry: {}
uuid: {}
groups: {:?}
b_box: {:?}
callbacks: {}
indices: {:?}
attributes: {:?}
=====================",
self.name,
self.uuid,
self.groups,
self.b_box,
self.callbacks.len(),
self.indices,
self.attributes,
)
}
}
#[allow(dead_code)]
impl BufferGeometry {
pub fn new() -> Self {
Self {
attributes: Vec::new(),
groups: Vec::new(),
indices: Vec::new(),
uuid: Uuid::new_v4(),
callbacks: Vec::new(),
name: "".to_string(),
b_box: None,
buffer_order: vec![BufferType::Position, BufferType::Normal, BufferType::UV(0), BufferType::Color(0), BufferType::Joint(0), BufferType::Weight(0)],
}
}
pub fn iter_attributes<'a>(&'a self) -> impl Iterator<Item= &'a BufferAttribute> {
self.buffer_order.iter()
.map(move |e| self.get_attribute(e.clone()) )
.filter(|e| e.is_some() )
.map(|e| e.unwrap() )
}
// pub fn iter_attributes_mut<'a>(&'a mut self) -> impl Iterator<Item= &'a mut BufferAttribute> {
// self.buffer_order.iter()
// .map(move |e| self.get_attribute_mut(e.clone()) )
// .filter(|e| e.is_some() )
// .map(|e| e.unwrap() )
// }
pub fn set_indices(&mut self, indices: Vec<u32>) -> &mut Self {
self.indices = indices;
self
}
pub fn gen_indices(&mut self) -> Result<(), &str> {
let mut len = 0;
match self.get_attribute(BufferType::Position) {
None => {
return Err("BufferGeometry: cant find position");
}
Some(positions) => {
len = positions.len();
}
};
let indices = (0..len as u32).collect();
self.set_indices(indices);
Ok(())
}
pub fn add_buffer_attribute(
&mut self,
buffer_attribute: BufferAttribute,
) -> &mut BufferAttribute {
let index = self.attributes.iter().position( |attr| attr.buffer_type == buffer_attribute.buffer_type);
if let Some(index) = index {
self.attributes.remove(index);
}
self.attributes.push(buffer_attribute);
if!self.attributes.iter().all( |e| e.len() == self.attributes[0].len() ) {
panic!("BufferGeometry: different buffer length: {}", self.name);
}
let i = self.attributes.len() - 1;
&mut self.attributes[i]
}
pub fn create_buffer_attribute(
&mut self,
buffer_type: BufferType,
data: BufferData,
) -> &mut BufferAttribute {
let buffer_attribute = BufferAttribute {
buffer_type,
data,
normalized: false,
dynamic: false,
// version: 0,
};
self.add_buffer_attribute(buffer_attribute)
}
pub fn on_drop(&mut self, cb: fn(&mut BufferGeometry)) {
self.callbacks.push(cb);
}
pub fn get_attribute(&self, buffer_type: BufferType) -> Option<&BufferAttribute> {
self.attributes.iter().find(|e| e.buffer_type == buffer_type)
}
pub fn has_attribute(&self, buffer_type: BufferType) -> bool {
self.attributes.iter().any(|e| e.buffer_type == buffer_type)
}
pub fn get_attribute_mut(&mut self, buffer_type: BufferType) -> Option<&mut BufferAttribute> {
self.attributes.iter_mut().find(|e| e.buffer_type == buffer_type)
}
pub fn generate_normals(&mut self) {
let mut normals = None;
{
let attribute = self.get_attribute(BufferType::Position).unwrap();
if let BufferData::Vector3(data) = &attribute.data {
let mut calc_normals = vec![Vec::new(); data.len()];
let indices = &self.indices;
let il = indices.len();
let mut i = 0;
while i < il {
let a = &data[ indices[i] as usize];
let b = &data[ indices[i+1] as usize];
let c = &data[ indices[i+2] as usize];
let mut cb = c - b;
let ab = a - b;
cb.cross(&ab);
cb.normalize();
calc_normals[ indices[i] as usize ].push(cb.clone());
calc_normals[ indices[i+1] as usize ].push(cb.clone());
calc_normals[ indices[i+2] as usize ].push(cb);
i+=3;
}
let calc_normals = calc_normals
.iter()
.map(|items|{
if items.len() == 1 {
return items[0].clone();
}
let mut res = Vector3::add_all_vectors(items);
res.normalize();
res
})
.collect();
normals = Some(calc_normals);
}
}
if let Some(normal) = normals {
self.create_buffer_attribute(BufferType::Normal, BufferData::Vector3(normal));
}
}
pub fn duplicate(&self) -> Self {
let mut data = self.clone();
data.uuid = Uuid::new_v4();
data
}
pub fn update_box3 (&mut self) -> Result <(), Box<Error>> {
let mut b_box = None;
if let Some(attr) = self.get_attribute(BufferType::Position) {
if let BufferData::Vector3(positions) = &attr.data {
let mut b = BBox3::new_empty();
b.set_from_array(&positions[..]);
b_box = Some(b);
}
}
if b_box.is_none() {return Err( Box::from("cant update b_box") ); }
self.b_box = b_box;
Ok(())
}
pub fn get_b_box(&mut self) -> Result<BBox3<f32>, Box<Error>> {
if self.b_box.is_some() {
return Ok(self.b_box.as_ref().unwrap().clone())
}
self.update_box3()?;
Ok(self.b_box.as_ref().unwrap().clone())
}
pub fn scale_positions_by_vec(&mut self, v: &Vector3<f32>) -> Option<()> {
if let Some(attr) = self.get_attribute_mut(BufferType::Position) {
if let BufferData::Vector3(positions) = &mut attr.data {
positions
.iter_mut()
.for_each(|e| {
e.multiply(v);
});
return Some(());
}
return None;
}
None
}
pub fn get_vertex_byte_size(&self) -> usize {
self.iter_attributes().map(|attr| attr.data.elem_byte_len()).sum()
}
}
impl Drop for BufferGeometry {
fn drop(&mut self) {
while self.callbacks.len() > 0 {
let cb = self.callbacks.pop().unwrap();
cb(self);
}
}
}
#[derive(Clone)]
pub struct SharedGeometry (Arc<Mutex<BufferGeometry>>);
impl SharedGeometry {
pub fn new(g: BufferGeometry) -> Self {
SharedGeometry(Arc::new(Mutex::new(g)))
}
pub fn lock(&mut self) -> LockResult<MutexGuard<BufferGeometry>> {
self.0.lock()
}
}
impl Component for SharedGeometry {
type Storage = VecStorage<Self>;
}
| count | identifier_name |
buffer_geometry.rs | extern crate uuid;
extern crate heck;
extern crate specs;
use self::uuid::Uuid;
use self::heck::ShoutySnakeCase;
use std::vec::Vec;
use std::fmt;
use std::sync::{Arc,Mutex, LockResult, MutexGuard};
use std::mem;
use std::error::Error;
use self::specs::{Component, VecStorage};
use math::{
Vector,
Vector2,
Vector3,
Vector4,
Matrix2,
Matrix3,
Matrix4,
};
use core::{
BBox3,
};
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub enum BufferData {
Matrix2(Vec<Matrix2<f32>>),
Matrix3(Vec<Matrix3<f32>>),
Matrix4(Vec<Matrix4<f32>>),
Vector2(Vec<Vector2<f32>>),
Vector3(Vec<Vector3<f32>>),
Vector4(Vec<Vector4<f32>>),
F32(Vec<f32>),
I32(Vec<i32>),
U32(Vec<u32>),
I16(Vec<i16>),
U16(Vec<u16>),
I8(Vec<i8>),
U8(Vec<u8>),
}
impl BufferData {
pub fn item_size(&self) -> usize {
match self {
BufferData::Matrix2(_) => 4,
BufferData::Matrix3(_) => 9,
BufferData::Matrix4(_) => 16,
BufferData::Vector2(_) => 2,
BufferData::Vector3(_) => 3,
BufferData::Vector4(_) => 4,
BufferData::F32(_) => 1,
BufferData::I32(_) => 1,
BufferData::U32(_) => 1,
BufferData::I16(_) => 1,
BufferData::U16(_) => 1,
BufferData::I8(_) => 1,
BufferData::U8(_) => 1,
}
}
pub fn len(&self) -> usize {
match self {
BufferData::Matrix2(a) => a.len(),
BufferData::Matrix3(a) => a.len(),
BufferData::Matrix4(a) => a.len(),
BufferData::Vector2(a) => a.len(),
BufferData::Vector3(a) => a.len(),
BufferData::Vector4(a) => a.len(),
BufferData::F32(a) => a.len(),
BufferData::I32(a) => a.len(),
BufferData::U32(a) => a.len(),
BufferData::I16(a) => a.len(),
BufferData::U16(a) => a.len(),
BufferData::I8(a) => a.len(),
BufferData::U8(a) => a.len(),
}
}
pub fn elem_byte_len(&self) -> usize {
let bytes = match self {
BufferData::Matrix2(_) => mem::size_of::<f32>(),
BufferData::Matrix3(_) => mem::size_of::<f32>(),
BufferData::Matrix4(_) => mem::size_of::<f32>(),
BufferData::Vector2(_) => mem::size_of::<f32>(),
BufferData::Vector3(_) => mem::size_of::<f32>(),
BufferData::Vector4(_) => mem::size_of::<f32>(),
BufferData::F32(_) => mem::size_of::<f32>(),
BufferData::I32(_) => mem::size_of::<i32>(),
BufferData::U32(_) => mem::size_of::<u32>(),
BufferData::I16(_) => mem::size_of::<i16>(),
BufferData::U16(_) => mem::size_of::<u16>(),
BufferData::I8(_) => mem::size_of::<i8>(),
BufferData::U8(_) => mem::size_of::<u8>(),
};
self.item_size() * bytes
}
pub fn definition(&self) -> String {
match self {
BufferData::Matrix2(_) => "MAT2".to_string(),
BufferData::Matrix3(_) => "MAT3".to_string(),
BufferData::Matrix4(_) => "MAT4".to_string(),
BufferData::Vector2(_) => "VEC2".to_string(),
BufferData::Vector3(_) => "VEC3".to_string(),
BufferData::Vector4(_) => "VEC4".to_string(),
BufferData::F32(_) => "F32".to_string(),
BufferData::I32(_) => "I32".to_string(),
BufferData::U32(_) => "U32".to_string(),
BufferData::I16(_) => "I16".to_string(),
BufferData::U16(_) => "U16".to_string(),
BufferData::I8(_) => "I8".to_string(),
BufferData::U8(_) => "U8".to_string(),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum BufferType {
Position,
Normal,
Tangent,
UV(usize),
Color(usize),
Joint(usize),
Weight(usize),
Other(String),
}
impl BufferType {
pub fn definition(&self) -> String {
match self {
BufferType::Position => "POSITION".to_string(),
BufferType::Normal => "NORMAL".to_string(),
BufferType::Tangent => "TANGENT".to_string(),
BufferType::UV(n) => format!("UV_{}", n),
BufferType::Color(n) => format!("COLOR_{}", n),
BufferType::Joint(n) => format!("JOINT_{}", n),
BufferType::Weight(n) => format!("WEIGHT_{}", n),
BufferType::Other(string) => string.to_shouty_snake_case(),
}
}
}
#[derive(Clone, Debug)]
pub struct BufferAttribute {
pub data: BufferData,
pub buffer_type: BufferType,
pub dynamic: bool,
pub normalized: bool,
// pub version: usize,
}
impl BufferAttribute {
pub fn count(&self) -> usize {
let l = self.len();
l / self.item_size()
}
pub fn item_size(&self) -> usize {
self.data.item_size()
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn set_normalized(&mut self, normalized: bool) -> &mut Self {
self.normalized = normalized;
self
}
pub fn set_dynamic(&mut self, dynamic: bool) -> &mut Self {
self.dynamic = dynamic;
self
}
pub fn definition(&self) ->String |
}
#[allow(dead_code)]
#[derive(Hash, Eq, PartialEq, Debug, Clone)]
pub struct BufferGroup {
pub start: usize,
pub material_index: usize,
pub count: usize,
pub name: Option<String>,
}
#[allow(dead_code)]
#[derive(Clone)]
pub struct BufferGeometry {
pub uuid: Uuid,
pub name: String,
pub groups: Vec<BufferGroup>,
pub indices: Vec<u32>,
pub attributes: Vec<BufferAttribute>,
pub buffer_order: Vec<BufferType>,
pub b_box: Option<BBox3<f32>>,
callbacks: Vec<fn(&mut BufferGeometry)>,
}
impl fmt::Debug for BufferGeometry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "=====================
BufferGeometry: {}
uuid: {}
groups: {:?}
b_box: {:?}
callbacks: {}
indices: {:?}
attributes: {:?}
=====================",
self.name,
self.uuid,
self.groups,
self.b_box,
self.callbacks.len(),
self.indices,
self.attributes,
)
}
}
#[allow(dead_code)]
impl BufferGeometry {
pub fn new() -> Self {
Self {
attributes: Vec::new(),
groups: Vec::new(),
indices: Vec::new(),
uuid: Uuid::new_v4(),
callbacks: Vec::new(),
name: "".to_string(),
b_box: None,
buffer_order: vec![BufferType::Position, BufferType::Normal, BufferType::UV(0), BufferType::Color(0), BufferType::Joint(0), BufferType::Weight(0)],
}
}
pub fn iter_attributes<'a>(&'a self) -> impl Iterator<Item= &'a BufferAttribute> {
self.buffer_order.iter()
.map(move |e| self.get_attribute(e.clone()) )
.filter(|e| e.is_some() )
.map(|e| e.unwrap() )
}
// pub fn iter_attributes_mut<'a>(&'a mut self) -> impl Iterator<Item= &'a mut BufferAttribute> {
// self.buffer_order.iter()
// .map(move |e| self.get_attribute_mut(e.clone()) )
// .filter(|e| e.is_some() )
// .map(|e| e.unwrap() )
// }
pub fn set_indices(&mut self, indices: Vec<u32>) -> &mut Self {
self.indices = indices;
self
}
pub fn gen_indices(&mut self) -> Result<(), &str> {
let mut len = 0;
match self.get_attribute(BufferType::Position) {
None => {
return Err("BufferGeometry: cant find position");
}
Some(positions) => {
len = positions.len();
}
};
let indices = (0..len as u32).collect();
self.set_indices(indices);
Ok(())
}
pub fn add_buffer_attribute(
&mut self,
buffer_attribute: BufferAttribute,
) -> &mut BufferAttribute {
let index = self.attributes.iter().position( |attr| attr.buffer_type == buffer_attribute.buffer_type);
if let Some(index) = index {
self.attributes.remove(index);
}
self.attributes.push(buffer_attribute);
if!self.attributes.iter().all( |e| e.len() == self.attributes[0].len() ) {
panic!("BufferGeometry: different buffer length: {}", self.name);
}
let i = self.attributes.len() - 1;
&mut self.attributes[i]
}
pub fn create_buffer_attribute(
&mut self,
buffer_type: BufferType,
data: BufferData,
) -> &mut BufferAttribute {
let buffer_attribute = BufferAttribute {
buffer_type,
data,
normalized: false,
dynamic: false,
// version: 0,
};
self.add_buffer_attribute(buffer_attribute)
}
pub fn on_drop(&mut self, cb: fn(&mut BufferGeometry)) {
self.callbacks.push(cb);
}
pub fn get_attribute(&self, buffer_type: BufferType) -> Option<&BufferAttribute> {
self.attributes.iter().find(|e| e.buffer_type == buffer_type)
}
pub fn has_attribute(&self, buffer_type: BufferType) -> bool {
self.attributes.iter().any(|e| e.buffer_type == buffer_type)
}
pub fn get_attribute_mut(&mut self, buffer_type: BufferType) -> Option<&mut BufferAttribute> {
self.attributes.iter_mut().find(|e| e.buffer_type == buffer_type)
}
pub fn generate_normals(&mut self) {
let mut normals = None;
{
let attribute = self.get_attribute(BufferType::Position).unwrap();
if let BufferData::Vector3(data) = &attribute.data {
let mut calc_normals = vec![Vec::new(); data.len()];
let indices = &self.indices;
let il = indices.len();
let mut i = 0;
while i < il {
let a = &data[ indices[i] as usize];
let b = &data[ indices[i+1] as usize];
let c = &data[ indices[i+2] as usize];
let mut cb = c - b;
let ab = a - b;
cb.cross(&ab);
cb.normalize();
calc_normals[ indices[i] as usize ].push(cb.clone());
calc_normals[ indices[i+1] as usize ].push(cb.clone());
calc_normals[ indices[i+2] as usize ].push(cb);
i+=3;
}
let calc_normals = calc_normals
.iter()
.map(|items|{
if items.len() == 1 {
return items[0].clone();
}
let mut res = Vector3::add_all_vectors(items);
res.normalize();
res
})
.collect();
normals = Some(calc_normals);
}
}
if let Some(normal) = normals {
self.create_buffer_attribute(BufferType::Normal, BufferData::Vector3(normal));
}
}
pub fn duplicate(&self) -> Self {
let mut data = self.clone();
data.uuid = Uuid::new_v4();
data
}
pub fn update_box3 (&mut self) -> Result <(), Box<Error>> {
let mut b_box = None;
if let Some(attr) = self.get_attribute(BufferType::Position) {
if let BufferData::Vector3(positions) = &attr.data {
let mut b = BBox3::new_empty();
b.set_from_array(&positions[..]);
b_box = Some(b);
}
}
if b_box.is_none() {return Err( Box::from("cant update b_box") ); }
self.b_box = b_box;
Ok(())
}
pub fn get_b_box(&mut self) -> Result<BBox3<f32>, Box<Error>> {
if self.b_box.is_some() {
return Ok(self.b_box.as_ref().unwrap().clone())
}
self.update_box3()?;
Ok(self.b_box.as_ref().unwrap().clone())
}
pub fn scale_positions_by_vec(&mut self, v: &Vector3<f32>) -> Option<()> {
if let Some(attr) = self.get_attribute_mut(BufferType::Position) {
if let BufferData::Vector3(positions) = &mut attr.data {
positions
.iter_mut()
.for_each(|e| {
e.multiply(v);
});
return Some(());
}
return None;
}
None
}
pub fn get_vertex_byte_size(&self) -> usize {
self.iter_attributes().map(|attr| attr.data.elem_byte_len()).sum()
}
}
impl Drop for BufferGeometry {
fn drop(&mut self) {
while self.callbacks.len() > 0 {
let cb = self.callbacks.pop().unwrap();
cb(self);
}
}
}
#[derive(Clone)]
pub struct SharedGeometry (Arc<Mutex<BufferGeometry>>);
impl SharedGeometry {
pub fn new(g: BufferGeometry) -> Self {
SharedGeometry(Arc::new(Mutex::new(g)))
}
pub fn lock(&mut self) -> LockResult<MutexGuard<BufferGeometry>> {
self.0.lock()
}
}
impl Component for SharedGeometry {
type Storage = VecStorage<Self>;
}
| {
format!("VERTEX_{}_{}", self.buffer_type.definition(), self.data.definition())
} | identifier_body |
buffer_geometry.rs | extern crate uuid;
extern crate heck;
extern crate specs;
use self::uuid::Uuid;
use self::heck::ShoutySnakeCase;
use std::vec::Vec;
use std::fmt;
use std::sync::{Arc,Mutex, LockResult, MutexGuard};
use std::mem;
use std::error::Error;
use self::specs::{Component, VecStorage};
use math::{
Vector,
Vector2,
Vector3,
Vector4,
Matrix2,
Matrix3,
Matrix4,
};
use core::{
BBox3,
};
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub enum BufferData {
Matrix2(Vec<Matrix2<f32>>),
Matrix3(Vec<Matrix3<f32>>),
Matrix4(Vec<Matrix4<f32>>),
Vector2(Vec<Vector2<f32>>),
Vector3(Vec<Vector3<f32>>),
Vector4(Vec<Vector4<f32>>),
F32(Vec<f32>),
I32(Vec<i32>),
U32(Vec<u32>),
I16(Vec<i16>),
U16(Vec<u16>),
I8(Vec<i8>),
U8(Vec<u8>),
}
impl BufferData {
pub fn item_size(&self) -> usize {
match self {
BufferData::Matrix2(_) => 4,
BufferData::Matrix3(_) => 9,
BufferData::Matrix4(_) => 16,
BufferData::Vector2(_) => 2,
BufferData::Vector3(_) => 3,
BufferData::Vector4(_) => 4,
BufferData::F32(_) => 1,
BufferData::I32(_) => 1,
BufferData::U32(_) => 1,
BufferData::I16(_) => 1,
BufferData::U16(_) => 1,
BufferData::I8(_) => 1,
BufferData::U8(_) => 1,
}
}
pub fn len(&self) -> usize {
match self {
BufferData::Matrix2(a) => a.len(),
BufferData::Matrix3(a) => a.len(),
BufferData::Matrix4(a) => a.len(),
BufferData::Vector2(a) => a.len(),
BufferData::Vector3(a) => a.len(),
BufferData::Vector4(a) => a.len(),
BufferData::F32(a) => a.len(),
BufferData::I32(a) => a.len(),
BufferData::U32(a) => a.len(),
BufferData::I16(a) => a.len(),
BufferData::U16(a) => a.len(),
BufferData::I8(a) => a.len(),
BufferData::U8(a) => a.len(),
}
}
pub fn elem_byte_len(&self) -> usize {
let bytes = match self {
BufferData::Matrix2(_) => mem::size_of::<f32>(),
BufferData::Matrix3(_) => mem::size_of::<f32>(),
BufferData::Matrix4(_) => mem::size_of::<f32>(),
BufferData::Vector2(_) => mem::size_of::<f32>(),
BufferData::Vector3(_) => mem::size_of::<f32>(),
BufferData::Vector4(_) => mem::size_of::<f32>(),
BufferData::F32(_) => mem::size_of::<f32>(),
BufferData::I32(_) => mem::size_of::<i32>(),
BufferData::U32(_) => mem::size_of::<u32>(),
BufferData::I16(_) => mem::size_of::<i16>(),
BufferData::U16(_) => mem::size_of::<u16>(),
BufferData::I8(_) => mem::size_of::<i8>(),
BufferData::U8(_) => mem::size_of::<u8>(),
};
self.item_size() * bytes
}
pub fn definition(&self) -> String {
match self {
BufferData::Matrix2(_) => "MAT2".to_string(),
BufferData::Matrix3(_) => "MAT3".to_string(),
BufferData::Matrix4(_) => "MAT4".to_string(),
BufferData::Vector2(_) => "VEC2".to_string(),
BufferData::Vector3(_) => "VEC3".to_string(),
BufferData::Vector4(_) => "VEC4".to_string(),
BufferData::F32(_) => "F32".to_string(),
BufferData::I32(_) => "I32".to_string(),
BufferData::U32(_) => "U32".to_string(),
BufferData::I16(_) => "I16".to_string(),
BufferData::U16(_) => "U16".to_string(),
BufferData::I8(_) => "I8".to_string(),
BufferData::U8(_) => "U8".to_string(),
} |
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum BufferType {
Position,
Normal,
Tangent,
UV(usize),
Color(usize),
Joint(usize),
Weight(usize),
Other(String),
}
impl BufferType {
pub fn definition(&self) -> String {
match self {
BufferType::Position => "POSITION".to_string(),
BufferType::Normal => "NORMAL".to_string(),
BufferType::Tangent => "TANGENT".to_string(),
BufferType::UV(n) => format!("UV_{}", n),
BufferType::Color(n) => format!("COLOR_{}", n),
BufferType::Joint(n) => format!("JOINT_{}", n),
BufferType::Weight(n) => format!("WEIGHT_{}", n),
BufferType::Other(string) => string.to_shouty_snake_case(),
}
}
}
#[derive(Clone, Debug)]
pub struct BufferAttribute {
pub data: BufferData,
pub buffer_type: BufferType,
pub dynamic: bool,
pub normalized: bool,
// pub version: usize,
}
impl BufferAttribute {
pub fn count(&self) -> usize {
let l = self.len();
l / self.item_size()
}
pub fn item_size(&self) -> usize {
self.data.item_size()
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn set_normalized(&mut self, normalized: bool) -> &mut Self {
self.normalized = normalized;
self
}
pub fn set_dynamic(&mut self, dynamic: bool) -> &mut Self {
self.dynamic = dynamic;
self
}
pub fn definition(&self) ->String {
format!("VERTEX_{}_{}", self.buffer_type.definition(), self.data.definition())
}
}
#[allow(dead_code)]
#[derive(Hash, Eq, PartialEq, Debug, Clone)]
pub struct BufferGroup {
pub start: usize,
pub material_index: usize,
pub count: usize,
pub name: Option<String>,
}
#[allow(dead_code)]
#[derive(Clone)]
pub struct BufferGeometry {
pub uuid: Uuid,
pub name: String,
pub groups: Vec<BufferGroup>,
pub indices: Vec<u32>,
pub attributes: Vec<BufferAttribute>,
pub buffer_order: Vec<BufferType>,
pub b_box: Option<BBox3<f32>>,
callbacks: Vec<fn(&mut BufferGeometry)>,
}
impl fmt::Debug for BufferGeometry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "=====================
BufferGeometry: {}
uuid: {}
groups: {:?}
b_box: {:?}
callbacks: {}
indices: {:?}
attributes: {:?}
=====================",
self.name,
self.uuid,
self.groups,
self.b_box,
self.callbacks.len(),
self.indices,
self.attributes,
)
}
}
#[allow(dead_code)]
impl BufferGeometry {
pub fn new() -> Self {
Self {
attributes: Vec::new(),
groups: Vec::new(),
indices: Vec::new(),
uuid: Uuid::new_v4(),
callbacks: Vec::new(),
name: "".to_string(),
b_box: None,
buffer_order: vec![BufferType::Position, BufferType::Normal, BufferType::UV(0), BufferType::Color(0), BufferType::Joint(0), BufferType::Weight(0)],
}
}
pub fn iter_attributes<'a>(&'a self) -> impl Iterator<Item= &'a BufferAttribute> {
self.buffer_order.iter()
.map(move |e| self.get_attribute(e.clone()) )
.filter(|e| e.is_some() )
.map(|e| e.unwrap() )
}
// pub fn iter_attributes_mut<'a>(&'a mut self) -> impl Iterator<Item= &'a mut BufferAttribute> {
// self.buffer_order.iter()
// .map(move |e| self.get_attribute_mut(e.clone()) )
// .filter(|e| e.is_some() )
// .map(|e| e.unwrap() )
// }
pub fn set_indices(&mut self, indices: Vec<u32>) -> &mut Self {
self.indices = indices;
self
}
pub fn gen_indices(&mut self) -> Result<(), &str> {
let mut len = 0;
match self.get_attribute(BufferType::Position) {
None => {
return Err("BufferGeometry: cant find position");
}
Some(positions) => {
len = positions.len();
}
};
let indices = (0..len as u32).collect();
self.set_indices(indices);
Ok(())
}
pub fn add_buffer_attribute(
&mut self,
buffer_attribute: BufferAttribute,
) -> &mut BufferAttribute {
let index = self.attributes.iter().position( |attr| attr.buffer_type == buffer_attribute.buffer_type);
if let Some(index) = index {
self.attributes.remove(index);
}
self.attributes.push(buffer_attribute);
if!self.attributes.iter().all( |e| e.len() == self.attributes[0].len() ) {
panic!("BufferGeometry: different buffer length: {}", self.name);
}
let i = self.attributes.len() - 1;
&mut self.attributes[i]
}
pub fn create_buffer_attribute(
&mut self,
buffer_type: BufferType,
data: BufferData,
) -> &mut BufferAttribute {
let buffer_attribute = BufferAttribute {
buffer_type,
data,
normalized: false,
dynamic: false,
// version: 0,
};
self.add_buffer_attribute(buffer_attribute)
}
pub fn on_drop(&mut self, cb: fn(&mut BufferGeometry)) {
self.callbacks.push(cb);
}
pub fn get_attribute(&self, buffer_type: BufferType) -> Option<&BufferAttribute> {
self.attributes.iter().find(|e| e.buffer_type == buffer_type)
}
pub fn has_attribute(&self, buffer_type: BufferType) -> bool {
self.attributes.iter().any(|e| e.buffer_type == buffer_type)
}
pub fn get_attribute_mut(&mut self, buffer_type: BufferType) -> Option<&mut BufferAttribute> {
self.attributes.iter_mut().find(|e| e.buffer_type == buffer_type)
}
pub fn generate_normals(&mut self) {
let mut normals = None;
{
let attribute = self.get_attribute(BufferType::Position).unwrap();
if let BufferData::Vector3(data) = &attribute.data {
let mut calc_normals = vec![Vec::new(); data.len()];
let indices = &self.indices;
let il = indices.len();
let mut i = 0;
while i < il {
let a = &data[ indices[i] as usize];
let b = &data[ indices[i+1] as usize];
let c = &data[ indices[i+2] as usize];
let mut cb = c - b;
let ab = a - b;
cb.cross(&ab);
cb.normalize();
calc_normals[ indices[i] as usize ].push(cb.clone());
calc_normals[ indices[i+1] as usize ].push(cb.clone());
calc_normals[ indices[i+2] as usize ].push(cb);
i+=3;
}
let calc_normals = calc_normals
.iter()
.map(|items|{
if items.len() == 1 {
return items[0].clone();
}
let mut res = Vector3::add_all_vectors(items);
res.normalize();
res
})
.collect();
normals = Some(calc_normals);
}
}
if let Some(normal) = normals {
self.create_buffer_attribute(BufferType::Normal, BufferData::Vector3(normal));
}
}
pub fn duplicate(&self) -> Self {
let mut data = self.clone();
data.uuid = Uuid::new_v4();
data
}
pub fn update_box3 (&mut self) -> Result <(), Box<Error>> {
let mut b_box = None;
if let Some(attr) = self.get_attribute(BufferType::Position) {
if let BufferData::Vector3(positions) = &attr.data {
let mut b = BBox3::new_empty();
b.set_from_array(&positions[..]);
b_box = Some(b);
}
}
if b_box.is_none() {return Err( Box::from("cant update b_box") ); }
self.b_box = b_box;
Ok(())
}
pub fn get_b_box(&mut self) -> Result<BBox3<f32>, Box<Error>> {
if self.b_box.is_some() {
return Ok(self.b_box.as_ref().unwrap().clone())
}
self.update_box3()?;
Ok(self.b_box.as_ref().unwrap().clone())
}
pub fn scale_positions_by_vec(&mut self, v: &Vector3<f32>) -> Option<()> {
if let Some(attr) = self.get_attribute_mut(BufferType::Position) {
if let BufferData::Vector3(positions) = &mut attr.data {
positions
.iter_mut()
.for_each(|e| {
e.multiply(v);
});
return Some(());
}
return None;
}
None
}
pub fn get_vertex_byte_size(&self) -> usize {
self.iter_attributes().map(|attr| attr.data.elem_byte_len()).sum()
}
}
impl Drop for BufferGeometry {
fn drop(&mut self) {
while self.callbacks.len() > 0 {
let cb = self.callbacks.pop().unwrap();
cb(self);
}
}
}
#[derive(Clone)]
pub struct SharedGeometry (Arc<Mutex<BufferGeometry>>);
impl SharedGeometry {
pub fn new(g: BufferGeometry) -> Self {
SharedGeometry(Arc::new(Mutex::new(g)))
}
pub fn lock(&mut self) -> LockResult<MutexGuard<BufferGeometry>> {
self.0.lock()
}
}
impl Component for SharedGeometry {
type Storage = VecStorage<Self>;
} | }
} | random_line_split |
span.rs | use crate::{
buffer::{
cell_buffer::{Contacts, Endorse},
fragment_buffer::FragmentSpan,
FragmentBuffer, Property, PropertyBuffer, StringBuffer,
},
fragment,
fragment::Circle,
map::{circle_map, UNICODE_FRAGMENTS},
Cell, Fragment, Merge, Point, Settings,
};
use itertools::Itertools;
use std::{
fmt,
ops::{Deref, DerefMut},
};
/// A describes where a char came from relative to the source ascii text
/// The primary purpose of span is to group adjacent cell together
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Span(pub Vec<(Cell, char)>);
impl Deref for Span {
type Target = Vec<(Cell, char)>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct Bounds {
top_left: Cell,
bottom_right: Cell,
}
impl DerefMut for Span {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<Vec<(Cell, char)>> for Span {
fn from(cell_chars: Vec<(Cell, char)>) -> Self {
Span(cell_chars)
}
}
impl Span {
pub(crate) fn new(cell: Cell, ch: char) -> Self {
Span(vec![(cell, ch)])
}
pub(super) fn is_adjacent(&self, cell: &Cell) -> bool {
self.iter()
.rev()
.any(|(ex_cell, _)| ex_cell.is_adjacent(cell))
}
/// if any cell of this span is adjacent to any cell of the other
/// Use.rev() to check the last cell of this Span against the first cell of the other Span
/// They have a high change of matching faster
pub(super) fn can_merge(&self, other: &Self) -> bool {
self.iter().rev().any(|(cell, _)| {
other
.iter()
.any(|(other_cell, _)| cell.is_adjacent(other_cell))
})
}
/// paste the other Span at cell location `loc`
pub fn paste_at(&self, loc: Cell, other: &Self) -> Self {
let mut this = self.clone();
for (cell, ch) in other.deref() {
this.push((*cell + loc, *ch));
}
this.sort();
this.dedup();
this
}
fn top_left(&self) -> Cell {
let bounds = self.bounds().expect("must have bounds");
bounds.0
}
pub fn localize_point(&self, point: Point) -> Point {
self.top_left().localize_point(point)
}
/// returns the top_left most cell which aligns the top most and the left most cell.
pub(crate) fn bounds(&self) -> Option<(Cell, Cell)> {
if let Some((min_y, max_y)) =
self.iter().map(|(cell, _)| cell.y).minmax().into_option()
{
if let Some((min_x, max_x)) =
self.iter().map(|(cell, _)| cell.x).minmax().into_option()
{
Some((Cell::new(min_x, min_y), Cell::new(max_x, max_y)))
} else {
None
}
} else {
None
}
}
pub fn cell_bounds(&self) -> Option<Bounds> {
if let Some((top_left, top_right)) = self.bounds() {
Some(Bounds::new(top_left, top_right))
} else {
None
}
}
/// shift the cells relative to the top_left most bound
pub(crate) fn localize(self) -> Self {
if let Some((tl, _br)) = self.bounds() {
let mut new_self = Span(vec![]);
for (cell, ch) in self.iter() {
let local_cell = tl.localize_cell(*cell);
new_self.push((local_cell, *ch));
}
new_self
} else {
self
}
}
/// convert this span into fragments applying endorsement
/// of group into fragments
///
///
/// TODO: return the rejects as Span, instead of Contacts
pub(crate) fn endorse(self) -> Endorse<FragmentSpan, Span> {
// try to endorse as circles or arcs
let (mut accepted, un_endorsed_span): (Vec<FragmentSpan>, Span) =
self.endorse_to_arcs_and_circles();
// convert into contacts and try to endorse as rects fragments
let un_endorsed_contacts: Vec<Contacts> = un_endorsed_span.into();
let rect_endorsed: Endorse<FragmentSpan, Contacts> =
Contacts::endorse_rects(un_endorsed_contacts);
accepted.extend(rect_endorsed.accepted);
let re_endorsed = Self::re_endorse(rect_endorsed.rejects);
let mut endorsed = Endorse {
accepted,
rejects: vec![],
};
endorsed.extend(re_endorsed);
endorsed
}
/// re try endorsing the contacts into arc and circles by converting it to span first
fn re_endorse(rect_rejects: Vec<Contacts>) -> Endorse<FragmentSpan, Span> {
// convert back to span
let span_rejects: Vec<Span> = rect_rejects
.into_iter()
.map(|contact| contact.span())
.collect();
let span_rejects: Vec<Span> = Span::merge_recursive(span_rejects);
// try to endorse as circles or arcs one more time
let (accepted, rejects): (Vec<Vec<FragmentSpan>>, Vec<Span>) =
span_rejects
.into_iter()
.map(|span| span.endorse_to_arcs_and_circles())
.unzip();
Endorse {
accepted: accepted.into_iter().flatten().collect(),
rejects,
}
}
/// endorse this span into circles, half_circle, quarter_circle only
fn endorse_to_arcs_and_circles(self) -> (Vec<FragmentSpan>, Span) {
let mut accepted = vec![];
let (top_left, _) = self.bounds().expect("must have bounds");
let un_endorsed_span: Span = if let Some((circle, un_endorsed_span)) =
circle_map::endorse_circle_span(&self)
{
let circle = circle.absolute_position(top_left);
let circle_frag_span =
FragmentSpan::new(self.clone(), circle.into());
accepted.push(circle_frag_span);
un_endorsed_span
} else if let Some((three_quarters_arc, un_endorsed_span)) =
circle_map::endorse_three_quarters_arc_span(&self)
{
let three_quarters_arc =
three_quarters_arc.absolute_position(top_left);
let three_quarters_arc_frag_span =
FragmentSpan::new(self.clone(), three_quarters_arc.into());
accepted.push(three_quarters_arc_frag_span);
un_endorsed_span
} else if let Some((half_arc, un_endorsed_span)) =
circle_map::endorse_half_arc_span(&self)
{
let half_arc = half_arc.absolute_position(top_left);
let half_arc_frag_span =
FragmentSpan::new(self.clone(), half_arc.into());
accepted.push(half_arc_frag_span);
un_endorsed_span
} else if let Some((arc, un_endorsed_span)) =
circle_map::endorse_quarter_arc_span(&self)
{
let arc = arc.absolute_position(top_left);
let arc_frag_span = FragmentSpan::new(self.clone(), arc.into());
accepted.push(arc_frag_span);
un_endorsed_span
} else {
self
};
(accepted, un_endorsed_span)
}
/// create a span of the cells that is inside of the start and end bound cells
pub(crate) fn extract(&self, bound1: Cell, bound2: Cell) -> Self {
Span(
self.iter()
.map(|(cell, ch)| (*cell, *ch))
.filter(|(cell, _ch)| cell.is_bounded(bound1, bound2))
.collect(),
)
}
/// returns true if any cell on this span
/// is within the bounds of `bound1` and `bound2`
pub fn is_bounded(&self, bound1: Cell, bound2: Cell) -> bool {
self.iter()
.all(|(cell, ch)| cell.is_bounded(bound1, bound2))
}
pub fn hit_cell(&self, needle: Cell) -> bool |
/// merge as is without checking it it can
pub fn merge_no_check(&self, other: &Self) -> Self {
let mut cells = self.0.clone();
cells.extend(&other.0);
Span(cells)
}
}
impl Merge for Span {
fn merge(&self, other: &Self) -> Option<Self> {
if self.can_merge(other) {
Some(self.merge_no_check(other))
} else {
None
}
}
}
impl Bounds {
pub fn new(cell1: Cell, cell2: Cell) -> Self {
let (top_left, bottom_right) = Cell::rearrange_bound(cell1, cell2);
Self {
top_left,
bottom_right,
}
}
pub fn top_left(&self) -> Cell {
self.top_left
}
pub fn bottom_right(&self) -> Cell {
self.bottom_right
}
pub fn top_right(&self) -> Cell {
Cell::new(self.bottom_right.x, self.top_left.y)
}
pub fn bottom_left(&self) -> Cell {
Cell::new(self.top_left.x, self.bottom_right.y)
}
}
/// create a property buffer for all the cells of this span
impl<'p> From<Span> for PropertyBuffer<'p> {
fn from(span: Span) -> Self {
let mut pb = PropertyBuffer::new();
for (cell, ch) in span.iter() {
if let Some(property) = Property::from_char(*ch) {
pb.as_mut().insert(*cell, property);
}
}
pb
}
}
/// Grouping cell by adjacents are not enough
///
/// grouping them together when they are actually connected
/// is the most approprivate way of grouping
/// Span just provides an optimization of the number
/// of elements to be checked.
/// Only elements on the same span are checked to see if they
/// belong on the same group
///
impl From<Span> for Vec<Contacts> {
fn from(span: Span) -> Vec<Contacts> {
let fb = FragmentBuffer::from(span);
let merged_fragments: Vec<FragmentSpan> = fb.merge_fragment_spans();
let contacts: Vec<Contacts> = merged_fragments
.into_iter()
.map(|frag| Contacts::new(frag))
.collect();
Contacts::merge_recursive(contacts)
}
}
/// First we crate a property buffer based on the cell,char content of this span
/// and then based on the property, we extract the accurate fragments
///
/// If a character has no property, try to see if has equivalent fragments from unicode_map
/// otherwise add it to the fragment_buffer as a text fragment
impl From<Span> for FragmentBuffer {
fn from(span: Span) -> FragmentBuffer {
let pb = PropertyBuffer::from(span.clone());
let mut fb = FragmentBuffer::from(pb.clone());
for (cell, ch) in span.iter() {
if pb.as_ref().get(cell).is_none() {
if let Some(fragments) = UNICODE_FRAGMENTS.get(ch) {
fb.add_fragments_to_cell(*cell, *ch, fragments.clone());
} else {
fb.add_fragment_to_cell(
*cell,
*ch,
fragment::cell_text(*ch),
);
}
}
}
fb
}
}
impl fmt::Display for Span {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut buffer = StringBuffer::new();
if let Some((tl, _br)) = self.bounds() {
for (cell, ch) in self.iter() {
if *ch!= '\0' &&!ch.is_whitespace() {
let local = tl.localize_cell(*cell);
buffer.add_char(local.x, local.y, *ch);
}
}
}
write!(f, "{}", buffer.to_string())
}
}
#[cfg(test)]
mod test_span;
| {
self.iter().any(|(cell, ch)| *cell == needle)
} | identifier_body |
span.rs | use crate::{
buffer::{
cell_buffer::{Contacts, Endorse},
fragment_buffer::FragmentSpan,
FragmentBuffer, Property, PropertyBuffer, StringBuffer,
},
fragment,
fragment::Circle,
map::{circle_map, UNICODE_FRAGMENTS},
Cell, Fragment, Merge, Point, Settings,
};
use itertools::Itertools;
use std::{
fmt,
ops::{Deref, DerefMut},
};
/// A describes where a char came from relative to the source ascii text
/// The primary purpose of span is to group adjacent cell together
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Span(pub Vec<(Cell, char)>);
impl Deref for Span {
type Target = Vec<(Cell, char)>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct Bounds {
top_left: Cell,
bottom_right: Cell,
}
impl DerefMut for Span {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<Vec<(Cell, char)>> for Span {
fn from(cell_chars: Vec<(Cell, char)>) -> Self {
Span(cell_chars)
}
}
impl Span {
pub(crate) fn new(cell: Cell, ch: char) -> Self {
Span(vec![(cell, ch)])
}
pub(super) fn is_adjacent(&self, cell: &Cell) -> bool {
self.iter()
.rev()
.any(|(ex_cell, _)| ex_cell.is_adjacent(cell))
}
/// if any cell of this span is adjacent to any cell of the other
/// Use.rev() to check the last cell of this Span against the first cell of the other Span
/// They have a high change of matching faster
pub(super) fn can_merge(&self, other: &Self) -> bool {
self.iter().rev().any(|(cell, _)| {
other
.iter()
.any(|(other_cell, _)| cell.is_adjacent(other_cell))
})
}
/// paste the other Span at cell location `loc`
pub fn paste_at(&self, loc: Cell, other: &Self) -> Self {
let mut this = self.clone();
for (cell, ch) in other.deref() {
this.push((*cell + loc, *ch));
}
this.sort();
this.dedup();
this
}
fn top_left(&self) -> Cell {
let bounds = self.bounds().expect("must have bounds");
bounds.0
}
pub fn localize_point(&self, point: Point) -> Point {
self.top_left().localize_point(point)
}
/// returns the top_left most cell which aligns the top most and the left most cell.
pub(crate) fn bounds(&self) -> Option<(Cell, Cell)> {
if let Some((min_y, max_y)) =
self.iter().map(|(cell, _)| cell.y).minmax().into_option()
{
if let Some((min_x, max_x)) =
self.iter().map(|(cell, _)| cell.x).minmax().into_option()
{
Some((Cell::new(min_x, min_y), Cell::new(max_x, max_y)))
} else {
None
}
} else {
None
}
}
pub fn cell_bounds(&self) -> Option<Bounds> {
if let Some((top_left, top_right)) = self.bounds() {
Some(Bounds::new(top_left, top_right))
} else {
None
}
}
/// shift the cells relative to the top_left most bound
pub(crate) fn localize(self) -> Self {
if let Some((tl, _br)) = self.bounds() {
let mut new_self = Span(vec![]);
for (cell, ch) in self.iter() {
let local_cell = tl.localize_cell(*cell);
new_self.push((local_cell, *ch));
}
new_self
} else {
self
}
}
/// convert this span into fragments applying endorsement
/// of group into fragments
///
///
/// TODO: return the rejects as Span, instead of Contacts
pub(crate) fn endorse(self) -> Endorse<FragmentSpan, Span> {
// try to endorse as circles or arcs
let (mut accepted, un_endorsed_span): (Vec<FragmentSpan>, Span) =
self.endorse_to_arcs_and_circles();
// convert into contacts and try to endorse as rects fragments
let un_endorsed_contacts: Vec<Contacts> = un_endorsed_span.into();
let rect_endorsed: Endorse<FragmentSpan, Contacts> =
Contacts::endorse_rects(un_endorsed_contacts);
accepted.extend(rect_endorsed.accepted);
let re_endorsed = Self::re_endorse(rect_endorsed.rejects);
let mut endorsed = Endorse {
accepted,
rejects: vec![],
};
endorsed.extend(re_endorsed);
endorsed
}
/// re try endorsing the contacts into arc and circles by converting it to span first
fn re_endorse(rect_rejects: Vec<Contacts>) -> Endorse<FragmentSpan, Span> {
// convert back to span
let span_rejects: Vec<Span> = rect_rejects
.into_iter()
.map(|contact| contact.span())
.collect();
let span_rejects: Vec<Span> = Span::merge_recursive(span_rejects);
// try to endorse as circles or arcs one more time
let (accepted, rejects): (Vec<Vec<FragmentSpan>>, Vec<Span>) =
span_rejects
.into_iter()
.map(|span| span.endorse_to_arcs_and_circles())
.unzip();
Endorse {
accepted: accepted.into_iter().flatten().collect(),
rejects,
}
}
/// endorse this span into circles, half_circle, quarter_circle only
fn endorse_to_arcs_and_circles(self) -> (Vec<FragmentSpan>, Span) {
let mut accepted = vec![];
let (top_left, _) = self.bounds().expect("must have bounds");
let un_endorsed_span: Span = if let Some((circle, un_endorsed_span)) =
circle_map::endorse_circle_span(&self)
{
let circle = circle.absolute_position(top_left);
let circle_frag_span =
FragmentSpan::new(self.clone(), circle.into());
accepted.push(circle_frag_span);
un_endorsed_span
} else if let Some((three_quarters_arc, un_endorsed_span)) =
circle_map::endorse_three_quarters_arc_span(&self)
{
let three_quarters_arc =
three_quarters_arc.absolute_position(top_left);
let three_quarters_arc_frag_span =
FragmentSpan::new(self.clone(), three_quarters_arc.into());
accepted.push(three_quarters_arc_frag_span);
un_endorsed_span
} else if let Some((half_arc, un_endorsed_span)) =
circle_map::endorse_half_arc_span(&self)
{
let half_arc = half_arc.absolute_position(top_left);
let half_arc_frag_span =
FragmentSpan::new(self.clone(), half_arc.into());
accepted.push(half_arc_frag_span);
un_endorsed_span
} else if let Some((arc, un_endorsed_span)) =
circle_map::endorse_quarter_arc_span(&self)
{
let arc = arc.absolute_position(top_left);
let arc_frag_span = FragmentSpan::new(self.clone(), arc.into());
accepted.push(arc_frag_span);
un_endorsed_span
} else {
self
};
(accepted, un_endorsed_span)
}
/// create a span of the cells that is inside of the start and end bound cells
pub(crate) fn extract(&self, bound1: Cell, bound2: Cell) -> Self {
Span(
self.iter()
.map(|(cell, ch)| (*cell, *ch))
.filter(|(cell, _ch)| cell.is_bounded(bound1, bound2))
.collect(),
)
}
/// returns true if any cell on this span
/// is within the bounds of `bound1` and `bound2`
pub fn is_bounded(&self, bound1: Cell, bound2: Cell) -> bool {
self.iter()
.all(|(cell, ch)| cell.is_bounded(bound1, bound2))
}
pub fn hit_cell(&self, needle: Cell) -> bool {
self.iter().any(|(cell, ch)| *cell == needle)
}
/// merge as is without checking it it can
pub fn merge_no_check(&self, other: &Self) -> Self {
let mut cells = self.0.clone();
cells.extend(&other.0);
Span(cells)
}
}
impl Merge for Span {
fn merge(&self, other: &Self) -> Option<Self> {
if self.can_merge(other) {
Some(self.merge_no_check(other))
} else |
}
}
impl Bounds {
pub fn new(cell1: Cell, cell2: Cell) -> Self {
let (top_left, bottom_right) = Cell::rearrange_bound(cell1, cell2);
Self {
top_left,
bottom_right,
}
}
pub fn top_left(&self) -> Cell {
self.top_left
}
pub fn bottom_right(&self) -> Cell {
self.bottom_right
}
pub fn top_right(&self) -> Cell {
Cell::new(self.bottom_right.x, self.top_left.y)
}
pub fn bottom_left(&self) -> Cell {
Cell::new(self.top_left.x, self.bottom_right.y)
}
}
/// create a property buffer for all the cells of this span
impl<'p> From<Span> for PropertyBuffer<'p> {
fn from(span: Span) -> Self {
let mut pb = PropertyBuffer::new();
for (cell, ch) in span.iter() {
if let Some(property) = Property::from_char(*ch) {
pb.as_mut().insert(*cell, property);
}
}
pb
}
}
/// Grouping cell by adjacents are not enough
///
/// grouping them together when they are actually connected
/// is the most approprivate way of grouping
/// Span just provides an optimization of the number
/// of elements to be checked.
/// Only elements on the same span are checked to see if they
/// belong on the same group
///
impl From<Span> for Vec<Contacts> {
fn from(span: Span) -> Vec<Contacts> {
let fb = FragmentBuffer::from(span);
let merged_fragments: Vec<FragmentSpan> = fb.merge_fragment_spans();
let contacts: Vec<Contacts> = merged_fragments
.into_iter()
.map(|frag| Contacts::new(frag))
.collect();
Contacts::merge_recursive(contacts)
}
}
/// First we crate a property buffer based on the cell,char content of this span
/// and then based on the property, we extract the accurate fragments
///
/// If a character has no property, try to see if has equivalent fragments from unicode_map
/// otherwise add it to the fragment_buffer as a text fragment
impl From<Span> for FragmentBuffer {
fn from(span: Span) -> FragmentBuffer {
let pb = PropertyBuffer::from(span.clone());
let mut fb = FragmentBuffer::from(pb.clone());
for (cell, ch) in span.iter() {
if pb.as_ref().get(cell).is_none() {
if let Some(fragments) = UNICODE_FRAGMENTS.get(ch) {
fb.add_fragments_to_cell(*cell, *ch, fragments.clone());
} else {
fb.add_fragment_to_cell(
*cell,
*ch,
fragment::cell_text(*ch),
);
}
}
}
fb
}
}
impl fmt::Display for Span {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut buffer = StringBuffer::new();
if let Some((tl, _br)) = self.bounds() {
for (cell, ch) in self.iter() {
if *ch!= '\0' &&!ch.is_whitespace() {
let local = tl.localize_cell(*cell);
buffer.add_char(local.x, local.y, *ch);
}
}
}
write!(f, "{}", buffer.to_string())
}
}
#[cfg(test)]
mod test_span;
| {
None
} | conditional_block |
span.rs | use crate::{
buffer::{
cell_buffer::{Contacts, Endorse},
fragment_buffer::FragmentSpan,
FragmentBuffer, Property, PropertyBuffer, StringBuffer,
},
fragment,
fragment::Circle,
map::{circle_map, UNICODE_FRAGMENTS},
Cell, Fragment, Merge, Point, Settings,
};
use itertools::Itertools;
use std::{
fmt,
ops::{Deref, DerefMut},
};
/// A describes where a char came from relative to the source ascii text
/// The primary purpose of span is to group adjacent cell together
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Span(pub Vec<(Cell, char)>);
impl Deref for Span {
type Target = Vec<(Cell, char)>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct Bounds {
top_left: Cell,
bottom_right: Cell,
}
impl DerefMut for Span {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<Vec<(Cell, char)>> for Span {
fn from(cell_chars: Vec<(Cell, char)>) -> Self {
Span(cell_chars)
}
}
impl Span {
pub(crate) fn new(cell: Cell, ch: char) -> Self {
Span(vec![(cell, ch)])
}
pub(super) fn is_adjacent(&self, cell: &Cell) -> bool {
self.iter()
.rev()
.any(|(ex_cell, _)| ex_cell.is_adjacent(cell))
}
/// if any cell of this span is adjacent to any cell of the other
/// Use.rev() to check the last cell of this Span against the first cell of the other Span
/// They have a high change of matching faster
pub(super) fn can_merge(&self, other: &Self) -> bool {
self.iter().rev().any(|(cell, _)| {
other
.iter()
.any(|(other_cell, _)| cell.is_adjacent(other_cell))
})
}
/// paste the other Span at cell location `loc`
pub fn paste_at(&self, loc: Cell, other: &Self) -> Self {
let mut this = self.clone();
for (cell, ch) in other.deref() {
this.push((*cell + loc, *ch));
}
this.sort();
this.dedup();
this
}
fn top_left(&self) -> Cell {
let bounds = self.bounds().expect("must have bounds");
bounds.0
}
pub fn localize_point(&self, point: Point) -> Point {
self.top_left().localize_point(point)
}
/// returns the top_left most cell which aligns the top most and the left most cell.
pub(crate) fn bounds(&self) -> Option<(Cell, Cell)> {
if let Some((min_y, max_y)) =
self.iter().map(|(cell, _)| cell.y).minmax().into_option()
{
if let Some((min_x, max_x)) =
self.iter().map(|(cell, _)| cell.x).minmax().into_option()
{
Some((Cell::new(min_x, min_y), Cell::new(max_x, max_y)))
} else {
None
}
} else {
None
}
}
pub fn cell_bounds(&self) -> Option<Bounds> {
if let Some((top_left, top_right)) = self.bounds() {
Some(Bounds::new(top_left, top_right))
} else {
None
}
}
/// shift the cells relative to the top_left most bound
pub(crate) fn localize(self) -> Self {
if let Some((tl, _br)) = self.bounds() {
let mut new_self = Span(vec![]);
for (cell, ch) in self.iter() {
let local_cell = tl.localize_cell(*cell);
new_self.push((local_cell, *ch));
}
new_self
} else {
self
}
}
/// convert this span into fragments applying endorsement
/// of group into fragments
///
///
/// TODO: return the rejects as Span, instead of Contacts
pub(crate) fn endorse(self) -> Endorse<FragmentSpan, Span> {
// try to endorse as circles or arcs
let (mut accepted, un_endorsed_span): (Vec<FragmentSpan>, Span) =
self.endorse_to_arcs_and_circles();
// convert into contacts and try to endorse as rects fragments
let un_endorsed_contacts: Vec<Contacts> = un_endorsed_span.into();
let rect_endorsed: Endorse<FragmentSpan, Contacts> =
Contacts::endorse_rects(un_endorsed_contacts);
accepted.extend(rect_endorsed.accepted);
let re_endorsed = Self::re_endorse(rect_endorsed.rejects);
let mut endorsed = Endorse {
accepted,
rejects: vec![],
};
endorsed.extend(re_endorsed);
endorsed
}
/// re try endorsing the contacts into arc and circles by converting it to span first
fn re_endorse(rect_rejects: Vec<Contacts>) -> Endorse<FragmentSpan, Span> {
// convert back to span
let span_rejects: Vec<Span> = rect_rejects
.into_iter()
.map(|contact| contact.span())
.collect();
let span_rejects: Vec<Span> = Span::merge_recursive(span_rejects);
// try to endorse as circles or arcs one more time
let (accepted, rejects): (Vec<Vec<FragmentSpan>>, Vec<Span>) =
span_rejects
.into_iter()
.map(|span| span.endorse_to_arcs_and_circles())
.unzip();
Endorse {
accepted: accepted.into_iter().flatten().collect(),
rejects,
}
}
/// endorse this span into circles, half_circle, quarter_circle only
fn endorse_to_arcs_and_circles(self) -> (Vec<FragmentSpan>, Span) {
let mut accepted = vec![];
let (top_left, _) = self.bounds().expect("must have bounds");
let un_endorsed_span: Span = if let Some((circle, un_endorsed_span)) =
circle_map::endorse_circle_span(&self)
{
let circle = circle.absolute_position(top_left);
let circle_frag_span =
FragmentSpan::new(self.clone(), circle.into());
accepted.push(circle_frag_span);
un_endorsed_span
} else if let Some((three_quarters_arc, un_endorsed_span)) =
circle_map::endorse_three_quarters_arc_span(&self)
{
let three_quarters_arc =
three_quarters_arc.absolute_position(top_left);
let three_quarters_arc_frag_span =
FragmentSpan::new(self.clone(), three_quarters_arc.into());
accepted.push(three_quarters_arc_frag_span);
un_endorsed_span
} else if let Some((half_arc, un_endorsed_span)) =
circle_map::endorse_half_arc_span(&self)
{
let half_arc = half_arc.absolute_position(top_left);
let half_arc_frag_span =
FragmentSpan::new(self.clone(), half_arc.into());
accepted.push(half_arc_frag_span);
un_endorsed_span
} else if let Some((arc, un_endorsed_span)) =
circle_map::endorse_quarter_arc_span(&self)
{
let arc = arc.absolute_position(top_left);
let arc_frag_span = FragmentSpan::new(self.clone(), arc.into());
accepted.push(arc_frag_span);
un_endorsed_span
} else {
self
};
(accepted, un_endorsed_span)
}
/// create a span of the cells that is inside of the start and end bound cells
pub(crate) fn extract(&self, bound1: Cell, bound2: Cell) -> Self {
Span(
self.iter()
.map(|(cell, ch)| (*cell, *ch))
.filter(|(cell, _ch)| cell.is_bounded(bound1, bound2))
.collect(),
)
}
/// returns true if any cell on this span
/// is within the bounds of `bound1` and `bound2`
pub fn is_bounded(&self, bound1: Cell, bound2: Cell) -> bool {
self.iter()
.all(|(cell, ch)| cell.is_bounded(bound1, bound2))
}
pub fn hit_cell(&self, needle: Cell) -> bool {
self.iter().any(|(cell, ch)| *cell == needle)
}
/// merge as is without checking it it can
pub fn merge_no_check(&self, other: &Self) -> Self {
let mut cells = self.0.clone();
cells.extend(&other.0);
Span(cells)
}
}
impl Merge for Span {
fn merge(&self, other: &Self) -> Option<Self> {
if self.can_merge(other) {
Some(self.merge_no_check(other))
} else {
None
}
}
}
impl Bounds {
pub fn new(cell1: Cell, cell2: Cell) -> Self {
let (top_left, bottom_right) = Cell::rearrange_bound(cell1, cell2);
Self {
top_left,
bottom_right,
}
}
pub fn top_left(&self) -> Cell {
self.top_left
}
pub fn bottom_right(&self) -> Cell {
self.bottom_right
}
pub fn top_right(&self) -> Cell {
Cell::new(self.bottom_right.x, self.top_left.y)
}
pub fn | (&self) -> Cell {
Cell::new(self.top_left.x, self.bottom_right.y)
}
}
/// create a property buffer for all the cells of this span
impl<'p> From<Span> for PropertyBuffer<'p> {
fn from(span: Span) -> Self {
let mut pb = PropertyBuffer::new();
for (cell, ch) in span.iter() {
if let Some(property) = Property::from_char(*ch) {
pb.as_mut().insert(*cell, property);
}
}
pb
}
}
/// Grouping cell by adjacents are not enough
///
/// grouping them together when they are actually connected
/// is the most approprivate way of grouping
/// Span just provides an optimization of the number
/// of elements to be checked.
/// Only elements on the same span are checked to see if they
/// belong on the same group
///
impl From<Span> for Vec<Contacts> {
fn from(span: Span) -> Vec<Contacts> {
let fb = FragmentBuffer::from(span);
let merged_fragments: Vec<FragmentSpan> = fb.merge_fragment_spans();
let contacts: Vec<Contacts> = merged_fragments
.into_iter()
.map(|frag| Contacts::new(frag))
.collect();
Contacts::merge_recursive(contacts)
}
}
/// First we crate a property buffer based on the cell,char content of this span
/// and then based on the property, we extract the accurate fragments
///
/// If a character has no property, try to see if has equivalent fragments from unicode_map
/// otherwise add it to the fragment_buffer as a text fragment
impl From<Span> for FragmentBuffer {
fn from(span: Span) -> FragmentBuffer {
let pb = PropertyBuffer::from(span.clone());
let mut fb = FragmentBuffer::from(pb.clone());
for (cell, ch) in span.iter() {
if pb.as_ref().get(cell).is_none() {
if let Some(fragments) = UNICODE_FRAGMENTS.get(ch) {
fb.add_fragments_to_cell(*cell, *ch, fragments.clone());
} else {
fb.add_fragment_to_cell(
*cell,
*ch,
fragment::cell_text(*ch),
);
}
}
}
fb
}
}
impl fmt::Display for Span {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut buffer = StringBuffer::new();
if let Some((tl, _br)) = self.bounds() {
for (cell, ch) in self.iter() {
if *ch!= '\0' &&!ch.is_whitespace() {
let local = tl.localize_cell(*cell);
buffer.add_char(local.x, local.y, *ch);
}
}
}
write!(f, "{}", buffer.to_string())
}
}
#[cfg(test)]
mod test_span;
| bottom_left | identifier_name |
span.rs | use crate::{
buffer::{
cell_buffer::{Contacts, Endorse},
fragment_buffer::FragmentSpan,
FragmentBuffer, Property, PropertyBuffer, StringBuffer,
},
fragment,
fragment::Circle,
map::{circle_map, UNICODE_FRAGMENTS},
Cell, Fragment, Merge, Point, Settings,
};
use itertools::Itertools;
use std::{
fmt,
ops::{Deref, DerefMut},
};
/// A describes where a char came from relative to the source ascii text
/// The primary purpose of span is to group adjacent cell together
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Span(pub Vec<(Cell, char)>);
impl Deref for Span {
type Target = Vec<(Cell, char)>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub struct Bounds {
top_left: Cell,
bottom_right: Cell,
}
impl DerefMut for Span {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<Vec<(Cell, char)>> for Span {
fn from(cell_chars: Vec<(Cell, char)>) -> Self {
Span(cell_chars)
}
}
impl Span {
pub(crate) fn new(cell: Cell, ch: char) -> Self {
Span(vec![(cell, ch)])
}
pub(super) fn is_adjacent(&self, cell: &Cell) -> bool {
self.iter()
.rev()
.any(|(ex_cell, _)| ex_cell.is_adjacent(cell))
}
/// if any cell of this span is adjacent to any cell of the other
/// Use.rev() to check the last cell of this Span against the first cell of the other Span
/// They have a high change of matching faster
pub(super) fn can_merge(&self, other: &Self) -> bool {
self.iter().rev().any(|(cell, _)| {
other
.iter()
.any(|(other_cell, _)| cell.is_adjacent(other_cell))
})
}
/// paste the other Span at cell location `loc`
pub fn paste_at(&self, loc: Cell, other: &Self) -> Self {
let mut this = self.clone();
for (cell, ch) in other.deref() {
this.push((*cell + loc, *ch));
}
this.sort();
this.dedup();
this
}
fn top_left(&self) -> Cell {
let bounds = self.bounds().expect("must have bounds");
bounds.0
}
pub fn localize_point(&self, point: Point) -> Point {
self.top_left().localize_point(point)
}
/// returns the top_left most cell which aligns the top most and the left most cell.
pub(crate) fn bounds(&self) -> Option<(Cell, Cell)> {
if let Some((min_y, max_y)) =
self.iter().map(|(cell, _)| cell.y).minmax().into_option()
{
if let Some((min_x, max_x)) =
self.iter().map(|(cell, _)| cell.x).minmax().into_option()
{
Some((Cell::new(min_x, min_y), Cell::new(max_x, max_y)))
} else {
None
}
} else {
None
}
}
pub fn cell_bounds(&self) -> Option<Bounds> {
if let Some((top_left, top_right)) = self.bounds() {
Some(Bounds::new(top_left, top_right))
} else {
None
}
}
/// shift the cells relative to the top_left most bound
pub(crate) fn localize(self) -> Self {
if let Some((tl, _br)) = self.bounds() {
let mut new_self = Span(vec![]);
for (cell, ch) in self.iter() {
let local_cell = tl.localize_cell(*cell);
new_self.push((local_cell, *ch));
}
new_self
} else {
self
}
}
/// convert this span into fragments applying endorsement
/// of group into fragments
///
///
/// TODO: return the rejects as Span, instead of Contacts
pub(crate) fn endorse(self) -> Endorse<FragmentSpan, Span> {
// try to endorse as circles or arcs
let (mut accepted, un_endorsed_span): (Vec<FragmentSpan>, Span) =
self.endorse_to_arcs_and_circles();
// convert into contacts and try to endorse as rects fragments
let un_endorsed_contacts: Vec<Contacts> = un_endorsed_span.into();
let rect_endorsed: Endorse<FragmentSpan, Contacts> =
Contacts::endorse_rects(un_endorsed_contacts);
accepted.extend(rect_endorsed.accepted);
| accepted,
rejects: vec![],
};
endorsed.extend(re_endorsed);
endorsed
}
/// re try endorsing the contacts into arc and circles by converting it to span first
fn re_endorse(rect_rejects: Vec<Contacts>) -> Endorse<FragmentSpan, Span> {
// convert back to span
let span_rejects: Vec<Span> = rect_rejects
.into_iter()
.map(|contact| contact.span())
.collect();
let span_rejects: Vec<Span> = Span::merge_recursive(span_rejects);
// try to endorse as circles or arcs one more time
let (accepted, rejects): (Vec<Vec<FragmentSpan>>, Vec<Span>) =
span_rejects
.into_iter()
.map(|span| span.endorse_to_arcs_and_circles())
.unzip();
Endorse {
accepted: accepted.into_iter().flatten().collect(),
rejects,
}
}
/// endorse this span into circles, half_circle, quarter_circle only
fn endorse_to_arcs_and_circles(self) -> (Vec<FragmentSpan>, Span) {
let mut accepted = vec![];
let (top_left, _) = self.bounds().expect("must have bounds");
let un_endorsed_span: Span = if let Some((circle, un_endorsed_span)) =
circle_map::endorse_circle_span(&self)
{
let circle = circle.absolute_position(top_left);
let circle_frag_span =
FragmentSpan::new(self.clone(), circle.into());
accepted.push(circle_frag_span);
un_endorsed_span
} else if let Some((three_quarters_arc, un_endorsed_span)) =
circle_map::endorse_three_quarters_arc_span(&self)
{
let three_quarters_arc =
three_quarters_arc.absolute_position(top_left);
let three_quarters_arc_frag_span =
FragmentSpan::new(self.clone(), three_quarters_arc.into());
accepted.push(three_quarters_arc_frag_span);
un_endorsed_span
} else if let Some((half_arc, un_endorsed_span)) =
circle_map::endorse_half_arc_span(&self)
{
let half_arc = half_arc.absolute_position(top_left);
let half_arc_frag_span =
FragmentSpan::new(self.clone(), half_arc.into());
accepted.push(half_arc_frag_span);
un_endorsed_span
} else if let Some((arc, un_endorsed_span)) =
circle_map::endorse_quarter_arc_span(&self)
{
let arc = arc.absolute_position(top_left);
let arc_frag_span = FragmentSpan::new(self.clone(), arc.into());
accepted.push(arc_frag_span);
un_endorsed_span
} else {
self
};
(accepted, un_endorsed_span)
}
/// create a span of the cells that is inside of the start and end bound cells
pub(crate) fn extract(&self, bound1: Cell, bound2: Cell) -> Self {
Span(
self.iter()
.map(|(cell, ch)| (*cell, *ch))
.filter(|(cell, _ch)| cell.is_bounded(bound1, bound2))
.collect(),
)
}
/// returns true if any cell on this span
/// is within the bounds of `bound1` and `bound2`
pub fn is_bounded(&self, bound1: Cell, bound2: Cell) -> bool {
self.iter()
.all(|(cell, ch)| cell.is_bounded(bound1, bound2))
}
pub fn hit_cell(&self, needle: Cell) -> bool {
self.iter().any(|(cell, ch)| *cell == needle)
}
/// merge as is without checking it it can
pub fn merge_no_check(&self, other: &Self) -> Self {
let mut cells = self.0.clone();
cells.extend(&other.0);
Span(cells)
}
}
impl Merge for Span {
fn merge(&self, other: &Self) -> Option<Self> {
if self.can_merge(other) {
Some(self.merge_no_check(other))
} else {
None
}
}
}
impl Bounds {
pub fn new(cell1: Cell, cell2: Cell) -> Self {
let (top_left, bottom_right) = Cell::rearrange_bound(cell1, cell2);
Self {
top_left,
bottom_right,
}
}
pub fn top_left(&self) -> Cell {
self.top_left
}
pub fn bottom_right(&self) -> Cell {
self.bottom_right
}
pub fn top_right(&self) -> Cell {
Cell::new(self.bottom_right.x, self.top_left.y)
}
pub fn bottom_left(&self) -> Cell {
Cell::new(self.top_left.x, self.bottom_right.y)
}
}
/// create a property buffer for all the cells of this span
impl<'p> From<Span> for PropertyBuffer<'p> {
fn from(span: Span) -> Self {
let mut pb = PropertyBuffer::new();
for (cell, ch) in span.iter() {
if let Some(property) = Property::from_char(*ch) {
pb.as_mut().insert(*cell, property);
}
}
pb
}
}
/// Grouping cell by adjacents are not enough
///
/// grouping them together when they are actually connected
/// is the most approprivate way of grouping
/// Span just provides an optimization of the number
/// of elements to be checked.
/// Only elements on the same span are checked to see if they
/// belong on the same group
///
impl From<Span> for Vec<Contacts> {
fn from(span: Span) -> Vec<Contacts> {
let fb = FragmentBuffer::from(span);
let merged_fragments: Vec<FragmentSpan> = fb.merge_fragment_spans();
let contacts: Vec<Contacts> = merged_fragments
.into_iter()
.map(|frag| Contacts::new(frag))
.collect();
Contacts::merge_recursive(contacts)
}
}
/// First we crate a property buffer based on the cell,char content of this span
/// and then based on the property, we extract the accurate fragments
///
/// If a character has no property, try to see if has equivalent fragments from unicode_map
/// otherwise add it to the fragment_buffer as a text fragment
impl From<Span> for FragmentBuffer {
fn from(span: Span) -> FragmentBuffer {
let pb = PropertyBuffer::from(span.clone());
let mut fb = FragmentBuffer::from(pb.clone());
for (cell, ch) in span.iter() {
if pb.as_ref().get(cell).is_none() {
if let Some(fragments) = UNICODE_FRAGMENTS.get(ch) {
fb.add_fragments_to_cell(*cell, *ch, fragments.clone());
} else {
fb.add_fragment_to_cell(
*cell,
*ch,
fragment::cell_text(*ch),
);
}
}
}
fb
}
}
impl fmt::Display for Span {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut buffer = StringBuffer::new();
if let Some((tl, _br)) = self.bounds() {
for (cell, ch) in self.iter() {
if *ch!= '\0' &&!ch.is_whitespace() {
let local = tl.localize_cell(*cell);
buffer.add_char(local.x, local.y, *ch);
}
}
}
write!(f, "{}", buffer.to_string())
}
}
#[cfg(test)]
mod test_span; | let re_endorsed = Self::re_endorse(rect_endorsed.rejects);
let mut endorsed = Endorse { | random_line_split |
observing.rs | use std::ffi::CStr;
use std::mem;
use libc::c_void;
use crate::bw;
use crate::bw_1161::{self, vars};
pub unsafe fn process_commands_hook(
data: *const u8,
len: u32,
replay: u32,
orig: unsafe extern fn(*const u8, u32, u32),
) {
if replay == 0 && *vars::current_command_player >= 8 {
// Replace anything sent by observers with a keep alive command, I'm quite sure there will
// be buffer overflows otherwise.
let buf = [0x05u8];
orig(buf.as_ptr(), 1, replay);
} else {
orig(data, len, replay);
}
}
// Don't validate sync commands when observing. As the sync contains visibility info, observers
// are out of sync from everyone else, as their vision settings are not sent to other players.
pub unsafe fn sync_command_hook(
data: *const u8,
orig: unsafe extern fn(*const u8) -> u32,
) -> u32 {
if is_local_player_observer() {
1
} else {
orig(data)
}
}
pub unsafe fn chat_message_hook(
storm_player: u32,
message: *const u8,
length: u32,
orig: unsafe extern fn(u32, *const u8, u32) -> u32,
) -> u32 {
use std::io::Write;
if vars::storm_id_to_human_id[storm_player as usize] >= 8 {
// Observer message, we'll have to manually print text and add to replay recording.
let message = std::slice::from_raw_parts(message, length as usize);
// The length should include null byte
if message.last()!= Some(&0) {
return 0;
}
// There's some unnecessary control information at the start of message
let text = match message.get(2..(message.len() - 1)) {
Some(s) => s,
None => return 0,
};
let mut buf = [0; 512];
let format = |mut pos: &mut [u8], msg_color: u8| -> Result<(), std::io::Error> {
// Write "\x1f{player}: \x02{message}"
// 0x1f is the neutral cyan color and 0x02 is the regular chat message one.
write!(&mut pos, "\x1f")?;
let name = CStr::from_ptr(
vars::storm_players[storm_player as usize].name.as_ptr() as *const i8
);
pos.write_all(name.to_bytes())?;
write!(&mut pos, ": ")?;
pos.write_all(&[msg_color])?;
pos.write_all(text)?;
Ok(())
};
let _ = format(&mut buf[..], 0x02);
let mut replay_command = [0u8; 0x52];
replay_command[0] = 0x5c; // Replay chat
replay_command[1] = 0x8; // Player
let _ = (&mut replay_command[2..]).write(&buf[..]);
replay_command[0x51] = 0;
bw_1161::add_to_replay_data(
*vars::replay_data,
replay_command.as_ptr(),
replay_command.len() as u32,
storm_player,
);
if storm_player == *vars::local_storm_id {
// Switch the message to be green to show it's player's own message
let _ = format(&mut buf[..], 0x07);
}
bw_1161::display_message(buf.as_ptr(), 0);
return length;
} else {
orig(storm_player, message, length)
}
}
pub unsafe fn load_dialog_hook(
dialog: *mut bw::Dialog,
base: *mut c_void,
event_handler: *mut c_void,
source_file: *const u8,
source_line: u32,
orig: unsafe extern fn(*mut bw::Dialog, *mut c_void, *mut c_void, *const u8, u32),
) {
orig(dialog, base, event_handler, source_file, source_line);
if!is_local_player_observer() {
return;
}
let name = CStr::from_ptr((*dialog).control.string as *const i8).to_bytes();
if name == b"TextBox" {
if let Some(to_allies) = find_dialog_child(dialog, 0x2) {
(*to_allies).string = b"To Observers:\0".as_ptr();
// Of course the control has to be resized by hand <.<
// Possibly could also just make it left aligned.
// This can be determined "easily" by breaking 1.16.1 in debugger at 004F2FFF when
// opening chat entry while talking to one player, and replacing the "To player:"
// string, and stepping over the call.
(*to_allies).area.right = 0x55;
} else {
error!("Couldn't find 'To Allies:' control");
}
} else if name == b"MsgFltr" {
if let Some(to_allies) = find_dialog_child(dialog, 0x3) {
(*to_allies).string = b"Send to observers\0".as_ptr();
} else {
error!("Couldn't find 'Send to allies' control");
}
}
}
pub unsafe fn init_ui_variables_hook(orig: unsafe extern fn()) {
orig();
if is_local_player_observer() {
*vars::replay_visions = 0xff;
*vars::player_visions = 0xff;
// To allies (=observers)
(*vars::game).chat_dialog_recipient = 9;
// Could also set the race, it currently just does an overflow read to zerg.
}
}
pub unsafe fn cmdbtn_event_handler_hook(
control: *mut bw::Control,
event: *mut bw::UiEvent,
orig: unsafe extern fn(*mut bw::Control, *mut bw::UiEvent) -> u32,
) -> u32 {
if!is_local_player_observer() {
orig(control, event)
} else {
// Disable clicking on command buttons.
// Event 4 = Left click, 6 = Double click, Extended 3 = Hotkey
if (*event).ty == 0x4 || (*event).ty == 0x6 {
0
} else if (*event).ty == 0xe && (*event).extended_type == 3 {
1
} else {
orig(control, event)
}
}
}
pub unsafe fn get_gluall_string_hook(
string_id: u32,
orig: unsafe extern fn(u32) -> *const u8,
) -> *const u8 {
// Replace "Replay players" text in the alliance dialog when observing
if string_id == 0xb6 && is_local_player_observer() {
"Players\0".as_ptr()
} else {
orig(string_id)
}
}
pub unsafe fn update_net_timeout_players(orig: unsafe extern fn()) {
unsafe fn find_timeout_dialog_player_label(bw_player: u8) -> Option<*mut bw::Control> |
// To make observers appear in network timeout dialog, we temporarily write their info to
// ingame player structure, and revert the change after this function has been called.
let bw_players: &mut [bw::Player] = &mut vars::players[..8];
let actual_players: [bw::Player; 8] = {
let mut players: [bw::Player; 8] = mem::zeroed();
for i in 0..players.len() {
players[i] = bw_players[i].clone();
}
players
};
let mut overwritten_player_id_to_storm = [None; 8];
for storm_id in 0..8 {
let is_obs =!actual_players.iter().any(|x| x.storm_id == storm_id);
if is_obs {
match bw_players
.iter()
.position(|x| x.player_type!= bw::PLAYER_TYPE_HUMAN)
{
Some(pos) => {
overwritten_player_id_to_storm[pos] = Some(storm_id);
bw_players[pos].storm_id = storm_id;
bw_players[pos].player_type = bw::PLAYER_TYPE_HUMAN;
}
None => {
error!(
"Net timeout dialog: Out of player slots for observer, storm id {}",
storm_id
);
}
}
}
}
orig();
for bw_player in 0..8 {
if let Some(storm_id) = overwritten_player_id_to_storm[bw_player] {
if let Some(ctrl) = find_timeout_dialog_player_label(bw_player as u8) {
// We need to redirect the name string to the storm player string, and replace the
// player value to unused player 10, whose color will be set to neutral resource
// color. (The neutral player 11 actually can have a different color for neutral
// buildings)
//
// Technically player 10 can actually have units in some odd UMS maps, but we
// aren't allowing observing UMS games anyways, so whatever. Even if the someone
// noticed the color changing, I doubt they would care.
(*ctrl).string = vars::storm_players[storm_id as usize].name.as_ptr();
(*ctrl).user_ptr = 10usize as *mut c_void;
(*vars::game).player_minimap_color[10] = *vars::resource_minimap_color;
}
}
}
for (i, player) in actual_players.iter().enumerate() {
vars::players[i] = player.clone();
}
}
pub unsafe fn update_command_card_hook(orig: unsafe extern fn()) {
if is_local_player_observer() &&!(*vars::primary_selected).is_null() {
*vars::local_nation_id = (**vars::primary_selected).player as u32;
orig();
*vars::local_nation_id =!0;
} else {
orig();
}
}
pub unsafe fn draw_command_button_hook(
control: *mut bw::Control,
x: i32,
y: i32,
area: *mut c_void,
orig: unsafe extern fn(*mut bw::Control, i32, i32, *mut c_void),
) {
// Need to disable replay flag being set from DrawScreenHook if observing
let was_replay = *vars::is_replay;
if is_local_player_observer() {
*vars::is_replay = 0;
}
orig(control, x, y, area);
*vars::is_replay = was_replay;
}
pub unsafe fn center_screen_on_start_location(
unit: *mut bw::PreplacedUnit,
other: *mut c_void,
orig: unsafe extern fn(*mut bw::PreplacedUnit, *mut c_void) -> u32,
) -> u32 {
let was_replay = *vars::is_replay;
if is_local_player_observer() && vars::players[(*unit).player as usize].player_type!= 0 {
// Center the screen once we get the first active player so observers don't randomly
// end up staring at unused start location.
*vars::is_replay = 1;
}
let result = orig(unit, other);
*vars::is_replay = was_replay;
result
}
unsafe fn find_dialog_child(dialog: *mut bw::Dialog, child_id: i16) -> Option<*mut bw::Control> {
let mut control = (*dialog).first_child;
while!control.is_null() {
if (*control).id == child_id {
return Some(control);
}
control = (*control).next;
}
None
}
unsafe fn is_local_player_observer() -> bool {
// Should probs use shieldbattery's data instead of checking BW variables,
// but we don't have anything that's readily accessible by game thread.
*vars::local_nation_id ==!0
}
pub unsafe fn with_replay_flag_if_obs<F: FnOnce() -> R, R>(func: F) -> R {
let was_replay = *vars::is_replay;
if is_local_player_observer() {
*vars::is_replay = 1;
}
let ret = func();
*vars::is_replay = was_replay;
ret
}
| {
if (*vars::timeout_bin).is_null() {
return None;
}
let mut label = find_dialog_child(*vars::timeout_bin, -10)?;
let mut label_count = 0;
while !label.is_null() && label_count < 8 {
// Flag 0x8 == Shown
if (*label).flags & 0x8 != 0 && (*label).user_ptr as usize == bw_player as usize {
return Some(label);
}
label = (*label).next;
label_count += 1;
}
None
} | identifier_body |
observing.rs | use std::ffi::CStr;
use std::mem;
use libc::c_void;
use crate::bw;
use crate::bw_1161::{self, vars};
pub unsafe fn process_commands_hook(
data: *const u8,
len: u32,
replay: u32,
orig: unsafe extern fn(*const u8, u32, u32),
) {
if replay == 0 && *vars::current_command_player >= 8 {
// Replace anything sent by observers with a keep alive command, I'm quite sure there will
// be buffer overflows otherwise.
let buf = [0x05u8];
orig(buf.as_ptr(), 1, replay);
} else {
orig(data, len, replay);
}
}
// Don't validate sync commands when observing. As the sync contains visibility info, observers
// are out of sync from everyone else, as their vision settings are not sent to other players.
pub unsafe fn sync_command_hook(
data: *const u8,
orig: unsafe extern fn(*const u8) -> u32,
) -> u32 {
if is_local_player_observer() {
1
} else {
orig(data)
}
}
pub unsafe fn | (
storm_player: u32,
message: *const u8,
length: u32,
orig: unsafe extern fn(u32, *const u8, u32) -> u32,
) -> u32 {
use std::io::Write;
if vars::storm_id_to_human_id[storm_player as usize] >= 8 {
// Observer message, we'll have to manually print text and add to replay recording.
let message = std::slice::from_raw_parts(message, length as usize);
// The length should include null byte
if message.last()!= Some(&0) {
return 0;
}
// There's some unnecessary control information at the start of message
let text = match message.get(2..(message.len() - 1)) {
Some(s) => s,
None => return 0,
};
let mut buf = [0; 512];
let format = |mut pos: &mut [u8], msg_color: u8| -> Result<(), std::io::Error> {
// Write "\x1f{player}: \x02{message}"
// 0x1f is the neutral cyan color and 0x02 is the regular chat message one.
write!(&mut pos, "\x1f")?;
let name = CStr::from_ptr(
vars::storm_players[storm_player as usize].name.as_ptr() as *const i8
);
pos.write_all(name.to_bytes())?;
write!(&mut pos, ": ")?;
pos.write_all(&[msg_color])?;
pos.write_all(text)?;
Ok(())
};
let _ = format(&mut buf[..], 0x02);
let mut replay_command = [0u8; 0x52];
replay_command[0] = 0x5c; // Replay chat
replay_command[1] = 0x8; // Player
let _ = (&mut replay_command[2..]).write(&buf[..]);
replay_command[0x51] = 0;
bw_1161::add_to_replay_data(
*vars::replay_data,
replay_command.as_ptr(),
replay_command.len() as u32,
storm_player,
);
if storm_player == *vars::local_storm_id {
// Switch the message to be green to show it's player's own message
let _ = format(&mut buf[..], 0x07);
}
bw_1161::display_message(buf.as_ptr(), 0);
return length;
} else {
orig(storm_player, message, length)
}
}
pub unsafe fn load_dialog_hook(
dialog: *mut bw::Dialog,
base: *mut c_void,
event_handler: *mut c_void,
source_file: *const u8,
source_line: u32,
orig: unsafe extern fn(*mut bw::Dialog, *mut c_void, *mut c_void, *const u8, u32),
) {
orig(dialog, base, event_handler, source_file, source_line);
if!is_local_player_observer() {
return;
}
let name = CStr::from_ptr((*dialog).control.string as *const i8).to_bytes();
if name == b"TextBox" {
if let Some(to_allies) = find_dialog_child(dialog, 0x2) {
(*to_allies).string = b"To Observers:\0".as_ptr();
// Of course the control has to be resized by hand <.<
// Possibly could also just make it left aligned.
// This can be determined "easily" by breaking 1.16.1 in debugger at 004F2FFF when
// opening chat entry while talking to one player, and replacing the "To player:"
// string, and stepping over the call.
(*to_allies).area.right = 0x55;
} else {
error!("Couldn't find 'To Allies:' control");
}
} else if name == b"MsgFltr" {
if let Some(to_allies) = find_dialog_child(dialog, 0x3) {
(*to_allies).string = b"Send to observers\0".as_ptr();
} else {
error!("Couldn't find 'Send to allies' control");
}
}
}
pub unsafe fn init_ui_variables_hook(orig: unsafe extern fn()) {
orig();
if is_local_player_observer() {
*vars::replay_visions = 0xff;
*vars::player_visions = 0xff;
// To allies (=observers)
(*vars::game).chat_dialog_recipient = 9;
// Could also set the race, it currently just does an overflow read to zerg.
}
}
pub unsafe fn cmdbtn_event_handler_hook(
control: *mut bw::Control,
event: *mut bw::UiEvent,
orig: unsafe extern fn(*mut bw::Control, *mut bw::UiEvent) -> u32,
) -> u32 {
if!is_local_player_observer() {
orig(control, event)
} else {
// Disable clicking on command buttons.
// Event 4 = Left click, 6 = Double click, Extended 3 = Hotkey
if (*event).ty == 0x4 || (*event).ty == 0x6 {
0
} else if (*event).ty == 0xe && (*event).extended_type == 3 {
1
} else {
orig(control, event)
}
}
}
pub unsafe fn get_gluall_string_hook(
string_id: u32,
orig: unsafe extern fn(u32) -> *const u8,
) -> *const u8 {
// Replace "Replay players" text in the alliance dialog when observing
if string_id == 0xb6 && is_local_player_observer() {
"Players\0".as_ptr()
} else {
orig(string_id)
}
}
pub unsafe fn update_net_timeout_players(orig: unsafe extern fn()) {
unsafe fn find_timeout_dialog_player_label(bw_player: u8) -> Option<*mut bw::Control> {
if (*vars::timeout_bin).is_null() {
return None;
}
let mut label = find_dialog_child(*vars::timeout_bin, -10)?;
let mut label_count = 0;
while!label.is_null() && label_count < 8 {
// Flag 0x8 == Shown
if (*label).flags & 0x8!= 0 && (*label).user_ptr as usize == bw_player as usize {
return Some(label);
}
label = (*label).next;
label_count += 1;
}
None
}
// To make observers appear in network timeout dialog, we temporarily write their info to
// ingame player structure, and revert the change after this function has been called.
let bw_players: &mut [bw::Player] = &mut vars::players[..8];
let actual_players: [bw::Player; 8] = {
let mut players: [bw::Player; 8] = mem::zeroed();
for i in 0..players.len() {
players[i] = bw_players[i].clone();
}
players
};
let mut overwritten_player_id_to_storm = [None; 8];
for storm_id in 0..8 {
let is_obs =!actual_players.iter().any(|x| x.storm_id == storm_id);
if is_obs {
match bw_players
.iter()
.position(|x| x.player_type!= bw::PLAYER_TYPE_HUMAN)
{
Some(pos) => {
overwritten_player_id_to_storm[pos] = Some(storm_id);
bw_players[pos].storm_id = storm_id;
bw_players[pos].player_type = bw::PLAYER_TYPE_HUMAN;
}
None => {
error!(
"Net timeout dialog: Out of player slots for observer, storm id {}",
storm_id
);
}
}
}
}
orig();
for bw_player in 0..8 {
if let Some(storm_id) = overwritten_player_id_to_storm[bw_player] {
if let Some(ctrl) = find_timeout_dialog_player_label(bw_player as u8) {
// We need to redirect the name string to the storm player string, and replace the
// player value to unused player 10, whose color will be set to neutral resource
// color. (The neutral player 11 actually can have a different color for neutral
// buildings)
//
// Technically player 10 can actually have units in some odd UMS maps, but we
// aren't allowing observing UMS games anyways, so whatever. Even if the someone
// noticed the color changing, I doubt they would care.
(*ctrl).string = vars::storm_players[storm_id as usize].name.as_ptr();
(*ctrl).user_ptr = 10usize as *mut c_void;
(*vars::game).player_minimap_color[10] = *vars::resource_minimap_color;
}
}
}
for (i, player) in actual_players.iter().enumerate() {
vars::players[i] = player.clone();
}
}
pub unsafe fn update_command_card_hook(orig: unsafe extern fn()) {
if is_local_player_observer() &&!(*vars::primary_selected).is_null() {
*vars::local_nation_id = (**vars::primary_selected).player as u32;
orig();
*vars::local_nation_id =!0;
} else {
orig();
}
}
pub unsafe fn draw_command_button_hook(
control: *mut bw::Control,
x: i32,
y: i32,
area: *mut c_void,
orig: unsafe extern fn(*mut bw::Control, i32, i32, *mut c_void),
) {
// Need to disable replay flag being set from DrawScreenHook if observing
let was_replay = *vars::is_replay;
if is_local_player_observer() {
*vars::is_replay = 0;
}
orig(control, x, y, area);
*vars::is_replay = was_replay;
}
pub unsafe fn center_screen_on_start_location(
unit: *mut bw::PreplacedUnit,
other: *mut c_void,
orig: unsafe extern fn(*mut bw::PreplacedUnit, *mut c_void) -> u32,
) -> u32 {
let was_replay = *vars::is_replay;
if is_local_player_observer() && vars::players[(*unit).player as usize].player_type!= 0 {
// Center the screen once we get the first active player so observers don't randomly
// end up staring at unused start location.
*vars::is_replay = 1;
}
let result = orig(unit, other);
*vars::is_replay = was_replay;
result
}
unsafe fn find_dialog_child(dialog: *mut bw::Dialog, child_id: i16) -> Option<*mut bw::Control> {
let mut control = (*dialog).first_child;
while!control.is_null() {
if (*control).id == child_id {
return Some(control);
}
control = (*control).next;
}
None
}
unsafe fn is_local_player_observer() -> bool {
// Should probs use shieldbattery's data instead of checking BW variables,
// but we don't have anything that's readily accessible by game thread.
*vars::local_nation_id ==!0
}
pub unsafe fn with_replay_flag_if_obs<F: FnOnce() -> R, R>(func: F) -> R {
let was_replay = *vars::is_replay;
if is_local_player_observer() {
*vars::is_replay = 1;
}
let ret = func();
*vars::is_replay = was_replay;
ret
}
| chat_message_hook | identifier_name |
observing.rs | use std::ffi::CStr;
use std::mem;
use libc::c_void;
use crate::bw;
use crate::bw_1161::{self, vars};
pub unsafe fn process_commands_hook(
data: *const u8,
len: u32,
replay: u32,
orig: unsafe extern fn(*const u8, u32, u32),
) {
if replay == 0 && *vars::current_command_player >= 8 {
// Replace anything sent by observers with a keep alive command, I'm quite sure there will
// be buffer overflows otherwise.
let buf = [0x05u8];
orig(buf.as_ptr(), 1, replay);
} else {
orig(data, len, replay);
}
}
// Don't validate sync commands when observing. As the sync contains visibility info, observers
// are out of sync from everyone else, as their vision settings are not sent to other players.
pub unsafe fn sync_command_hook(
data: *const u8,
orig: unsafe extern fn(*const u8) -> u32,
) -> u32 {
if is_local_player_observer() {
1
} else {
orig(data)
}
}
pub unsafe fn chat_message_hook(
storm_player: u32,
message: *const u8,
length: u32,
orig: unsafe extern fn(u32, *const u8, u32) -> u32,
) -> u32 {
use std::io::Write;
if vars::storm_id_to_human_id[storm_player as usize] >= 8 {
// Observer message, we'll have to manually print text and add to replay recording.
let message = std::slice::from_raw_parts(message, length as usize);
// The length should include null byte
if message.last()!= Some(&0) {
return 0;
}
// There's some unnecessary control information at the start of message
let text = match message.get(2..(message.len() - 1)) {
Some(s) => s,
None => return 0,
};
let mut buf = [0; 512];
let format = |mut pos: &mut [u8], msg_color: u8| -> Result<(), std::io::Error> {
// Write "\x1f{player}: \x02{message}"
// 0x1f is the neutral cyan color and 0x02 is the regular chat message one.
write!(&mut pos, "\x1f")?;
let name = CStr::from_ptr(
vars::storm_players[storm_player as usize].name.as_ptr() as *const i8
);
pos.write_all(name.to_bytes())?;
write!(&mut pos, ": ")?;
pos.write_all(&[msg_color])?;
pos.write_all(text)?;
Ok(())
};
let _ = format(&mut buf[..], 0x02);
let mut replay_command = [0u8; 0x52];
replay_command[0] = 0x5c; // Replay chat
replay_command[1] = 0x8; // Player
let _ = (&mut replay_command[2..]).write(&buf[..]);
replay_command[0x51] = 0;
bw_1161::add_to_replay_data(
*vars::replay_data,
replay_command.as_ptr(),
replay_command.len() as u32,
storm_player,
);
if storm_player == *vars::local_storm_id {
// Switch the message to be green to show it's player's own message
let _ = format(&mut buf[..], 0x07);
}
bw_1161::display_message(buf.as_ptr(), 0);
return length;
} else {
orig(storm_player, message, length)
}
}
pub unsafe fn load_dialog_hook(
dialog: *mut bw::Dialog,
base: *mut c_void,
event_handler: *mut c_void,
source_file: *const u8,
source_line: u32,
orig: unsafe extern fn(*mut bw::Dialog, *mut c_void, *mut c_void, *const u8, u32),
) {
orig(dialog, base, event_handler, source_file, source_line);
if!is_local_player_observer() {
return;
}
let name = CStr::from_ptr((*dialog).control.string as *const i8).to_bytes();
if name == b"TextBox" {
if let Some(to_allies) = find_dialog_child(dialog, 0x2) {
(*to_allies).string = b"To Observers:\0".as_ptr();
// Of course the control has to be resized by hand <.<
// Possibly could also just make it left aligned.
// This can be determined "easily" by breaking 1.16.1 in debugger at 004F2FFF when
// opening chat entry while talking to one player, and replacing the "To player:"
// string, and stepping over the call.
(*to_allies).area.right = 0x55;
} else {
error!("Couldn't find 'To Allies:' control");
}
} else if name == b"MsgFltr" {
if let Some(to_allies) = find_dialog_child(dialog, 0x3) {
(*to_allies).string = b"Send to observers\0".as_ptr();
} else {
error!("Couldn't find 'Send to allies' control");
}
}
}
pub unsafe fn init_ui_variables_hook(orig: unsafe extern fn()) {
orig();
if is_local_player_observer() {
*vars::replay_visions = 0xff;
*vars::player_visions = 0xff;
// To allies (=observers)
(*vars::game).chat_dialog_recipient = 9;
// Could also set the race, it currently just does an overflow read to zerg.
}
}
pub unsafe fn cmdbtn_event_handler_hook(
control: *mut bw::Control,
event: *mut bw::UiEvent,
orig: unsafe extern fn(*mut bw::Control, *mut bw::UiEvent) -> u32,
) -> u32 {
if!is_local_player_observer() {
orig(control, event)
} else {
// Disable clicking on command buttons.
// Event 4 = Left click, 6 = Double click, Extended 3 = Hotkey
if (*event).ty == 0x4 || (*event).ty == 0x6 {
0
} else if (*event).ty == 0xe && (*event).extended_type == 3 {
1
} else {
orig(control, event)
}
}
}
pub unsafe fn get_gluall_string_hook(
string_id: u32,
orig: unsafe extern fn(u32) -> *const u8,
) -> *const u8 {
// Replace "Replay players" text in the alliance dialog when observing
if string_id == 0xb6 && is_local_player_observer() {
"Players\0".as_ptr()
} else {
orig(string_id)
}
}
pub unsafe fn update_net_timeout_players(orig: unsafe extern fn()) {
unsafe fn find_timeout_dialog_player_label(bw_player: u8) -> Option<*mut bw::Control> {
if (*vars::timeout_bin).is_null() {
return None;
}
let mut label = find_dialog_child(*vars::timeout_bin, -10)?;
let mut label_count = 0;
while!label.is_null() && label_count < 8 {
// Flag 0x8 == Shown
if (*label).flags & 0x8!= 0 && (*label).user_ptr as usize == bw_player as usize {
return Some(label);
}
label = (*label).next;
label_count += 1;
}
None
}
// To make observers appear in network timeout dialog, we temporarily write their info to
// ingame player structure, and revert the change after this function has been called.
let bw_players: &mut [bw::Player] = &mut vars::players[..8];
let actual_players: [bw::Player; 8] = {
let mut players: [bw::Player; 8] = mem::zeroed();
for i in 0..players.len() {
players[i] = bw_players[i].clone();
}
players
};
let mut overwritten_player_id_to_storm = [None; 8];
for storm_id in 0..8 {
let is_obs =!actual_players.iter().any(|x| x.storm_id == storm_id);
if is_obs {
match bw_players
.iter()
.position(|x| x.player_type!= bw::PLAYER_TYPE_HUMAN)
{
Some(pos) => {
overwritten_player_id_to_storm[pos] = Some(storm_id);
bw_players[pos].storm_id = storm_id;
bw_players[pos].player_type = bw::PLAYER_TYPE_HUMAN;
}
None => {
error!(
"Net timeout dialog: Out of player slots for observer, storm id {}",
storm_id
);
}
}
}
}
orig();
for bw_player in 0..8 {
if let Some(storm_id) = overwritten_player_id_to_storm[bw_player] {
if let Some(ctrl) = find_timeout_dialog_player_label(bw_player as u8) {
// We need to redirect the name string to the storm player string, and replace the
// player value to unused player 10, whose color will be set to neutral resource
// color. (The neutral player 11 actually can have a different color for neutral
// buildings)
//
// Technically player 10 can actually have units in some odd UMS maps, but we
// aren't allowing observing UMS games anyways, so whatever. Even if the someone
// noticed the color changing, I doubt they would care.
(*ctrl).string = vars::storm_players[storm_id as usize].name.as_ptr();
(*ctrl).user_ptr = 10usize as *mut c_void;
(*vars::game).player_minimap_color[10] = *vars::resource_minimap_color;
}
}
}
for (i, player) in actual_players.iter().enumerate() {
vars::players[i] = player.clone();
}
}
pub unsafe fn update_command_card_hook(orig: unsafe extern fn()) {
if is_local_player_observer() &&!(*vars::primary_selected).is_null() {
*vars::local_nation_id = (**vars::primary_selected).player as u32;
orig();
*vars::local_nation_id =!0;
} else {
orig();
}
}
pub unsafe fn draw_command_button_hook(
control: *mut bw::Control,
x: i32,
y: i32,
area: *mut c_void,
orig: unsafe extern fn(*mut bw::Control, i32, i32, *mut c_void),
) {
// Need to disable replay flag being set from DrawScreenHook if observing
let was_replay = *vars::is_replay;
if is_local_player_observer() {
*vars::is_replay = 0;
}
orig(control, x, y, area);
*vars::is_replay = was_replay;
}
pub unsafe fn center_screen_on_start_location(
unit: *mut bw::PreplacedUnit,
other: *mut c_void,
orig: unsafe extern fn(*mut bw::PreplacedUnit, *mut c_void) -> u32,
) -> u32 {
let was_replay = *vars::is_replay;
if is_local_player_observer() && vars::players[(*unit).player as usize].player_type!= 0 {
// Center the screen once we get the first active player so observers don't randomly
// end up staring at unused start location.
*vars::is_replay = 1;
}
let result = orig(unit, other);
*vars::is_replay = was_replay;
result
}
unsafe fn find_dialog_child(dialog: *mut bw::Dialog, child_id: i16) -> Option<*mut bw::Control> {
let mut control = (*dialog).first_child;
while!control.is_null() {
if (*control).id == child_id {
return Some(control);
}
control = (*control).next; | unsafe fn is_local_player_observer() -> bool {
// Should probs use shieldbattery's data instead of checking BW variables,
// but we don't have anything that's readily accessible by game thread.
*vars::local_nation_id ==!0
}
pub unsafe fn with_replay_flag_if_obs<F: FnOnce() -> R, R>(func: F) -> R {
let was_replay = *vars::is_replay;
if is_local_player_observer() {
*vars::is_replay = 1;
}
let ret = func();
*vars::is_replay = was_replay;
ret
} | }
None
}
| random_line_split |
observing.rs | use std::ffi::CStr;
use std::mem;
use libc::c_void;
use crate::bw;
use crate::bw_1161::{self, vars};
pub unsafe fn process_commands_hook(
data: *const u8,
len: u32,
replay: u32,
orig: unsafe extern fn(*const u8, u32, u32),
) {
if replay == 0 && *vars::current_command_player >= 8 {
// Replace anything sent by observers with a keep alive command, I'm quite sure there will
// be buffer overflows otherwise.
let buf = [0x05u8];
orig(buf.as_ptr(), 1, replay);
} else {
orig(data, len, replay);
}
}
// Don't validate sync commands when observing. As the sync contains visibility info, observers
// are out of sync from everyone else, as their vision settings are not sent to other players.
pub unsafe fn sync_command_hook(
data: *const u8,
orig: unsafe extern fn(*const u8) -> u32,
) -> u32 {
if is_local_player_observer() {
1
} else {
orig(data)
}
}
pub unsafe fn chat_message_hook(
storm_player: u32,
message: *const u8,
length: u32,
orig: unsafe extern fn(u32, *const u8, u32) -> u32,
) -> u32 {
use std::io::Write;
if vars::storm_id_to_human_id[storm_player as usize] >= 8 {
// Observer message, we'll have to manually print text and add to replay recording.
let message = std::slice::from_raw_parts(message, length as usize);
// The length should include null byte
if message.last()!= Some(&0) {
return 0;
}
// There's some unnecessary control information at the start of message
let text = match message.get(2..(message.len() - 1)) {
Some(s) => s,
None => return 0,
};
let mut buf = [0; 512];
let format = |mut pos: &mut [u8], msg_color: u8| -> Result<(), std::io::Error> {
// Write "\x1f{player}: \x02{message}"
// 0x1f is the neutral cyan color and 0x02 is the regular chat message one.
write!(&mut pos, "\x1f")?;
let name = CStr::from_ptr(
vars::storm_players[storm_player as usize].name.as_ptr() as *const i8
);
pos.write_all(name.to_bytes())?;
write!(&mut pos, ": ")?;
pos.write_all(&[msg_color])?;
pos.write_all(text)?;
Ok(())
};
let _ = format(&mut buf[..], 0x02);
let mut replay_command = [0u8; 0x52];
replay_command[0] = 0x5c; // Replay chat
replay_command[1] = 0x8; // Player
let _ = (&mut replay_command[2..]).write(&buf[..]);
replay_command[0x51] = 0;
bw_1161::add_to_replay_data(
*vars::replay_data,
replay_command.as_ptr(),
replay_command.len() as u32,
storm_player,
);
if storm_player == *vars::local_storm_id {
// Switch the message to be green to show it's player's own message
let _ = format(&mut buf[..], 0x07);
}
bw_1161::display_message(buf.as_ptr(), 0);
return length;
} else {
orig(storm_player, message, length)
}
}
pub unsafe fn load_dialog_hook(
dialog: *mut bw::Dialog,
base: *mut c_void,
event_handler: *mut c_void,
source_file: *const u8,
source_line: u32,
orig: unsafe extern fn(*mut bw::Dialog, *mut c_void, *mut c_void, *const u8, u32),
) {
orig(dialog, base, event_handler, source_file, source_line);
if!is_local_player_observer() {
return;
}
let name = CStr::from_ptr((*dialog).control.string as *const i8).to_bytes();
if name == b"TextBox" {
if let Some(to_allies) = find_dialog_child(dialog, 0x2) {
(*to_allies).string = b"To Observers:\0".as_ptr();
// Of course the control has to be resized by hand <.<
// Possibly could also just make it left aligned.
// This can be determined "easily" by breaking 1.16.1 in debugger at 004F2FFF when
// opening chat entry while talking to one player, and replacing the "To player:"
// string, and stepping over the call.
(*to_allies).area.right = 0x55;
} else {
error!("Couldn't find 'To Allies:' control");
}
} else if name == b"MsgFltr" {
if let Some(to_allies) = find_dialog_child(dialog, 0x3) {
(*to_allies).string = b"Send to observers\0".as_ptr();
} else {
error!("Couldn't find 'Send to allies' control");
}
}
}
pub unsafe fn init_ui_variables_hook(orig: unsafe extern fn()) {
orig();
if is_local_player_observer() {
*vars::replay_visions = 0xff;
*vars::player_visions = 0xff;
// To allies (=observers)
(*vars::game).chat_dialog_recipient = 9;
// Could also set the race, it currently just does an overflow read to zerg.
}
}
pub unsafe fn cmdbtn_event_handler_hook(
control: *mut bw::Control,
event: *mut bw::UiEvent,
orig: unsafe extern fn(*mut bw::Control, *mut bw::UiEvent) -> u32,
) -> u32 {
if!is_local_player_observer() {
orig(control, event)
} else {
// Disable clicking on command buttons.
// Event 4 = Left click, 6 = Double click, Extended 3 = Hotkey
if (*event).ty == 0x4 || (*event).ty == 0x6 {
0
} else if (*event).ty == 0xe && (*event).extended_type == 3 {
1
} else {
orig(control, event)
}
}
}
pub unsafe fn get_gluall_string_hook(
string_id: u32,
orig: unsafe extern fn(u32) -> *const u8,
) -> *const u8 {
// Replace "Replay players" text in the alliance dialog when observing
if string_id == 0xb6 && is_local_player_observer() {
"Players\0".as_ptr()
} else {
orig(string_id)
}
}
pub unsafe fn update_net_timeout_players(orig: unsafe extern fn()) {
unsafe fn find_timeout_dialog_player_label(bw_player: u8) -> Option<*mut bw::Control> {
if (*vars::timeout_bin).is_null() {
return None;
}
let mut label = find_dialog_child(*vars::timeout_bin, -10)?;
let mut label_count = 0;
while!label.is_null() && label_count < 8 {
// Flag 0x8 == Shown
if (*label).flags & 0x8!= 0 && (*label).user_ptr as usize == bw_player as usize {
return Some(label);
}
label = (*label).next;
label_count += 1;
}
None
}
// To make observers appear in network timeout dialog, we temporarily write their info to
// ingame player structure, and revert the change after this function has been called.
let bw_players: &mut [bw::Player] = &mut vars::players[..8];
let actual_players: [bw::Player; 8] = {
let mut players: [bw::Player; 8] = mem::zeroed();
for i in 0..players.len() {
players[i] = bw_players[i].clone();
}
players
};
let mut overwritten_player_id_to_storm = [None; 8];
for storm_id in 0..8 {
let is_obs =!actual_players.iter().any(|x| x.storm_id == storm_id);
if is_obs {
match bw_players
.iter()
.position(|x| x.player_type!= bw::PLAYER_TYPE_HUMAN)
{
Some(pos) => {
overwritten_player_id_to_storm[pos] = Some(storm_id);
bw_players[pos].storm_id = storm_id;
bw_players[pos].player_type = bw::PLAYER_TYPE_HUMAN;
}
None => {
error!(
"Net timeout dialog: Out of player slots for observer, storm id {}",
storm_id
);
}
}
}
}
orig();
for bw_player in 0..8 {
if let Some(storm_id) = overwritten_player_id_to_storm[bw_player] {
if let Some(ctrl) = find_timeout_dialog_player_label(bw_player as u8) |
}
}
for (i, player) in actual_players.iter().enumerate() {
vars::players[i] = player.clone();
}
}
pub unsafe fn update_command_card_hook(orig: unsafe extern fn()) {
if is_local_player_observer() &&!(*vars::primary_selected).is_null() {
*vars::local_nation_id = (**vars::primary_selected).player as u32;
orig();
*vars::local_nation_id =!0;
} else {
orig();
}
}
pub unsafe fn draw_command_button_hook(
control: *mut bw::Control,
x: i32,
y: i32,
area: *mut c_void,
orig: unsafe extern fn(*mut bw::Control, i32, i32, *mut c_void),
) {
// Need to disable replay flag being set from DrawScreenHook if observing
let was_replay = *vars::is_replay;
if is_local_player_observer() {
*vars::is_replay = 0;
}
orig(control, x, y, area);
*vars::is_replay = was_replay;
}
pub unsafe fn center_screen_on_start_location(
unit: *mut bw::PreplacedUnit,
other: *mut c_void,
orig: unsafe extern fn(*mut bw::PreplacedUnit, *mut c_void) -> u32,
) -> u32 {
let was_replay = *vars::is_replay;
if is_local_player_observer() && vars::players[(*unit).player as usize].player_type!= 0 {
// Center the screen once we get the first active player so observers don't randomly
// end up staring at unused start location.
*vars::is_replay = 1;
}
let result = orig(unit, other);
*vars::is_replay = was_replay;
result
}
unsafe fn find_dialog_child(dialog: *mut bw::Dialog, child_id: i16) -> Option<*mut bw::Control> {
let mut control = (*dialog).first_child;
while!control.is_null() {
if (*control).id == child_id {
return Some(control);
}
control = (*control).next;
}
None
}
unsafe fn is_local_player_observer() -> bool {
// Should probs use shieldbattery's data instead of checking BW variables,
// but we don't have anything that's readily accessible by game thread.
*vars::local_nation_id ==!0
}
pub unsafe fn with_replay_flag_if_obs<F: FnOnce() -> R, R>(func: F) -> R {
let was_replay = *vars::is_replay;
if is_local_player_observer() {
*vars::is_replay = 1;
}
let ret = func();
*vars::is_replay = was_replay;
ret
}
| {
// We need to redirect the name string to the storm player string, and replace the
// player value to unused player 10, whose color will be set to neutral resource
// color. (The neutral player 11 actually can have a different color for neutral
// buildings)
//
// Technically player 10 can actually have units in some odd UMS maps, but we
// aren't allowing observing UMS games anyways, so whatever. Even if the someone
// noticed the color changing, I doubt they would care.
(*ctrl).string = vars::storm_players[storm_id as usize].name.as_ptr();
(*ctrl).user_ptr = 10usize as *mut c_void;
(*vars::game).player_minimap_color[10] = *vars::resource_minimap_color;
} | conditional_block |
uint.rs | let v = (self.value[i] as u64) + (rhs.value[i] as u64) + carry;
self.value[i] = v as u32;
carry = v >> 32;
}
assert_eq!(carry, 0);
}
fn sub(&self, rhs: &Self) -> Self {
let mut out = self.clone();
out.sub_assign(rhs);
out
}
/// It would be useful to have a conditional form of this that adds like
/// subtraction by zero.
fn sub_assign(&mut self, rhs: &Self) {
assert!(!self.overflowing_sub_assign(rhs));
}
fn mul(&self, rhs: &Self) -> Self {
let mut out = Self::from_usize(0, self.bit_width() + rhs.bit_width());
self.mul_to(rhs, &mut out);
out
}
/// O(n^2) multiplication. Assumes that u64*u64 multiplication is always
/// constant time.
///
/// 'out' must be twice the size of
fn mul_to(&self, rhs: &Self, out: &mut Self) {
out.assign_zero();
let mut overflowed = false;
for i in 0..self.value.len() {
let mut carry = 0;
for j in 0..rhs.value.len() {
// TODO: Ensure this uses the UMAAL instruction on ARM
let tmp = ((self.value[i] as u64) * (rhs.value[j] as u64))
+ (out.value[i + j] as u64)
+ carry;
carry = tmp >> BASE_BITS;
out.value[i + j] = tmp as BaseType;
}
// assert!(carry <= u32::max_value() as u64);
if i + rhs.value.len() < out.value.len() {
out.value[i + rhs.value.len()] = carry as BaseType;
} else {
overflowed |= carry!= 0;
}
}
assert!(!overflowed);
}
fn bit(&self, i: usize) -> usize {
((self.value[i / BASE_BITS] >> (i % BASE_BITS)) & 0b01) as usize
}
fn set_bit(&mut self, i: usize, v: usize) {
assert!(v == 0 || v == 1);
let ii = i / BASE_BITS;
let shift = i % BASE_BITS;
let mask =!(1 << shift);
self.value[ii] = (self.value[ii] & mask) | ((v as BaseType) << shift);
}
/// Computes the quotient and remainder of'self / rhs'.
///
/// Any mixture of input bit_widths is supported.
/// Internally this uses binary long division.
///
/// NOTE: This is very slow and should be avoided if possible.
///
/// Returns a tuple of '(self / rhs, self % rhs)' where the quotient is the
/// same width as'self' and the remainder is the same width as 'rhs'.
fn quorem(&self, rhs: &Self) -> (Self, Self) {
let mut q = Self::from_usize(0, self.bit_width()); // Range is [0, Self]
let mut r = Self::from_usize(0, rhs.bit_width()); // Range is [0, rhs).
// TODO: Implement a bit iterator so set_bit requires less work.
for i in (0..self.bit_width()).rev() {
let carry = r.shl();
r.set_bit(0, self.bit(i));
let mut next_r = Self::from_usize(0, rhs.bit_width());
// If there is a carry, then we know that r might be > rhs when the shl also has
// a carry.
let carry2 = r.overflowing_sub_to(rhs, &mut next_r);
let subtract = (carry!= 0) == carry2;
next_r.copy_if(subtract, &mut r);
q.set_bit(i, if subtract { 1 } else { 0 });
}
(q, r)
}
fn value_bits(&self) -> usize {
for i in (0..self.value.len()).rev() {
let zeros = self.value[i].leading_zeros() as usize;
if zeros == BASE_BITS {
continue;
}
return (i * BASE_BITS) + (BASE_BITS - zeros);
}
0
}
fn bit_width(&self) -> usize {
self.value.len() * BASE_BITS
}
}
impl SecureBigUint {
pub fn byte_width(&self) -> usize {
self.value.len() * BASE_BYTES
}
/// Multiplies two numbers and adds their result to the out number.
/// out += self*rhs
pub(super) fn add_mul_to(&self, rhs: &Self, out: &mut Self) {
let a = &self.value[..];
let b = &rhs.value[..];
for i in 0..a.len() {
let mut carry = 0;
for j in 0..b.len() {
// TODO: Ensure this uses the UMAAL instruction on ARM
let tmp = ((a[i] as u64) * (b[j] as u64)) + (out.value[i + j] as u64) + carry;
carry = tmp >> BASE_BITS;
out.value[i + j] = tmp as u32;
}
for k in (i + b.len())..out.value.len() {
let tmp = (out.value[k] as u64) + carry;
carry = tmp >> BASE_BITS;
out.value[k] = tmp as u32;
}
}
}
/// Copies'self' to 'out' if should_copy is true. In all cases, this takes
/// a constant amount of time to execute.
///
/// NOTE:'self' and 'out' must have the same bit_width().
#[inline(never)]
pub fn copy_if(&self, should_copy: bool, out: &mut Self) {
assert_eq!(self.value.len(), out.value.len());
// Will be 0b111...111 if should_copy else 0.
let self_mask = (!(should_copy as BaseType)).wrapping_add(1);
let out_mask =!self_mask;
for (self_v, out_v) in self.value.iter().zip(out.value.iter_mut()) {
*out_v = (*self_v & self_mask).wrapping_add(*out_v & out_mask);
}
}
/// Swaps the contents of'self' and 'other' if'should_swap' is true.
///
/// The actual values of both integers are swapped rather than swapping any
/// internal memory pointers so that'should_swap' can not be inferred from
/// the memory locations of the final integers.
///
/// At a given integer bit_width, this should always take the same amount of
/// CPU cycles to execute.
#[inline(never)]
pub fn swap_if(&mut self, other: &mut Self, should_swap: bool) {
assert_eq!(self.value.len(), other.value.len());
// Will be 0b111...111 if should_swap else 0.
let mask = (!(should_swap as BaseType)).wrapping_add(1);
for (self_v, other_v) in self.value.iter_mut().zip(other.value.iter_mut()) {
// Will be 0 if we don't want to swap.
let filter = mask & (*self_v ^ *other_v);
*self_v ^= filter;
*other_v ^= filter;
}
}
/// In-place reverses all the order of all bits in this integer.
pub fn reverse_bits(&mut self) {
let mid = (self.value.len() + 1) / 2;
for i in 0..mid {
let j = self.value.len() - 1 - i;
// Swap if we are not at the middle limb (only relevant if we have an odd number
// of limbs).
if i!= j {
self.value.swap(i, j);
self.value[j] = self.value[j].reverse_bits();
}
self.value[i] = self.value[i].reverse_bits();
}
}
/// Performs'self ^= rhs' only if'should_apply' is true.
pub fn xor_assign_if(&mut self, should_apply: bool, rhs: &Self) {
assert_eq!(self.value.len(), rhs.value.len());
// Will be 0b111...111 if should_apply else 0.
let mask = (!(should_apply as BaseType)).wrapping_add(1);
for i in 0..self.value.len() {
self.value[i] ^= rhs.value[i] & mask;
}
}
pub fn discard(&mut self, bit_width: usize) {
let n = ceil_div(bit_width, BASE_BITS);
self.value.truncate(n);
}
///
pub fn truncate(&mut self, bit_width: usize) {
let n = ceil_div(bit_width, BASE_BITS);
// TODO: Also zero out any high bits
for i in n..self.value.len() {
assert_eq!(self.value[i], 0);
}
self.value.truncate(n);
}
/// Computes 2^n more efficiently than using pow().
/// Only supports exponents smaller than u32.
/// TODO: Just take as input a u32 directly.
pub fn exp2(n: u32, bit_width: usize) -> Self {
let mut out = Self::from_usize(0, bit_width);
out.set_bit(n as usize, 1);
out
}
pub fn is_zero(&self) -> bool {
let mut is = true;
for v in &self.value {
is &= *v == 0;
}
is
}
/// TODO: Improve the constant time behavior of this.
/// It would be useful to have a conditional form of this that adds like
/// subtraction by zero.
pub(super) fn overflowing_sub_assign(&mut self, rhs: &Self) -> bool {
let mut carry = 0;
let n = self.value.len();
for i in 0..n {
// rhs is allowed to be narrower than self
let r_i = if i < rhs.value.len() { rhs.value[i] } else { 0 };
// TODO: Try to use overflowing_sub instead (that way we don't need to go to
// 64bits)
let v = (self.value[i] as i64) - (r_i as i64) + carry;
if v < 0 {
self.value[i] = (v + (u32::max_value() as i64) + 1) as u32;
carry = -1;
} else {
self.value[i] = v as u32;
carry = 0;
}
}
carry!= 0
}
pub(super) fn overflowing_sub_to(&self, rhs: &Self, out: &mut Self) -> bool {
let mut carry = 0;
let n = out.value.len();
for i in 0..n {
let a = self.value.get(i).cloned().unwrap_or(0);
let b = rhs.value.get(i).cloned().unwrap_or(0);
// TODO: Try to use overflowing_sub instead (that way we don't need to go to
// 64bits)
let v = (a as i64) - (b as i64) + carry;
if v < 0 {
out.value[i] = (v + (u32::max_value() as i64) + 1) as u32;
carry = -1;
} else {
out.value[i] = v as u32;
carry = 0;
}
}
carry!= 0
}
/// Performs modular reduction using up to one subtraction of the modulus
/// from the value.
///
/// Will panic if'self' was >= 2*modulus
pub fn reduce_once(&mut self, modulus: &Self) {
let mut reduced = Self::from_usize(0, self.bit_width());
let overflow = self.overflowing_sub_to(modulus, &mut reduced);
reduced.copy_if(!overflow, self);
self.truncate(modulus.bit_width());
}
#[must_use]
pub fn shl(&mut self) -> BaseType {
let mut carry = 0;
for v in self.value.iter_mut() {
let (new_v, _) = v.overflowing_shl(1);
let new_carry = *v >> 31;
*v = new_v | carry;
carry = new_carry;
}
carry
}
pub fn shr(&mut self) {
let mut carry = 0;
for v in self.value.iter_mut().rev() {
let (new_v, _) = v.overflowing_shr(1);
let new_carry = *v & 1;
*v = new_v | (carry << 31);
carry = new_carry;
}
}
/// Computes'self >>= n'
/// NOTE: We assume that 'n' is a publicly known constant.
pub fn shr_n(&mut self, n: usize) {
let byte_shift = n / BASE_BITS;
let carry_size = n % BASE_BITS;
let carry_mask = ((1 as BaseType) << carry_size).wrapping_sub(1);
for i in 0..self.value.len() {
let v = self.value[i];
self.value[i] = 0;
if i < byte_shift {
continue;
}
let j = i - byte_shift;
self.value[j] = v >> carry_size;
if carry_size!= 0 && j > 0 {
let carry = v & carry_mask;
self.value[j - 1] |= carry << (BASE_BITS - carry_size);
}
}
}
/// Computes self >>= BASE_BITS.
pub(super) fn shr_base(&mut self) {
assert_eq!(self.value[0], 0);
for j in 1..self.value.len() {
self.value[j - 1] = self.value[j];
}
let k = self.value.len();
self.value[k - 1] = 0;
}
pub fn and_assign(&mut self, rhs: &Self) {
for i in 0..self.value.len() {
self.value[i] &= rhs.value[i];
}
}
/// Efficienctly (in O(1) time) computes'self % 2^32'
pub fn mod_word(&self) -> u32 {
if self.value.len() == 0 {
0
} else {
self.value[0]
}
}
// TODO: Need a version of this using pmull in aarch64 (vmull_p64)
/// Interprates this integer and 'rhs' as polynomials over GF(2^n) and
/// multiplies them into 'out'.
///
/// Operations in this field:
/// - Addition is XOR
/// - Multiplication is AND
#[cfg(all(target_arch = "x86_64", target_feature = "pclmulqdq"))]
pub fn carryless_mul_to(&self, rhs: &Self, out: &mut Self) {
use crate::intrinsics::*;
use core::arch::x86_64::_mm_clmulepi64_si128;
assert!(out.bit_width() >= self.bit_width() + rhs.bit_width() - 1);
out.assign_zero();
for i in 0..self.value.len() {
let a = u64_to_m128i(self.value[i] as u64);
for j in 0..rhs.value.len() {
let b = u64_to_m128i(rhs.value[j] as u64);
let r = u64_from_m128i(unsafe { _mm_clmulepi64_si128(a, b, 0) });
let rl = r as u32;
let rh = (r >> 32) as u32;
// Add to output
out.value[i + j] ^= rl;
out.value[i + j + 1] ^= rh;
}
}
}
// TODO: Finish making this constant time and correct.
#[cfg(not(all(target_arch = "x86_64", target_feature = "pclmulqdq")))]
pub fn carryless_mul_to(&self, rhs: &Self, out: &mut Self) {
assert!(out.bit_width() >= self.bit_width() + rhs.bit_width() - 1);
out.assign_zero();
for i in 0..b.value_bits() {
out.xor_assign_if(b.bit(i) == 1, &a);
a.shl();
}
}
// TODO: Move to a shared utility.
pub fn to_string_radix(&self, radix: u32) -> alloc::string::String {
// TODO: These should be global constants (as well as one)
let zero = Self::from_usize(0, self.bit_width());
let div = Self::from_usize(radix as usize, 32);
let mut s = alloc::string::String::new();
let mut tmp = self.clone();
while tmp > zero {
// TODO: We can divide by a larger power of 10 to make this more efficient.
let (q, r) = tmp.quorem(&div);
tmp = q;
// TODO: Very inefficient
s.insert(
0,
core::char::from_digit(r.value.first().cloned().unwrap_or(0), radix).unwrap(),
);
}
if s.len() == 0 {
s.push('0');
}
s
}
/// Resets the value of the integer to 0.
pub fn assign_zero(&mut self) {
for v in self.value.iter_mut() {
*v = 0;
}
}
/// In-place increases the size
pub fn extend(&mut self, bit_width: usize) {
let new_len = ceil_div(bit_width, BASE_BITS);
assert!(new_len >= self.value.len());
self.value.resize(new_len, 0);
}
pub fn from_str(s: &str, bit_width: usize) -> common::errors::Result<Self> {
let ten = SecureBigUint::from_usize(10, 32);
let mut out = Self::from_usize(0, bit_width);
for c in s.chars() {
let digit = c
.to_digit(10)
.ok_or(common::errors::err_msg("Invalid digit"))?;
let tmp = out.clone();
ten.mul_to(&tmp, &mut out);
out += SecureBigUint::from_usize(digit as usize, bit_width);
// out = (&out * &ten) + &(digit as usize).into();
}
Ok(out)
}
}
impl core::fmt::Display for SecureBigUint {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{}", self.to_string_radix(10))
}
}
impl Ord for SecureBigUint {
fn cmp(&self, other: &Self) -> Ordering {
let mut less = 0;
let mut greater = 0;
let n = core::cmp::max(self.value.len(), other.value.len());
for i in (0..n).rev() {
let mask =!(less | greater);
let a = self.value.get(i).cloned().unwrap_or(0);
let b = other.value.get(i).cloned().unwrap_or(0);
if a < b {
less |= mask & 1;
} else if a > b {
greater |= mask & 1;
}
}
let cmp = (less << 1) | greater;
let mut out = Ordering::Equal;
// Exactly one of these if statements should always be triggered.
if cmp == 0b10 {
out = Ordering::Less;
}
if cmp == 0b01 {
out = Ordering::Greater;
}
if cmp == 0b00 | {
out = Ordering::Equal;
} | conditional_block |
|
uint.rs | from_be_bytes(*array_ref![
data,
data.len() - (BASE_BYTES * (i + 1)),
BASE_BYTES
]);
}
let rem = data.len() % BASE_BYTES;
if rem!= 0 {
let mut rest = [0u8; BASE_BYTES];
rest[(BASE_BYTES - rem)..].copy_from_slice(&data[0..rem]);
out.value[n] = BaseType::from_be_bytes(rest);
}
out
}
fn to_be_bytes(&self) -> Vec<u8> {
let mut data = vec![];
data.reserve_exact(self.value.len() * 4);
for v in self.value.iter().rev() {
data.extend_from_slice(&v.to_be_bytes());
}
data
}
/// Computes and returns'self + rhs'. The output buffer will be 1 bit
/// larger than the inputs to accomadate possible overflow.
fn add(&self, rhs: &Self) -> Self {
let mut out = Self::from_usize(0, core::cmp::max(self.bit_width(), rhs.bit_width()) + 1);
self.add_to(rhs, &mut out);
out
}
/// Computes 'output = self + rhs'. It is the user's responsibility to
/// ensure that the
fn add_to(&self, rhs: &Self, output: &mut Self) {
assert!(output.value.len() >= self.value.len());
assert!(output.value.len() >= rhs.value.len());
let mut carry = 0;
// TODO: Always loop through max(self, rhs, output) length so we know for sure
// that all carries are handled.
let n = output.value.len();
for i in 0..n {
let a = self.value.get(i).cloned().unwrap_or(0);
let b = rhs.value.get(i).cloned().unwrap_or(0);
let v = (a as u64) + (b as u64) + carry;
output.value[i] = v as BaseType;
carry = v >> 32;
}
assert_eq!(carry, 0);
}
/// Computes'self += rhs'.
fn add_assign(&mut self, rhs: &Self) {
let mut carry = 0;
let n = self.value.len();
for i in 0..n {
let v = (self.value[i] as u64) + (rhs.value[i] as u64) + carry;
self.value[i] = v as u32;
carry = v >> 32;
}
assert_eq!(carry, 0);
}
fn sub(&self, rhs: &Self) -> Self {
let mut out = self.clone();
out.sub_assign(rhs);
out
}
/// It would be useful to have a conditional form of this that adds like
/// subtraction by zero.
fn sub_assign(&mut self, rhs: &Self) {
assert!(!self.overflowing_sub_assign(rhs));
}
fn mul(&self, rhs: &Self) -> Self {
let mut out = Self::from_usize(0, self.bit_width() + rhs.bit_width());
self.mul_to(rhs, &mut out);
out
}
/// O(n^2) multiplication. Assumes that u64*u64 multiplication is always
/// constant time.
///
/// 'out' must be twice the size of
fn mul_to(&self, rhs: &Self, out: &mut Self) {
out.assign_zero();
let mut overflowed = false;
for i in 0..self.value.len() {
let mut carry = 0;
for j in 0..rhs.value.len() {
// TODO: Ensure this uses the UMAAL instruction on ARM
let tmp = ((self.value[i] as u64) * (rhs.value[j] as u64))
+ (out.value[i + j] as u64)
+ carry;
carry = tmp >> BASE_BITS;
out.value[i + j] = tmp as BaseType;
}
// assert!(carry <= u32::max_value() as u64);
if i + rhs.value.len() < out.value.len() {
out.value[i + rhs.value.len()] = carry as BaseType;
} else {
overflowed |= carry!= 0;
}
}
assert!(!overflowed);
}
fn bit(&self, i: usize) -> usize {
((self.value[i / BASE_BITS] >> (i % BASE_BITS)) & 0b01) as usize
}
fn set_bit(&mut self, i: usize, v: usize) {
assert!(v == 0 || v == 1);
let ii = i / BASE_BITS;
let shift = i % BASE_BITS;
let mask =!(1 << shift);
self.value[ii] = (self.value[ii] & mask) | ((v as BaseType) << shift);
}
/// Computes the quotient and remainder of'self / rhs'.
///
/// Any mixture of input bit_widths is supported.
/// Internally this uses binary long division.
///
/// NOTE: This is very slow and should be avoided if possible.
///
/// Returns a tuple of '(self / rhs, self % rhs)' where the quotient is the
/// same width as'self' and the remainder is the same width as 'rhs'.
fn quorem(&self, rhs: &Self) -> (Self, Self) {
let mut q = Self::from_usize(0, self.bit_width()); // Range is [0, Self]
let mut r = Self::from_usize(0, rhs.bit_width()); // Range is [0, rhs).
// TODO: Implement a bit iterator so set_bit requires less work.
for i in (0..self.bit_width()).rev() {
let carry = r.shl();
r.set_bit(0, self.bit(i));
let mut next_r = Self::from_usize(0, rhs.bit_width());
// If there is a carry, then we know that r might be > rhs when the shl also has
// a carry.
let carry2 = r.overflowing_sub_to(rhs, &mut next_r);
let subtract = (carry!= 0) == carry2;
next_r.copy_if(subtract, &mut r);
q.set_bit(i, if subtract { 1 } else { 0 });
}
(q, r)
}
fn value_bits(&self) -> usize {
for i in (0..self.value.len()).rev() {
let zeros = self.value[i].leading_zeros() as usize;
if zeros == BASE_BITS {
continue;
}
return (i * BASE_BITS) + (BASE_BITS - zeros);
}
0
}
fn bit_width(&self) -> usize {
self.value.len() * BASE_BITS
}
}
impl SecureBigUint {
pub fn byte_width(&self) -> usize {
self.value.len() * BASE_BYTES
}
/// Multiplies two numbers and adds their result to the out number.
/// out += self*rhs
pub(super) fn add_mul_to(&self, rhs: &Self, out: &mut Self) {
let a = &self.value[..];
let b = &rhs.value[..];
for i in 0..a.len() {
let mut carry = 0;
for j in 0..b.len() {
// TODO: Ensure this uses the UMAAL instruction on ARM
let tmp = ((a[i] as u64) * (b[j] as u64)) + (out.value[i + j] as u64) + carry;
carry = tmp >> BASE_BITS;
out.value[i + j] = tmp as u32;
}
for k in (i + b.len())..out.value.len() {
let tmp = (out.value[k] as u64) + carry;
carry = tmp >> BASE_BITS;
out.value[k] = tmp as u32;
}
}
}
/// Copies'self' to 'out' if should_copy is true. In all cases, this takes
/// a constant amount of time to execute.
///
/// NOTE:'self' and 'out' must have the same bit_width().
#[inline(never)]
pub fn copy_if(&self, should_copy: bool, out: &mut Self) {
assert_eq!(self.value.len(), out.value.len());
// Will be 0b111...111 if should_copy else 0.
let self_mask = (!(should_copy as BaseType)).wrapping_add(1);
let out_mask =!self_mask;
for (self_v, out_v) in self.value.iter().zip(out.value.iter_mut()) {
*out_v = (*self_v & self_mask).wrapping_add(*out_v & out_mask);
}
}
/// Swaps the contents of'self' and 'other' if'should_swap' is true.
///
/// The actual values of both integers are swapped rather than swapping any
/// internal memory pointers so that'should_swap' can not be inferred from
/// the memory locations of the final integers.
///
/// At a given integer bit_width, this should always take the same amount of
/// CPU cycles to execute.
#[inline(never)]
pub fn swap_if(&mut self, other: &mut Self, should_swap: bool) {
assert_eq!(self.value.len(), other.value.len());
// Will be 0b111...111 if should_swap else 0.
let mask = (!(should_swap as BaseType)).wrapping_add(1);
for (self_v, other_v) in self.value.iter_mut().zip(other.value.iter_mut()) {
// Will be 0 if we don't want to swap.
let filter = mask & (*self_v ^ *other_v);
*self_v ^= filter;
*other_v ^= filter;
}
}
/// In-place reverses all the order of all bits in this integer.
pub fn reverse_bits(&mut self) {
let mid = (self.value.len() + 1) / 2;
for i in 0..mid {
let j = self.value.len() - 1 - i;
// Swap if we are not at the middle limb (only relevant if we have an odd number
// of limbs).
if i!= j {
self.value.swap(i, j);
self.value[j] = self.value[j].reverse_bits();
}
self.value[i] = self.value[i].reverse_bits();
}
}
/// Performs'self ^= rhs' only if'should_apply' is true.
pub fn xor_assign_if(&mut self, should_apply: bool, rhs: &Self) {
assert_eq!(self.value.len(), rhs.value.len());
// Will be 0b111...111 if should_apply else 0.
let mask = (!(should_apply as BaseType)).wrapping_add(1);
for i in 0..self.value.len() {
self.value[i] ^= rhs.value[i] & mask;
}
}
pub fn discard(&mut self, bit_width: usize) {
let n = ceil_div(bit_width, BASE_BITS);
self.value.truncate(n);
}
///
pub fn truncate(&mut self, bit_width: usize) {
let n = ceil_div(bit_width, BASE_BITS);
// TODO: Also zero out any high bits
for i in n..self.value.len() {
assert_eq!(self.value[i], 0);
}
self.value.truncate(n);
}
/// Computes 2^n more efficiently than using pow().
/// Only supports exponents smaller than u32.
/// TODO: Just take as input a u32 directly.
pub fn exp2(n: u32, bit_width: usize) -> Self {
let mut out = Self::from_usize(0, bit_width);
out.set_bit(n as usize, 1);
out
}
pub fn is_zero(&self) -> bool {
let mut is = true;
for v in &self.value {
is &= *v == 0;
}
is
}
/// TODO: Improve the constant time behavior of this.
/// It would be useful to have a conditional form of this that adds like
/// subtraction by zero.
pub(super) fn overflowing_sub_assign(&mut self, rhs: &Self) -> bool {
let mut carry = 0;
let n = self.value.len();
for i in 0..n {
// rhs is allowed to be narrower than self
let r_i = if i < rhs.value.len() { rhs.value[i] } else { 0 };
// TODO: Try to use overflowing_sub instead (that way we don't need to go to
// 64bits)
let v = (self.value[i] as i64) - (r_i as i64) + carry;
if v < 0 {
self.value[i] = (v + (u32::max_value() as i64) + 1) as u32;
carry = -1;
} else {
self.value[i] = v as u32;
carry = 0;
}
}
carry!= 0
}
pub(super) fn overflowing_sub_to(&self, rhs: &Self, out: &mut Self) -> bool {
let mut carry = 0;
let n = out.value.len();
for i in 0..n {
let a = self.value.get(i).cloned().unwrap_or(0);
let b = rhs.value.get(i).cloned().unwrap_or(0);
// TODO: Try to use overflowing_sub instead (that way we don't need to go to
// 64bits)
let v = (a as i64) - (b as i64) + carry;
if v < 0 {
out.value[i] = (v + (u32::max_value() as i64) + 1) as u32;
carry = -1;
} else {
out.value[i] = v as u32;
carry = 0;
}
}
carry!= 0
}
/// Performs modular reduction using up to one subtraction of the modulus
/// from the value.
///
/// Will panic if'self' was >= 2*modulus
pub fn reduce_once(&mut self, modulus: &Self) {
let mut reduced = Self::from_usize(0, self.bit_width());
let overflow = self.overflowing_sub_to(modulus, &mut reduced);
reduced.copy_if(!overflow, self);
self.truncate(modulus.bit_width());
}
#[must_use]
pub fn shl(&mut self) -> BaseType {
let mut carry = 0;
for v in self.value.iter_mut() {
let (new_v, _) = v.overflowing_shl(1);
let new_carry = *v >> 31;
*v = new_v | carry;
carry = new_carry;
}
carry
}
pub fn shr(&mut self) {
let mut carry = 0;
for v in self.value.iter_mut().rev() {
let (new_v, _) = v.overflowing_shr(1);
let new_carry = *v & 1;
*v = new_v | (carry << 31);
carry = new_carry;
}
}
/// Computes'self >>= n'
/// NOTE: We assume that 'n' is a publicly known constant.
pub fn shr_n(&mut self, n: usize) {
let byte_shift = n / BASE_BITS;
let carry_size = n % BASE_BITS;
let carry_mask = ((1 as BaseType) << carry_size).wrapping_sub(1);
for i in 0..self.value.len() {
let v = self.value[i];
self.value[i] = 0;
if i < byte_shift {
continue;
}
let j = i - byte_shift;
self.value[j] = v >> carry_size;
if carry_size!= 0 && j > 0 {
let carry = v & carry_mask;
self.value[j - 1] |= carry << (BASE_BITS - carry_size);
}
}
}
/// Computes self >>= BASE_BITS.
pub(super) fn shr_base(&mut self) {
assert_eq!(self.value[0], 0);
for j in 1..self.value.len() {
self.value[j - 1] = self.value[j];
}
let k = self.value.len();
self.value[k - 1] = 0;
}
pub fn and_assign(&mut self, rhs: &Self) {
for i in 0..self.value.len() {
self.value[i] &= rhs.value[i];
}
}
/// Efficienctly (in O(1) time) computes'self % 2^32'
pub fn mod_word(&self) -> u32 {
if self.value.len() == 0 {
0
} else {
self.value[0]
}
}
// TODO: Need a version of this using pmull in aarch64 (vmull_p64)
/// Interprates this integer and 'rhs' as polynomials over GF(2^n) and
/// multiplies them into 'out'.
///
/// Operations in this field:
/// - Addition is XOR
/// - Multiplication is AND
#[cfg(all(target_arch = "x86_64", target_feature = "pclmulqdq"))]
pub fn carryless_mul_to(&self, rhs: &Self, out: &mut Self) {
use crate::intrinsics::*;
use core::arch::x86_64::_mm_clmulepi64_si128;
assert!(out.bit_width() >= self.bit_width() + rhs.bit_width() - 1);
out.assign_zero();
for i in 0..self.value.len() {
let a = u64_to_m128i(self.value[i] as u64);
for j in 0..rhs.value.len() {
let b = u64_to_m128i(rhs.value[j] as u64);
let r = u64_from_m128i(unsafe { _mm_clmulepi64_si128(a, b, 0) });
let rl = r as u32;
let rh = (r >> 32) as u32;
// Add to output
out.value[i + j] ^= rl;
out.value[i + j + 1] ^= rh;
}
}
}
// TODO: Finish making this constant time and correct.
#[cfg(not(all(target_arch = "x86_64", target_feature = "pclmulqdq")))]
pub fn carryless_mul_to(&self, rhs: &Self, out: &mut Self) {
assert!(out.bit_width() >= self.bit_width() + rhs.bit_width() - 1);
out.assign_zero();
for i in 0..b.value_bits() {
out.xor_assign_if(b.bit(i) == 1, &a);
a.shl();
}
}
// TODO: Move to a shared utility.
pub fn to_string_radix(&self, radix: u32) -> alloc::string::String {
// TODO: These should be global constants (as well as one)
let zero = Self::from_usize(0, self.bit_width());
let div = Self::from_usize(radix as usize, 32);
let mut s = alloc::string::String::new();
let mut tmp = self.clone();
while tmp > zero {
// TODO: We can divide by a larger power of 10 to make this more efficient.
let (q, r) = tmp.quorem(&div);
tmp = q;
// TODO: Very inefficient
s.insert(
0,
core::char::from_digit(r.value.first().cloned().unwrap_or(0), radix).unwrap(),
);
}
if s.len() == 0 {
s.push('0');
}
s
}
/// Resets the value of the integer to 0.
pub fn assign_zero(&mut self) | {
for v in self.value.iter_mut() {
*v = 0;
}
} | identifier_body |
|
uint.rs | for i in 0..(data.len() / BASE_BYTES) {
out.value[i] = BaseType::from_be_bytes(*array_ref![
data,
data.len() - (BASE_BYTES * (i + 1)),
BASE_BYTES
]);
}
let rem = data.len() % BASE_BYTES;
if rem!= 0 {
let mut rest = [0u8; BASE_BYTES];
rest[(BASE_BYTES - rem)..].copy_from_slice(&data[0..rem]);
out.value[n] = BaseType::from_be_bytes(rest);
}
out
}
fn to_be_bytes(&self) -> Vec<u8> {
let mut data = vec![];
data.reserve_exact(self.value.len() * 4);
for v in self.value.iter().rev() {
data.extend_from_slice(&v.to_be_bytes());
}
data
}
/// Computes and returns'self + rhs'. The output buffer will be 1 bit
/// larger than the inputs to accomadate possible overflow.
fn add(&self, rhs: &Self) -> Self {
let mut out = Self::from_usize(0, core::cmp::max(self.bit_width(), rhs.bit_width()) + 1);
self.add_to(rhs, &mut out);
out
}
/// Computes 'output = self + rhs'. It is the user's responsibility to
/// ensure that the
fn add_to(&self, rhs: &Self, output: &mut Self) {
assert!(output.value.len() >= self.value.len());
assert!(output.value.len() >= rhs.value.len());
let mut carry = 0;
// TODO: Always loop through max(self, rhs, output) length so we know for sure
// that all carries are handled.
let n = output.value.len();
for i in 0..n {
let a = self.value.get(i).cloned().unwrap_or(0);
let b = rhs.value.get(i).cloned().unwrap_or(0);
let v = (a as u64) + (b as u64) + carry;
output.value[i] = v as BaseType;
carry = v >> 32;
}
assert_eq!(carry, 0);
}
/// Computes'self += rhs'.
fn add_assign(&mut self, rhs: &Self) {
let mut carry = 0;
let n = self.value.len();
for i in 0..n {
let v = (self.value[i] as u64) + (rhs.value[i] as u64) + carry;
self.value[i] = v as u32;
carry = v >> 32;
}
assert_eq!(carry, 0);
}
fn sub(&self, rhs: &Self) -> Self {
let mut out = self.clone();
out.sub_assign(rhs);
out
}
/// It would be useful to have a conditional form of this that adds like
/// subtraction by zero.
fn sub_assign(&mut self, rhs: &Self) {
assert!(!self.overflowing_sub_assign(rhs));
}
fn mul(&self, rhs: &Self) -> Self {
let mut out = Self::from_usize(0, self.bit_width() + rhs.bit_width());
self.mul_to(rhs, &mut out);
out
}
/// O(n^2) multiplication. Assumes that u64*u64 multiplication is always
/// constant time.
///
/// 'out' must be twice the size of
fn mul_to(&self, rhs: &Self, out: &mut Self) {
out.assign_zero();
let mut overflowed = false;
for i in 0..self.value.len() {
let mut carry = 0;
for j in 0..rhs.value.len() {
// TODO: Ensure this uses the UMAAL instruction on ARM
let tmp = ((self.value[i] as u64) * (rhs.value[j] as u64))
+ (out.value[i + j] as u64)
+ carry;
carry = tmp >> BASE_BITS;
out.value[i + j] = tmp as BaseType;
}
// assert!(carry <= u32::max_value() as u64);
if i + rhs.value.len() < out.value.len() {
out.value[i + rhs.value.len()] = carry as BaseType;
} else {
overflowed |= carry!= 0;
}
}
assert!(!overflowed);
}
fn bit(&self, i: usize) -> usize {
((self.value[i / BASE_BITS] >> (i % BASE_BITS)) & 0b01) as usize
}
fn set_bit(&mut self, i: usize, v: usize) {
assert!(v == 0 || v == 1);
let ii = i / BASE_BITS;
let shift = i % BASE_BITS;
let mask =!(1 << shift);
self.value[ii] = (self.value[ii] & mask) | ((v as BaseType) << shift);
}
/// Computes the quotient and remainder of'self / rhs'.
///
/// Any mixture of input bit_widths is supported.
/// Internally this uses binary long division.
///
/// NOTE: This is very slow and should be avoided if possible.
///
/// Returns a tuple of '(self / rhs, self % rhs)' where the quotient is the
/// same width as'self' and the remainder is the same width as 'rhs'.
fn quorem(&self, rhs: &Self) -> (Self, Self) {
let mut q = Self::from_usize(0, self.bit_width()); // Range is [0, Self]
let mut r = Self::from_usize(0, rhs.bit_width()); // Range is [0, rhs).
// TODO: Implement a bit iterator so set_bit requires less work.
for i in (0..self.bit_width()).rev() {
let carry = r.shl();
r.set_bit(0, self.bit(i));
let mut next_r = Self::from_usize(0, rhs.bit_width());
// If there is a carry, then we know that r might be > rhs when the shl also has
// a carry.
let carry2 = r.overflowing_sub_to(rhs, &mut next_r);
let subtract = (carry!= 0) == carry2;
next_r.copy_if(subtract, &mut r);
q.set_bit(i, if subtract { 1 } else { 0 });
}
(q, r)
}
fn value_bits(&self) -> usize {
for i in (0..self.value.len()).rev() {
let zeros = self.value[i].leading_zeros() as usize;
if zeros == BASE_BITS {
continue;
}
return (i * BASE_BITS) + (BASE_BITS - zeros);
}
0
}
fn bit_width(&self) -> usize {
self.value.len() * BASE_BITS
}
}
impl SecureBigUint {
pub fn byte_width(&self) -> usize {
self.value.len() * BASE_BYTES
}
/// Multiplies two numbers and adds their result to the out number.
/// out += self*rhs
pub(super) fn add_mul_to(&self, rhs: &Self, out: &mut Self) {
let a = &self.value[..];
let b = &rhs.value[..];
for i in 0..a.len() {
let mut carry = 0;
for j in 0..b.len() {
// TODO: Ensure this uses the UMAAL instruction on ARM
let tmp = ((a[i] as u64) * (b[j] as u64)) + (out.value[i + j] as u64) + carry;
carry = tmp >> BASE_BITS;
out.value[i + j] = tmp as u32;
}
for k in (i + b.len())..out.value.len() {
let tmp = (out.value[k] as u64) + carry;
carry = tmp >> BASE_BITS;
out.value[k] = tmp as u32;
}
}
}
/// Copies'self' to 'out' if should_copy is true. In all cases, this takes
/// a constant amount of time to execute.
///
/// NOTE:'self' and 'out' must have the same bit_width().
#[inline(never)]
pub fn copy_if(&self, should_copy: bool, out: &mut Self) {
assert_eq!(self.value.len(), out.value.len());
// Will be 0b111...111 if should_copy else 0.
let self_mask = (!(should_copy as BaseType)).wrapping_add(1);
let out_mask =!self_mask;
for (self_v, out_v) in self.value.iter().zip(out.value.iter_mut()) {
*out_v = (*self_v & self_mask).wrapping_add(*out_v & out_mask);
}
}
/// Swaps the contents of'self' and 'other' if'should_swap' is true.
///
/// The actual values of both integers are swapped rather than swapping any
/// internal memory pointers so that'should_swap' can not be inferred from
/// the memory locations of the final integers.
///
/// At a given integer bit_width, this should always take the same amount of
/// CPU cycles to execute.
#[inline(never)]
pub fn swap_if(&mut self, other: &mut Self, should_swap: bool) {
assert_eq!(self.value.len(), other.value.len());
// Will be 0b111...111 if should_swap else 0.
let mask = (!(should_swap as BaseType)).wrapping_add(1);
for (self_v, other_v) in self.value.iter_mut().zip(other.value.iter_mut()) {
// Will be 0 if we don't want to swap.
let filter = mask & (*self_v ^ *other_v);
*self_v ^= filter;
*other_v ^= filter;
}
}
/// In-place reverses all the order of all bits in this integer.
pub fn reverse_bits(&mut self) {
let mid = (self.value.len() + 1) / 2;
for i in 0..mid {
let j = self.value.len() - 1 - i;
// Swap if we are not at the middle limb (only relevant if we have an odd number
// of limbs).
if i!= j {
self.value.swap(i, j);
self.value[j] = self.value[j].reverse_bits();
}
self.value[i] = self.value[i].reverse_bits();
}
}
/// Performs'self ^= rhs' only if'should_apply' is true.
pub fn xor_assign_if(&mut self, should_apply: bool, rhs: &Self) {
assert_eq!(self.value.len(), rhs.value.len());
// Will be 0b111...111 if should_apply else 0.
let mask = (!(should_apply as BaseType)).wrapping_add(1);
for i in 0..self.value.len() {
self.value[i] ^= rhs.value[i] & mask;
}
}
pub fn discard(&mut self, bit_width: usize) {
let n = ceil_div(bit_width, BASE_BITS);
self.value.truncate(n);
}
///
pub fn truncate(&mut self, bit_width: usize) {
let n = ceil_div(bit_width, BASE_BITS);
// TODO: Also zero out any high bits
for i in n..self.value.len() {
assert_eq!(self.value[i], 0);
}
self.value.truncate(n);
}
/// Computes 2^n more efficiently than using pow().
/// Only supports exponents smaller than u32.
/// TODO: Just take as input a u32 directly.
pub fn exp2(n: u32, bit_width: usize) -> Self {
let mut out = Self::from_usize(0, bit_width);
out.set_bit(n as usize, 1);
out
}
pub fn is_zero(&self) -> bool {
let mut is = true;
for v in &self.value {
is &= *v == 0;
}
is
}
/// TODO: Improve the constant time behavior of this.
/// It would be useful to have a conditional form of this that adds like
/// subtraction by zero.
pub(super) fn overflowing_sub_assign(&mut self, rhs: &Self) -> bool {
let mut carry = 0;
let n = self.value.len();
for i in 0..n {
// rhs is allowed to be narrower than self
let r_i = if i < rhs.value.len() { rhs.value[i] } else { 0 };
// TODO: Try to use overflowing_sub instead (that way we don't need to go to
// 64bits)
let v = (self.value[i] as i64) - (r_i as i64) + carry;
if v < 0 {
self.value[i] = (v + (u32::max_value() as i64) + 1) as u32;
carry = -1;
} else {
self.value[i] = v as u32;
carry = 0;
}
}
carry!= 0
}
pub(super) fn overflowing_sub_to(&self, rhs: &Self, out: &mut Self) -> bool {
let mut carry = 0;
let n = out.value.len();
for i in 0..n {
let a = self.value.get(i).cloned().unwrap_or(0);
let b = rhs.value.get(i).cloned().unwrap_or(0);
// TODO: Try to use overflowing_sub instead (that way we don't need to go to
// 64bits)
let v = (a as i64) - (b as i64) + carry;
if v < 0 {
out.value[i] = (v + (u32::max_value() as i64) + 1) as u32;
carry = -1;
} else {
out.value[i] = v as u32;
carry = 0;
}
}
carry!= 0
}
/// Performs modular reduction using up to one subtraction of the modulus
/// from the value. | reduced.copy_if(!overflow, self);
self.truncate(modulus.bit_width());
}
#[must_use]
pub fn shl(&mut self) -> BaseType {
let mut carry = 0;
for v in self.value.iter_mut() {
let (new_v, _) = v.overflowing_shl(1);
let new_carry = *v >> 31;
*v = new_v | carry;
carry = new_carry;
}
carry
}
pub fn shr(&mut self) {
let mut carry = 0;
for v in self.value.iter_mut().rev() {
let (new_v, _) = v.overflowing_shr(1);
let new_carry = *v & 1;
*v = new_v | (carry << 31);
carry = new_carry;
}
}
/// Computes'self >>= n'
/// NOTE: We assume that 'n' is a publicly known constant.
pub fn shr_n(&mut self, n: usize) {
let byte_shift = n / BASE_BITS;
let carry_size = n % BASE_BITS;
let carry_mask = ((1 as BaseType) << carry_size).wrapping_sub(1);
for i in 0..self.value.len() {
let v = self.value[i];
self.value[i] = 0;
if i < byte_shift {
continue;
}
let j = i - byte_shift;
self.value[j] = v >> carry_size;
if carry_size!= 0 && j > 0 {
let carry = v & carry_mask;
self.value[j - 1] |= carry << (BASE_BITS - carry_size);
}
}
}
/// Computes self >>= BASE_BITS.
pub(super) fn shr_base(&mut self) {
assert_eq!(self.value[0], 0);
for j in 1..self.value.len() {
self.value[j - 1] = self.value[j];
}
let k = self.value.len();
self.value[k - 1] = 0;
}
pub fn and_assign(&mut self, rhs: &Self) {
for i in 0..self.value.len() {
self.value[i] &= rhs.value[i];
}
}
/// Efficienctly (in O(1) time) computes'self % 2^32'
pub fn mod_word(&self) -> u32 {
if self.value.len() == 0 {
0
} else {
self.value[0]
}
}
// TODO: Need a version of this using pmull in aarch64 (vmull_p64)
/// Interprates this integer and 'rhs' as polynomials over GF(2^n) and
/// multiplies them into 'out'.
///
/// Operations in this field:
/// - Addition is XOR
/// - Multiplication is AND
#[cfg(all(target_arch = "x86_64", target_feature = "pclmulqdq"))]
pub fn carryless_mul_to(&self, rhs: &Self, out: &mut Self) {
use crate::intrinsics::*;
use core::arch::x86_64::_mm_clmulepi64_si128;
assert!(out.bit_width() >= self.bit_width() + rhs.bit_width() - 1);
out.assign_zero();
for i in 0..self.value.len() {
let a = u64_to_m128i(self.value[i] as u64);
for j in 0..rhs.value.len() {
let b = u64_to_m128i(rhs.value[j] as u64);
let r = u64_from_m128i(unsafe { _mm_clmulepi64_si128(a, b, 0) });
let rl = r as u32;
let rh = (r >> 32) as u32;
// Add to output
out.value[i + j] ^= rl;
out.value[i + j + 1] ^= rh;
}
}
}
// TODO: Finish making this constant time and correct.
#[cfg(not(all(target_arch = "x86_64", target_feature = "pclmulqdq")))]
pub fn carryless_mul_to(&self, rhs: &Self, out: &mut Self) {
assert!(out.bit_width() >= self.bit_width() + rhs.bit_width() - 1);
out.assign_zero();
for i in 0..b.value_bits() {
out.xor_assign_if(b.bit(i) == 1, &a);
a.shl();
}
}
// TODO: Move to a shared utility.
pub fn to_string_radix(&self, radix: u32) -> alloc::string::String {
// TODO: These should be global constants (as well as one)
let zero = Self::from_usize(0, self.bit_width());
let div = Self::from_usize(radix as usize, 32);
let mut s = alloc::string::String::new();
let mut tmp = self.clone();
while tmp > zero {
// TODO: We can divide by a larger power of 10 to make this more efficient.
let (q, r) = tmp.quorem(&div);
tmp = q;
// TODO: Very inefficient
s.insert(
0,
core::char::from_digit(r.value.first().cloned().unwrap_or(0), radix).unwrap(),
);
}
if s.len() == 0 {
s.push('0');
}
s
}
/// Resets the value of the integer to 0.
pub fn assign_zero(&mut self) {
for | ///
/// Will panic if 'self' was >= 2*modulus
pub fn reduce_once(&mut self, modulus: &Self) {
let mut reduced = Self::from_usize(0, self.bit_width());
let overflow = self.overflowing_sub_to(modulus, &mut reduced); | random_line_split |
uint.rs | for i in 0..(data.len() / BASE_BYTES) {
out.value[i] = BaseType::from_be_bytes(*array_ref![
data,
data.len() - (BASE_BYTES * (i + 1)),
BASE_BYTES
]);
}
let rem = data.len() % BASE_BYTES;
if rem!= 0 {
let mut rest = [0u8; BASE_BYTES];
rest[(BASE_BYTES - rem)..].copy_from_slice(&data[0..rem]);
out.value[n] = BaseType::from_be_bytes(rest);
}
out
}
fn to_be_bytes(&self) -> Vec<u8> {
let mut data = vec![];
data.reserve_exact(self.value.len() * 4);
for v in self.value.iter().rev() {
data.extend_from_slice(&v.to_be_bytes());
}
data
}
/// Computes and returns'self + rhs'. The output buffer will be 1 bit
/// larger than the inputs to accomadate possible overflow.
fn add(&self, rhs: &Self) -> Self {
let mut out = Self::from_usize(0, core::cmp::max(self.bit_width(), rhs.bit_width()) + 1);
self.add_to(rhs, &mut out);
out
}
/// Computes 'output = self + rhs'. It is the user's responsibility to
/// ensure that the
fn add_to(&self, rhs: &Self, output: &mut Self) {
assert!(output.value.len() >= self.value.len());
assert!(output.value.len() >= rhs.value.len());
let mut carry = 0;
// TODO: Always loop through max(self, rhs, output) length so we know for sure
// that all carries are handled.
let n = output.value.len();
for i in 0..n {
let a = self.value.get(i).cloned().unwrap_or(0);
let b = rhs.value.get(i).cloned().unwrap_or(0);
let v = (a as u64) + (b as u64) + carry;
output.value[i] = v as BaseType;
carry = v >> 32;
}
assert_eq!(carry, 0);
}
/// Computes'self += rhs'.
fn add_assign(&mut self, rhs: &Self) {
let mut carry = 0;
let n = self.value.len();
for i in 0..n {
let v = (self.value[i] as u64) + (rhs.value[i] as u64) + carry;
self.value[i] = v as u32;
carry = v >> 32;
}
assert_eq!(carry, 0);
}
fn sub(&self, rhs: &Self) -> Self {
let mut out = self.clone();
out.sub_assign(rhs);
out
}
/// It would be useful to have a conditional form of this that adds like
/// subtraction by zero.
fn sub_assign(&mut self, rhs: &Self) {
assert!(!self.overflowing_sub_assign(rhs));
}
fn mul(&self, rhs: &Self) -> Self {
let mut out = Self::from_usize(0, self.bit_width() + rhs.bit_width());
self.mul_to(rhs, &mut out);
out
}
/// O(n^2) multiplication. Assumes that u64*u64 multiplication is always
/// constant time.
///
/// 'out' must be twice the size of
fn mul_to(&self, rhs: &Self, out: &mut Self) {
out.assign_zero();
let mut overflowed = false;
for i in 0..self.value.len() {
let mut carry = 0;
for j in 0..rhs.value.len() {
// TODO: Ensure this uses the UMAAL instruction on ARM
let tmp = ((self.value[i] as u64) * (rhs.value[j] as u64))
+ (out.value[i + j] as u64)
+ carry;
carry = tmp >> BASE_BITS;
out.value[i + j] = tmp as BaseType;
}
// assert!(carry <= u32::max_value() as u64);
if i + rhs.value.len() < out.value.len() {
out.value[i + rhs.value.len()] = carry as BaseType;
} else {
overflowed |= carry!= 0;
}
}
assert!(!overflowed);
}
fn bit(&self, i: usize) -> usize {
((self.value[i / BASE_BITS] >> (i % BASE_BITS)) & 0b01) as usize
}
fn set_bit(&mut self, i: usize, v: usize) {
assert!(v == 0 || v == 1);
let ii = i / BASE_BITS;
let shift = i % BASE_BITS;
let mask =!(1 << shift);
self.value[ii] = (self.value[ii] & mask) | ((v as BaseType) << shift);
}
/// Computes the quotient and remainder of'self / rhs'.
///
/// Any mixture of input bit_widths is supported.
/// Internally this uses binary long division.
///
/// NOTE: This is very slow and should be avoided if possible.
///
/// Returns a tuple of '(self / rhs, self % rhs)' where the quotient is the
/// same width as'self' and the remainder is the same width as 'rhs'.
fn quorem(&self, rhs: &Self) -> (Self, Self) {
let mut q = Self::from_usize(0, self.bit_width()); // Range is [0, Self]
let mut r = Self::from_usize(0, rhs.bit_width()); // Range is [0, rhs).
// TODO: Implement a bit iterator so set_bit requires less work.
for i in (0..self.bit_width()).rev() {
let carry = r.shl();
r.set_bit(0, self.bit(i));
let mut next_r = Self::from_usize(0, rhs.bit_width());
// If there is a carry, then we know that r might be > rhs when the shl also has
// a carry.
let carry2 = r.overflowing_sub_to(rhs, &mut next_r);
let subtract = (carry!= 0) == carry2;
next_r.copy_if(subtract, &mut r);
q.set_bit(i, if subtract { 1 } else { 0 });
}
(q, r)
}
fn value_bits(&self) -> usize {
for i in (0..self.value.len()).rev() {
let zeros = self.value[i].leading_zeros() as usize;
if zeros == BASE_BITS {
continue;
}
return (i * BASE_BITS) + (BASE_BITS - zeros);
}
0
}
fn bit_width(&self) -> usize {
self.value.len() * BASE_BITS
}
}
impl SecureBigUint {
pub fn byte_width(&self) -> usize {
self.value.len() * BASE_BYTES
}
/// Multiplies two numbers and adds their result to the out number.
/// out += self*rhs
pub(super) fn add_mul_to(&self, rhs: &Self, out: &mut Self) {
let a = &self.value[..];
let b = &rhs.value[..];
for i in 0..a.len() {
let mut carry = 0;
for j in 0..b.len() {
// TODO: Ensure this uses the UMAAL instruction on ARM
let tmp = ((a[i] as u64) * (b[j] as u64)) + (out.value[i + j] as u64) + carry;
carry = tmp >> BASE_BITS;
out.value[i + j] = tmp as u32;
}
for k in (i + b.len())..out.value.len() {
let tmp = (out.value[k] as u64) + carry;
carry = tmp >> BASE_BITS;
out.value[k] = tmp as u32;
}
}
}
/// Copies'self' to 'out' if should_copy is true. In all cases, this takes
/// a constant amount of time to execute.
///
/// NOTE:'self' and 'out' must have the same bit_width().
#[inline(never)]
pub fn copy_if(&self, should_copy: bool, out: &mut Self) {
assert_eq!(self.value.len(), out.value.len());
// Will be 0b111...111 if should_copy else 0.
let self_mask = (!(should_copy as BaseType)).wrapping_add(1);
let out_mask =!self_mask;
for (self_v, out_v) in self.value.iter().zip(out.value.iter_mut()) {
*out_v = (*self_v & self_mask).wrapping_add(*out_v & out_mask);
}
}
/// Swaps the contents of'self' and 'other' if'should_swap' is true.
///
/// The actual values of both integers are swapped rather than swapping any
/// internal memory pointers so that'should_swap' can not be inferred from
/// the memory locations of the final integers.
///
/// At a given integer bit_width, this should always take the same amount of
/// CPU cycles to execute.
#[inline(never)]
pub fn swap_if(&mut self, other: &mut Self, should_swap: bool) {
assert_eq!(self.value.len(), other.value.len());
// Will be 0b111...111 if should_swap else 0.
let mask = (!(should_swap as BaseType)).wrapping_add(1);
for (self_v, other_v) in self.value.iter_mut().zip(other.value.iter_mut()) {
// Will be 0 if we don't want to swap.
let filter = mask & (*self_v ^ *other_v);
*self_v ^= filter;
*other_v ^= filter;
}
}
/// In-place reverses all the order of all bits in this integer.
pub fn reverse_bits(&mut self) {
let mid = (self.value.len() + 1) / 2;
for i in 0..mid {
let j = self.value.len() - 1 - i;
// Swap if we are not at the middle limb (only relevant if we have an odd number
// of limbs).
if i!= j {
self.value.swap(i, j);
self.value[j] = self.value[j].reverse_bits();
}
self.value[i] = self.value[i].reverse_bits();
}
}
/// Performs'self ^= rhs' only if'should_apply' is true.
pub fn xor_assign_if(&mut self, should_apply: bool, rhs: &Self) {
assert_eq!(self.value.len(), rhs.value.len());
// Will be 0b111...111 if should_apply else 0.
let mask = (!(should_apply as BaseType)).wrapping_add(1);
for i in 0..self.value.len() {
self.value[i] ^= rhs.value[i] & mask;
}
}
pub fn discard(&mut self, bit_width: usize) {
let n = ceil_div(bit_width, BASE_BITS);
self.value.truncate(n);
}
///
pub fn truncate(&mut self, bit_width: usize) {
let n = ceil_div(bit_width, BASE_BITS);
// TODO: Also zero out any high bits
for i in n..self.value.len() {
assert_eq!(self.value[i], 0);
}
self.value.truncate(n);
}
/// Computes 2^n more efficiently than using pow().
/// Only supports exponents smaller than u32.
/// TODO: Just take as input a u32 directly.
pub fn exp2(n: u32, bit_width: usize) -> Self {
let mut out = Self::from_usize(0, bit_width);
out.set_bit(n as usize, 1);
out
}
pub fn is_zero(&self) -> bool {
let mut is = true;
for v in &self.value {
is &= *v == 0;
}
is
}
/// TODO: Improve the constant time behavior of this.
/// It would be useful to have a conditional form of this that adds like
/// subtraction by zero.
pub(super) fn overflowing_sub_assign(&mut self, rhs: &Self) -> bool {
let mut carry = 0;
let n = self.value.len();
for i in 0..n {
// rhs is allowed to be narrower than self
let r_i = if i < rhs.value.len() { rhs.value[i] } else { 0 };
// TODO: Try to use overflowing_sub instead (that way we don't need to go to
// 64bits)
let v = (self.value[i] as i64) - (r_i as i64) + carry;
if v < 0 {
self.value[i] = (v + (u32::max_value() as i64) + 1) as u32;
carry = -1;
} else {
self.value[i] = v as u32;
carry = 0;
}
}
carry!= 0
}
pub(super) fn overflowing_sub_to(&self, rhs: &Self, out: &mut Self) -> bool {
let mut carry = 0;
let n = out.value.len();
for i in 0..n {
let a = self.value.get(i).cloned().unwrap_or(0);
let b = rhs.value.get(i).cloned().unwrap_or(0);
// TODO: Try to use overflowing_sub instead (that way we don't need to go to
// 64bits)
let v = (a as i64) - (b as i64) + carry;
if v < 0 {
out.value[i] = (v + (u32::max_value() as i64) + 1) as u32;
carry = -1;
} else {
out.value[i] = v as u32;
carry = 0;
}
}
carry!= 0
}
/// Performs modular reduction using up to one subtraction of the modulus
/// from the value.
///
/// Will panic if'self' was >= 2*modulus
pub fn reduce_once(&mut self, modulus: &Self) {
let mut reduced = Self::from_usize(0, self.bit_width());
let overflow = self.overflowing_sub_to(modulus, &mut reduced);
reduced.copy_if(!overflow, self);
self.truncate(modulus.bit_width());
}
#[must_use]
pub fn shl(&mut self) -> BaseType {
let mut carry = 0;
for v in self.value.iter_mut() {
let (new_v, _) = v.overflowing_shl(1);
let new_carry = *v >> 31;
*v = new_v | carry;
carry = new_carry;
}
carry
}
pub fn shr(&mut self) {
let mut carry = 0;
for v in self.value.iter_mut().rev() {
let (new_v, _) = v.overflowing_shr(1);
let new_carry = *v & 1;
*v = new_v | (carry << 31);
carry = new_carry;
}
}
/// Computes'self >>= n'
/// NOTE: We assume that 'n' is a publicly known constant.
pub fn | (&mut self, n: usize) {
let byte_shift = n / BASE_BITS;
let carry_size = n % BASE_BITS;
let carry_mask = ((1 as BaseType) << carry_size).wrapping_sub(1);
for i in 0..self.value.len() {
let v = self.value[i];
self.value[i] = 0;
if i < byte_shift {
continue;
}
let j = i - byte_shift;
self.value[j] = v >> carry_size;
if carry_size!= 0 && j > 0 {
let carry = v & carry_mask;
self.value[j - 1] |= carry << (BASE_BITS - carry_size);
}
}
}
/// Computes self >>= BASE_BITS.
pub(super) fn shr_base(&mut self) {
assert_eq!(self.value[0], 0);
for j in 1..self.value.len() {
self.value[j - 1] = self.value[j];
}
let k = self.value.len();
self.value[k - 1] = 0;
}
pub fn and_assign(&mut self, rhs: &Self) {
for i in 0..self.value.len() {
self.value[i] &= rhs.value[i];
}
}
/// Efficienctly (in O(1) time) computes'self % 2^32'
pub fn mod_word(&self) -> u32 {
if self.value.len() == 0 {
0
} else {
self.value[0]
}
}
// TODO: Need a version of this using pmull in aarch64 (vmull_p64)
/// Interprates this integer and 'rhs' as polynomials over GF(2^n) and
/// multiplies them into 'out'.
///
/// Operations in this field:
/// - Addition is XOR
/// - Multiplication is AND
#[cfg(all(target_arch = "x86_64", target_feature = "pclmulqdq"))]
pub fn carryless_mul_to(&self, rhs: &Self, out: &mut Self) {
use crate::intrinsics::*;
use core::arch::x86_64::_mm_clmulepi64_si128;
assert!(out.bit_width() >= self.bit_width() + rhs.bit_width() - 1);
out.assign_zero();
for i in 0..self.value.len() {
let a = u64_to_m128i(self.value[i] as u64);
for j in 0..rhs.value.len() {
let b = u64_to_m128i(rhs.value[j] as u64);
let r = u64_from_m128i(unsafe { _mm_clmulepi64_si128(a, b, 0) });
let rl = r as u32;
let rh = (r >> 32) as u32;
// Add to output
out.value[i + j] ^= rl;
out.value[i + j + 1] ^= rh;
}
}
}
// TODO: Finish making this constant time and correct.
#[cfg(not(all(target_arch = "x86_64", target_feature = "pclmulqdq")))]
pub fn carryless_mul_to(&self, rhs: &Self, out: &mut Self) {
assert!(out.bit_width() >= self.bit_width() + rhs.bit_width() - 1);
out.assign_zero();
for i in 0..b.value_bits() {
out.xor_assign_if(b.bit(i) == 1, &a);
a.shl();
}
}
// TODO: Move to a shared utility.
pub fn to_string_radix(&self, radix: u32) -> alloc::string::String {
// TODO: These should be global constants (as well as one)
let zero = Self::from_usize(0, self.bit_width());
let div = Self::from_usize(radix as usize, 32);
let mut s = alloc::string::String::new();
let mut tmp = self.clone();
while tmp > zero {
// TODO: We can divide by a larger power of 10 to make this more efficient.
let (q, r) = tmp.quorem(&div);
tmp = q;
// TODO: Very inefficient
s.insert(
0,
core::char::from_digit(r.value.first().cloned().unwrap_or(0), radix).unwrap(),
);
}
if s.len() == 0 {
s.push('0');
}
s
}
/// Resets the value of the integer to 0.
pub fn assign_zero(&mut self) {
| shr_n | identifier_name |
lid_shutdown.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::error::PowerManagerError;
use crate::message::{Message, MessageReturn};
use crate::node::Node;
use crate::shutdown_request::ShutdownRequest;
use crate::utils::connect_to_driver;
use anyhow::{format_err, Error};
use async_trait::async_trait;
use fidl_fuchsia_hardware_input::{DeviceMarker as LidMarker, DeviceProxy as LidProxy};
use fuchsia_inspect::{self as inspect, NumericProperty, Property};
use fuchsia_inspect_contrib::{inspect_log, nodes::BoundedListNode};
use fuchsia_vfs_watcher as vfs;
use fuchsia_zircon::{self as zx, AsHandleRef};
use futures::{
future::{FutureExt, LocalBoxFuture},
stream::FuturesUnordered,
TryStreamExt,
};
use io_util::{open_directory_in_namespace, OPEN_RIGHT_READABLE};
use log::*;
use serde_derive::Deserialize;
use serde_json as json;
use std::{
cell::RefCell,
collections::HashMap,
path::{Path, PathBuf},
rc::Rc,
};
/// Node: LidShutdown
///
/// Summary: Responds to lid closed events from devices with a lid sensor by waiting for a report
/// using the input FIDL protocol.
///
/// Handles Messages: N/A
///
/// Sends Messages:
/// - SystemShutdown
///
/// FIDL dependencies:
/// - fuchsia.hardware.input: the node uses this protocol to wait on reports from the
/// lid device
/// The lid sensor is not a real HID device however this descriptor is defined in the lid driver
/// and will be used to send lid HID reports for any ACPI lid sensor.
const HID_LID_DESCRIPTOR: [u8; 9] = [
0x05, 0x01, // Usage Page (Generic Desktop)
0x09, 0x80, // Usage (System Control)
0xA1, 0x01, // Collection (Application)
0x0A, 0xFF, 0x01, // Usage (0x01FF, unique to lid reports)
];
// Lid closed HID report
const LID_CLOSED: u8 = 0x0;
static INPUT_DEVICES_DIRECTORY: &str = "/dev/class/input";
pub struct LidShutdownBuilder<'a> {
proxy: Option<LidProxy>,
lid_report_event: Option<zx::Event>,
system_shutdown_node: Rc<dyn Node>,
inspect_root: Option<&'a inspect::Node>,
}
impl<'a> LidShutdownBuilder<'a> {
pub fn new(system_shutdown_node: Rc<dyn Node>) -> Self {
LidShutdownBuilder {
proxy: None,
lid_report_event: None,
system_shutdown_node,
inspect_root: None,
}
}
#[cfg(test)]
pub fn new_with_event_and_proxy(
proxy: LidProxy,
lid_report_event: zx::Event,
system_shutdown_node: Rc<dyn Node>,
) -> Self {
Self {
proxy: Some(proxy),
lid_report_event: Some(lid_report_event),
system_shutdown_node,
inspect_root: None,
}
}
pub fn new_from_json(json_data: json::Value, nodes: &HashMap<String, Rc<dyn Node>>) -> Self {
#[derive(Deserialize)]
struct Dependencies {
system_shutdown_node: String,
}
#[derive(Deserialize)]
struct JsonData {
dependencies: Dependencies,
}
let data: JsonData = json::from_value(json_data).unwrap();
Self::new(nodes[&data.dependencies.system_shutdown_node].clone())
}
#[cfg(test)]
pub fn with_inspect_root(mut self, root: &'a inspect::Node) -> Self {
self.inspect_root = Some(root);
self
}
pub async fn build<'b>(
self,
futures_out: &FuturesUnordered<LocalBoxFuture<'b, ()>>,
) -> Result<Rc<LidShutdown>, Error> {
// In tests use the default proxy.
let proxy = match self.proxy {
Some(proxy) => proxy,
None => Self::find_lid_sensor().await?,
};
// In tests use the default event.
let report_event = match self.lid_report_event {
Some(report_event) => report_event,
None => match proxy.get_reports_event().await {
Ok((_, report_event)) => report_event,
Err(_e) => return Err(format_err!("Could not get report event.")),
},
};
// In tests use the default inspect root node
let inspect_root = self.inspect_root.unwrap_or(inspect::component::inspector().root());
let node = Rc::new(LidShutdown {
proxy,
report_event,
system_shutdown_node: self.system_shutdown_node,
inspect: InspectData::new(inspect_root, "LidShutdown".to_string()),
});
futures_out.push(node.clone().watch_lid());
Ok(node)
}
/// Checks all the input devices until the lid sensor is found.
async fn find_lid_sensor() -> Result<LidProxy, Error> {
let dir_proxy = open_directory_in_namespace(INPUT_DEVICES_DIRECTORY, OPEN_RIGHT_READABLE)?;
let mut watcher = vfs::Watcher::new(dir_proxy).await?;
while let Some(msg) = watcher.try_next().await? {
match msg.event {
vfs::WatchEvent::EXISTING | vfs::WatchEvent::ADD_FILE => {
match Self::open_sensor(&msg.filename).await {
Ok(device) => return Ok(device),
_ => (),
}
}
_ => (),
}
}
Err(format_err!("No lid device found"))
}
/// Opens the sensor's device file. Returns the device if the correct HID
/// report descriptor is found.
async fn open_sensor(filename: &PathBuf) -> Result<LidProxy, Error> {
let path = Path::new(INPUT_DEVICES_DIRECTORY).join(filename);
let device = connect_to_driver::<LidMarker>(&String::from(
path.to_str().ok_or(format_err!("Could not read path {:?}", path))?,
))
.await?;
if let Ok(device_descriptor) = device.get_report_desc().await {
if device_descriptor.len() < HID_LID_DESCRIPTOR.len() {
return Err(format_err!("Short HID header"));
}
let device_header = &device_descriptor[0..HID_LID_DESCRIPTOR.len()];
if device_header == HID_LID_DESCRIPTOR {
return Ok(device);
} else {
return Err(format_err!("Device is not lid sensor"));
}
}
Err(format_err!("Could not get device HID report descriptor"))
}
}
pub struct LidShutdown {
proxy: LidProxy,
/// Event that will signal |USER_0| when a report is in the lid device's report FIFO.
report_event: zx::Event,
/// Node to provide the system shutdown functionality via the SystemShutdown message.
system_shutdown_node: Rc<dyn Node>,
/// A struct for managing Component Inspection data
inspect: InspectData,
}
impl LidShutdown {
fn watch_lid<'a>(self: Rc<Self>) -> LocalBoxFuture<'a, ()> {
async move {
loop {
self.watch_lid_inner().await;
}
}
.boxed_local()
}
/// Watches the lid device for reports.
async fn watch_lid_inner(&self) {
match self.report_event.wait_handle(zx::Signals::USER_0, zx::Time::INFINITE) {
Err(e) => error!("Could not wait for lid event: {:?}", e),
_ => match self.check_report().await {
Ok(()) => (),
Err(e) => {
self.inspect.read_errors.add(1);
self.inspect.last_read_error.set(format!("{}", e).as_str());
error!("Failed to read lid report: {}", e)
}
},
};
}
/// Reads the report from the lid sensor and sends shutdown signal if lid is closed.
async fn check_report(&self) -> Result<(), Error> {
let (status, report, _time) = self.proxy.read_report().await?;
let status = zx::Status::from_raw(status);
if status!= zx::Status::OK {
return Err(format_err!("Error reading report {}", status));
}
if report.len()!= 1 {
return Err(format_err!("Expected single byte report, found {:?}", report));
}
self.inspect.log_lid_report(format!("{:?}", report));
let report = report[0];
if report == LID_CLOSED {
info!("Lid closed. Shutting down...");
self.send_message(
&self.system_shutdown_node,
&Message::SystemShutdown(ShutdownRequest::PowerOff),
)
.await
.map_err(|e| format_err!("Failed to shut down the system: {:?}", e))?;
}
Ok(())
}
}
#[async_trait(?Send)]
impl Node for LidShutdown {
fn | (&self) -> String {
"LidShutdown".to_string()
}
async fn handle_message(&self, _msg: &Message) -> Result<MessageReturn, PowerManagerError> {
Err(PowerManagerError::Unsupported)
}
}
struct InspectData {
lid_reports: RefCell<BoundedListNode>,
read_errors: inspect::UintProperty,
last_read_error: inspect::StringProperty,
}
impl InspectData {
/// Number of inspect samples to store in the `lid_reports` BoundedListNode.
// Store the last 60 lid reports
const NUM_INSPECT_LID_REPORTS: usize = 60;
fn new(parent: &inspect::Node, name: String) -> Self {
// Create a local root node and properties
let root = parent.create_child(name);
let lid_reports = RefCell::new(BoundedListNode::new(
root.create_child("lid_reports"),
Self::NUM_INSPECT_LID_REPORTS,
));
let read_errors = root.create_uint("read_lid_report_error_count", 0);
let last_read_error = root.create_string("last_read_error", "");
// Pass ownership of the new node to the parent node, otherwise it'll be dropped
parent.record(root);
InspectData { lid_reports, read_errors, last_read_error }
}
fn log_lid_report(&self, lid_report: String) {
inspect_log!(self.lid_reports.borrow_mut(), lid_report: lid_report);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::mock_node::{create_dummy_node, MessageMatcher, MockNodeMaker};
use crate::{msg_eq, msg_ok_return};
use fidl_fuchsia_hardware_input as finput;
use fuchsia_async as fasync;
use fuchsia_inspect::testing::TreeAssertion;
use fuchsia_zircon::HandleBased;
use inspect::assert_data_tree;
const LID_OPEN: u8 = 0x1;
/// Spawns a new task that acts as a fake device driver for testing purposes. The driver only
/// handles requests for ReadReport - trying to send any other requests to it is a bug.
/// Each ReadReport responds with the |lid_report| specified.
fn setup_fake_driver(lid_report: u8) -> LidProxy {
let (proxy, mut stream) = fidl::endpoints::create_proxy_and_stream::<LidMarker>().unwrap();
fasync::Task::local(async move {
while let Ok(req) = stream.try_next().await {
match req {
Some(finput::DeviceRequest::ReadReport { responder }) => {
let _ = responder.send(zx::Status::OK.into_raw(), &[lid_report], 0 as i64);
}
_ => assert!(false),
}
}
})
.detach();
proxy
}
/// Tests that well-formed configuration JSON does not panic the `new_from_json` function.
#[fasync::run_singlethreaded(test)]
async fn test_new_from_json() {
let json_data = json::json!({
"type": "LidShutdown",
"name": "lid_shutdown",
"dependencies": {
"system_shutdown_node": "shutdown",
},
});
let mut nodes: HashMap<String, Rc<dyn Node>> = HashMap::new();
nodes.insert("shutdown".to_string(), create_dummy_node());
let _ = LidShutdownBuilder::new_from_json(json_data, &nodes);
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid closed report, it triggers a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_triggered_shutdown() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![(
msg_eq!(SystemShutdown(ShutdownRequest::PowerOff)),
msg_ok_return!(SystemShutdown),
)],
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker goes out of scope, it verifies the ShutdownNode received the shutdown
// request.
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid open report, it does NOT trigger a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_event_handling() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![], // the shutdown node is not expected to receive any messages
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_OPEN),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker will verify that the ShutdownNode receives no messages until it goes
// out of scope.
}
/// Tests that an unsupported message is handled gracefully and an error is returned.
#[fasync::run_singlethreaded(test)]
async fn test_unsupported_msg() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
zx::Event::create().unwrap(),
shutdown_node,
)
.build(&node_futures)
.await
.unwrap();
match node.handle_message(&Message::GetCpuLoads).await {
Err(PowerManagerError::Unsupported) => {}
e => panic!("Unexpected return value: {:?}", e),
}
}
/// Tests for the presence and correctness of dynamically-added inspect data
#[fasync::run_singlethreaded(test)]
async fn test_inspect_data() {
let lid_state = LID_OPEN;
let inspector = inspect::Inspector::new();
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let event = zx::Event::create().unwrap();
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(lid_state),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.with_inspect_root(inspector.root())
.build(&node_futures)
.await
.unwrap();
// The node will read the current temperature and log the sample into Inspect. Read enough
// samples to test that the correct number of samples are logged and older ones are dropped.
for _ in 0..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
}
let mut root = TreeAssertion::new("LidShutdown", false);
let mut lid_reports = TreeAssertion::new("lid_reports", true);
// Since we read 10 more samples than our limit allows, the first 10 should be dropped. So
// test that the sample numbering starts at 10 and continues for the expected number of
// samples.
for i in 10..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
let mut sample_child = TreeAssertion::new(&i.to_string(), true);
sample_child
.add_property_assertion("lid_report", Box::new(format!("{:?}", [lid_state])));
sample_child.add_property_assertion("@time", Box::new(inspect::testing::AnyProperty));
lid_reports.add_child_assertion(sample_child);
}
root.add_child_assertion(lid_reports);
assert_data_tree!(inspector, root: { root, });
}
}
| name | identifier_name |
lid_shutdown.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::error::PowerManagerError;
use crate::message::{Message, MessageReturn};
use crate::node::Node;
use crate::shutdown_request::ShutdownRequest;
use crate::utils::connect_to_driver;
use anyhow::{format_err, Error};
use async_trait::async_trait;
use fidl_fuchsia_hardware_input::{DeviceMarker as LidMarker, DeviceProxy as LidProxy};
use fuchsia_inspect::{self as inspect, NumericProperty, Property};
use fuchsia_inspect_contrib::{inspect_log, nodes::BoundedListNode};
use fuchsia_vfs_watcher as vfs;
use fuchsia_zircon::{self as zx, AsHandleRef};
use futures::{
future::{FutureExt, LocalBoxFuture},
stream::FuturesUnordered,
TryStreamExt,
};
use io_util::{open_directory_in_namespace, OPEN_RIGHT_READABLE};
use log::*;
use serde_derive::Deserialize;
use serde_json as json;
use std::{
cell::RefCell,
collections::HashMap,
path::{Path, PathBuf},
rc::Rc,
};
/// Node: LidShutdown
///
/// Summary: Responds to lid closed events from devices with a lid sensor by waiting for a report
/// using the input FIDL protocol.
///
/// Handles Messages: N/A
///
/// Sends Messages:
/// - SystemShutdown
///
/// FIDL dependencies:
/// - fuchsia.hardware.input: the node uses this protocol to wait on reports from the
/// lid device
/// The lid sensor is not a real HID device however this descriptor is defined in the lid driver
/// and will be used to send lid HID reports for any ACPI lid sensor.
const HID_LID_DESCRIPTOR: [u8; 9] = [
0x05, 0x01, // Usage Page (Generic Desktop)
0x09, 0x80, // Usage (System Control)
0xA1, 0x01, // Collection (Application)
0x0A, 0xFF, 0x01, // Usage (0x01FF, unique to lid reports)
];
// Lid closed HID report
const LID_CLOSED: u8 = 0x0;
static INPUT_DEVICES_DIRECTORY: &str = "/dev/class/input";
pub struct LidShutdownBuilder<'a> {
proxy: Option<LidProxy>,
lid_report_event: Option<zx::Event>,
system_shutdown_node: Rc<dyn Node>,
inspect_root: Option<&'a inspect::Node>,
}
impl<'a> LidShutdownBuilder<'a> {
pub fn new(system_shutdown_node: Rc<dyn Node>) -> Self {
LidShutdownBuilder {
proxy: None,
lid_report_event: None,
system_shutdown_node,
inspect_root: None,
}
}
#[cfg(test)]
pub fn new_with_event_and_proxy(
proxy: LidProxy,
lid_report_event: zx::Event,
system_shutdown_node: Rc<dyn Node>,
) -> Self {
Self {
proxy: Some(proxy),
lid_report_event: Some(lid_report_event),
system_shutdown_node,
inspect_root: None,
}
}
pub fn new_from_json(json_data: json::Value, nodes: &HashMap<String, Rc<dyn Node>>) -> Self {
#[derive(Deserialize)]
struct Dependencies {
system_shutdown_node: String,
}
#[derive(Deserialize)]
struct JsonData {
dependencies: Dependencies,
}
let data: JsonData = json::from_value(json_data).unwrap();
Self::new(nodes[&data.dependencies.system_shutdown_node].clone())
}
#[cfg(test)]
pub fn with_inspect_root(mut self, root: &'a inspect::Node) -> Self {
self.inspect_root = Some(root);
self
}
pub async fn build<'b>(
self,
futures_out: &FuturesUnordered<LocalBoxFuture<'b, ()>>,
) -> Result<Rc<LidShutdown>, Error> {
// In tests use the default proxy.
let proxy = match self.proxy {
Some(proxy) => proxy,
None => Self::find_lid_sensor().await?,
};
// In tests use the default event.
let report_event = match self.lid_report_event {
Some(report_event) => report_event,
None => match proxy.get_reports_event().await {
Ok((_, report_event)) => report_event,
Err(_e) => return Err(format_err!("Could not get report event.")),
},
};
// In tests use the default inspect root node
let inspect_root = self.inspect_root.unwrap_or(inspect::component::inspector().root());
let node = Rc::new(LidShutdown {
proxy,
report_event,
system_shutdown_node: self.system_shutdown_node,
inspect: InspectData::new(inspect_root, "LidShutdown".to_string()),
});
futures_out.push(node.clone().watch_lid());
Ok(node)
}
/// Checks all the input devices until the lid sensor is found.
async fn find_lid_sensor() -> Result<LidProxy, Error> {
let dir_proxy = open_directory_in_namespace(INPUT_DEVICES_DIRECTORY, OPEN_RIGHT_READABLE)?;
let mut watcher = vfs::Watcher::new(dir_proxy).await?;
while let Some(msg) = watcher.try_next().await? {
match msg.event {
vfs::WatchEvent::EXISTING | vfs::WatchEvent::ADD_FILE => {
match Self::open_sensor(&msg.filename).await {
Ok(device) => return Ok(device),
_ => (),
}
}
_ => (),
}
}
Err(format_err!("No lid device found"))
}
/// Opens the sensor's device file. Returns the device if the correct HID
/// report descriptor is found.
async fn open_sensor(filename: &PathBuf) -> Result<LidProxy, Error> {
let path = Path::new(INPUT_DEVICES_DIRECTORY).join(filename);
let device = connect_to_driver::<LidMarker>(&String::from(
path.to_str().ok_or(format_err!("Could not read path {:?}", path))?,
))
.await?;
if let Ok(device_descriptor) = device.get_report_desc().await {
if device_descriptor.len() < HID_LID_DESCRIPTOR.len() {
return Err(format_err!("Short HID header"));
}
let device_header = &device_descriptor[0..HID_LID_DESCRIPTOR.len()];
if device_header == HID_LID_DESCRIPTOR {
return Ok(device);
} else {
return Err(format_err!("Device is not lid sensor"));
}
}
Err(format_err!("Could not get device HID report descriptor"))
}
}
pub struct LidShutdown {
proxy: LidProxy,
/// Event that will signal |USER_0| when a report is in the lid device's report FIFO.
report_event: zx::Event,
/// Node to provide the system shutdown functionality via the SystemShutdown message.
system_shutdown_node: Rc<dyn Node>,
/// A struct for managing Component Inspection data
inspect: InspectData,
}
impl LidShutdown {
fn watch_lid<'a>(self: Rc<Self>) -> LocalBoxFuture<'a, ()> {
async move {
loop {
self.watch_lid_inner().await;
}
}
.boxed_local()
}
/// Watches the lid device for reports.
async fn watch_lid_inner(&self) {
match self.report_event.wait_handle(zx::Signals::USER_0, zx::Time::INFINITE) {
Err(e) => error!("Could not wait for lid event: {:?}", e),
_ => match self.check_report().await {
Ok(()) => (),
Err(e) => {
self.inspect.read_errors.add(1);
self.inspect.last_read_error.set(format!("{}", e).as_str());
error!("Failed to read lid report: {}", e)
} | /// Reads the report from the lid sensor and sends shutdown signal if lid is closed.
async fn check_report(&self) -> Result<(), Error> {
let (status, report, _time) = self.proxy.read_report().await?;
let status = zx::Status::from_raw(status);
if status!= zx::Status::OK {
return Err(format_err!("Error reading report {}", status));
}
if report.len()!= 1 {
return Err(format_err!("Expected single byte report, found {:?}", report));
}
self.inspect.log_lid_report(format!("{:?}", report));
let report = report[0];
if report == LID_CLOSED {
info!("Lid closed. Shutting down...");
self.send_message(
&self.system_shutdown_node,
&Message::SystemShutdown(ShutdownRequest::PowerOff),
)
.await
.map_err(|e| format_err!("Failed to shut down the system: {:?}", e))?;
}
Ok(())
}
}
#[async_trait(?Send)]
impl Node for LidShutdown {
fn name(&self) -> String {
"LidShutdown".to_string()
}
async fn handle_message(&self, _msg: &Message) -> Result<MessageReturn, PowerManagerError> {
Err(PowerManagerError::Unsupported)
}
}
struct InspectData {
lid_reports: RefCell<BoundedListNode>,
read_errors: inspect::UintProperty,
last_read_error: inspect::StringProperty,
}
impl InspectData {
/// Number of inspect samples to store in the `lid_reports` BoundedListNode.
// Store the last 60 lid reports
const NUM_INSPECT_LID_REPORTS: usize = 60;
fn new(parent: &inspect::Node, name: String) -> Self {
// Create a local root node and properties
let root = parent.create_child(name);
let lid_reports = RefCell::new(BoundedListNode::new(
root.create_child("lid_reports"),
Self::NUM_INSPECT_LID_REPORTS,
));
let read_errors = root.create_uint("read_lid_report_error_count", 0);
let last_read_error = root.create_string("last_read_error", "");
// Pass ownership of the new node to the parent node, otherwise it'll be dropped
parent.record(root);
InspectData { lid_reports, read_errors, last_read_error }
}
fn log_lid_report(&self, lid_report: String) {
inspect_log!(self.lid_reports.borrow_mut(), lid_report: lid_report);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::mock_node::{create_dummy_node, MessageMatcher, MockNodeMaker};
use crate::{msg_eq, msg_ok_return};
use fidl_fuchsia_hardware_input as finput;
use fuchsia_async as fasync;
use fuchsia_inspect::testing::TreeAssertion;
use fuchsia_zircon::HandleBased;
use inspect::assert_data_tree;
const LID_OPEN: u8 = 0x1;
/// Spawns a new task that acts as a fake device driver for testing purposes. The driver only
/// handles requests for ReadReport - trying to send any other requests to it is a bug.
/// Each ReadReport responds with the |lid_report| specified.
fn setup_fake_driver(lid_report: u8) -> LidProxy {
let (proxy, mut stream) = fidl::endpoints::create_proxy_and_stream::<LidMarker>().unwrap();
fasync::Task::local(async move {
while let Ok(req) = stream.try_next().await {
match req {
Some(finput::DeviceRequest::ReadReport { responder }) => {
let _ = responder.send(zx::Status::OK.into_raw(), &[lid_report], 0 as i64);
}
_ => assert!(false),
}
}
})
.detach();
proxy
}
/// Tests that well-formed configuration JSON does not panic the `new_from_json` function.
#[fasync::run_singlethreaded(test)]
async fn test_new_from_json() {
let json_data = json::json!({
"type": "LidShutdown",
"name": "lid_shutdown",
"dependencies": {
"system_shutdown_node": "shutdown",
},
});
let mut nodes: HashMap<String, Rc<dyn Node>> = HashMap::new();
nodes.insert("shutdown".to_string(), create_dummy_node());
let _ = LidShutdownBuilder::new_from_json(json_data, &nodes);
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid closed report, it triggers a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_triggered_shutdown() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![(
msg_eq!(SystemShutdown(ShutdownRequest::PowerOff)),
msg_ok_return!(SystemShutdown),
)],
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker goes out of scope, it verifies the ShutdownNode received the shutdown
// request.
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid open report, it does NOT trigger a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_event_handling() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![], // the shutdown node is not expected to receive any messages
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_OPEN),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker will verify that the ShutdownNode receives no messages until it goes
// out of scope.
}
/// Tests that an unsupported message is handled gracefully and an error is returned.
#[fasync::run_singlethreaded(test)]
async fn test_unsupported_msg() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
zx::Event::create().unwrap(),
shutdown_node,
)
.build(&node_futures)
.await
.unwrap();
match node.handle_message(&Message::GetCpuLoads).await {
Err(PowerManagerError::Unsupported) => {}
e => panic!("Unexpected return value: {:?}", e),
}
}
/// Tests for the presence and correctness of dynamically-added inspect data
#[fasync::run_singlethreaded(test)]
async fn test_inspect_data() {
let lid_state = LID_OPEN;
let inspector = inspect::Inspector::new();
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let event = zx::Event::create().unwrap();
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(lid_state),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.with_inspect_root(inspector.root())
.build(&node_futures)
.await
.unwrap();
// The node will read the current temperature and log the sample into Inspect. Read enough
// samples to test that the correct number of samples are logged and older ones are dropped.
for _ in 0..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
}
let mut root = TreeAssertion::new("LidShutdown", false);
let mut lid_reports = TreeAssertion::new("lid_reports", true);
// Since we read 10 more samples than our limit allows, the first 10 should be dropped. So
// test that the sample numbering starts at 10 and continues for the expected number of
// samples.
for i in 10..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
let mut sample_child = TreeAssertion::new(&i.to_string(), true);
sample_child
.add_property_assertion("lid_report", Box::new(format!("{:?}", [lid_state])));
sample_child.add_property_assertion("@time", Box::new(inspect::testing::AnyProperty));
lid_reports.add_child_assertion(sample_child);
}
root.add_child_assertion(lid_reports);
assert_data_tree!(inspector, root: { root, });
}
} | },
};
}
| random_line_split |
lid_shutdown.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::error::PowerManagerError;
use crate::message::{Message, MessageReturn};
use crate::node::Node;
use crate::shutdown_request::ShutdownRequest;
use crate::utils::connect_to_driver;
use anyhow::{format_err, Error};
use async_trait::async_trait;
use fidl_fuchsia_hardware_input::{DeviceMarker as LidMarker, DeviceProxy as LidProxy};
use fuchsia_inspect::{self as inspect, NumericProperty, Property};
use fuchsia_inspect_contrib::{inspect_log, nodes::BoundedListNode};
use fuchsia_vfs_watcher as vfs;
use fuchsia_zircon::{self as zx, AsHandleRef};
use futures::{
future::{FutureExt, LocalBoxFuture},
stream::FuturesUnordered,
TryStreamExt,
};
use io_util::{open_directory_in_namespace, OPEN_RIGHT_READABLE};
use log::*;
use serde_derive::Deserialize;
use serde_json as json;
use std::{
cell::RefCell,
collections::HashMap,
path::{Path, PathBuf},
rc::Rc,
};
/// Node: LidShutdown
///
/// Summary: Responds to lid closed events from devices with a lid sensor by waiting for a report
/// using the input FIDL protocol.
///
/// Handles Messages: N/A
///
/// Sends Messages:
/// - SystemShutdown
///
/// FIDL dependencies:
/// - fuchsia.hardware.input: the node uses this protocol to wait on reports from the
/// lid device
/// The lid sensor is not a real HID device however this descriptor is defined in the lid driver
/// and will be used to send lid HID reports for any ACPI lid sensor.
const HID_LID_DESCRIPTOR: [u8; 9] = [
0x05, 0x01, // Usage Page (Generic Desktop)
0x09, 0x80, // Usage (System Control)
0xA1, 0x01, // Collection (Application)
0x0A, 0xFF, 0x01, // Usage (0x01FF, unique to lid reports)
];
// Lid closed HID report
const LID_CLOSED: u8 = 0x0;
static INPUT_DEVICES_DIRECTORY: &str = "/dev/class/input";
pub struct LidShutdownBuilder<'a> {
proxy: Option<LidProxy>,
lid_report_event: Option<zx::Event>,
system_shutdown_node: Rc<dyn Node>,
inspect_root: Option<&'a inspect::Node>,
}
impl<'a> LidShutdownBuilder<'a> {
pub fn new(system_shutdown_node: Rc<dyn Node>) -> Self {
LidShutdownBuilder {
proxy: None,
lid_report_event: None,
system_shutdown_node,
inspect_root: None,
}
}
#[cfg(test)]
pub fn new_with_event_and_proxy(
proxy: LidProxy,
lid_report_event: zx::Event,
system_shutdown_node: Rc<dyn Node>,
) -> Self {
Self {
proxy: Some(proxy),
lid_report_event: Some(lid_report_event),
system_shutdown_node,
inspect_root: None,
}
}
pub fn new_from_json(json_data: json::Value, nodes: &HashMap<String, Rc<dyn Node>>) -> Self {
#[derive(Deserialize)]
struct Dependencies {
system_shutdown_node: String,
}
#[derive(Deserialize)]
struct JsonData {
dependencies: Dependencies,
}
let data: JsonData = json::from_value(json_data).unwrap();
Self::new(nodes[&data.dependencies.system_shutdown_node].clone())
}
#[cfg(test)]
pub fn with_inspect_root(mut self, root: &'a inspect::Node) -> Self {
self.inspect_root = Some(root);
self
}
pub async fn build<'b>(
self,
futures_out: &FuturesUnordered<LocalBoxFuture<'b, ()>>,
) -> Result<Rc<LidShutdown>, Error> {
// In tests use the default proxy.
let proxy = match self.proxy {
Some(proxy) => proxy,
None => Self::find_lid_sensor().await?,
};
// In tests use the default event.
let report_event = match self.lid_report_event {
Some(report_event) => report_event,
None => match proxy.get_reports_event().await {
Ok((_, report_event)) => report_event,
Err(_e) => return Err(format_err!("Could not get report event.")),
},
};
// In tests use the default inspect root node
let inspect_root = self.inspect_root.unwrap_or(inspect::component::inspector().root());
let node = Rc::new(LidShutdown {
proxy,
report_event,
system_shutdown_node: self.system_shutdown_node,
inspect: InspectData::new(inspect_root, "LidShutdown".to_string()),
});
futures_out.push(node.clone().watch_lid());
Ok(node)
}
/// Checks all the input devices until the lid sensor is found.
async fn find_lid_sensor() -> Result<LidProxy, Error> {
let dir_proxy = open_directory_in_namespace(INPUT_DEVICES_DIRECTORY, OPEN_RIGHT_READABLE)?;
let mut watcher = vfs::Watcher::new(dir_proxy).await?;
while let Some(msg) = watcher.try_next().await? {
match msg.event {
vfs::WatchEvent::EXISTING | vfs::WatchEvent::ADD_FILE => {
match Self::open_sensor(&msg.filename).await {
Ok(device) => return Ok(device),
_ => (),
}
}
_ => (),
}
}
Err(format_err!("No lid device found"))
}
/// Opens the sensor's device file. Returns the device if the correct HID
/// report descriptor is found.
async fn open_sensor(filename: &PathBuf) -> Result<LidProxy, Error> {
let path = Path::new(INPUT_DEVICES_DIRECTORY).join(filename);
let device = connect_to_driver::<LidMarker>(&String::from(
path.to_str().ok_or(format_err!("Could not read path {:?}", path))?,
))
.await?;
if let Ok(device_descriptor) = device.get_report_desc().await {
if device_descriptor.len() < HID_LID_DESCRIPTOR.len() {
return Err(format_err!("Short HID header"));
}
let device_header = &device_descriptor[0..HID_LID_DESCRIPTOR.len()];
if device_header == HID_LID_DESCRIPTOR {
return Ok(device);
} else {
return Err(format_err!("Device is not lid sensor"));
}
}
Err(format_err!("Could not get device HID report descriptor"))
}
}
pub struct LidShutdown {
proxy: LidProxy,
/// Event that will signal |USER_0| when a report is in the lid device's report FIFO.
report_event: zx::Event,
/// Node to provide the system shutdown functionality via the SystemShutdown message.
system_shutdown_node: Rc<dyn Node>,
/// A struct for managing Component Inspection data
inspect: InspectData,
}
impl LidShutdown {
fn watch_lid<'a>(self: Rc<Self>) -> LocalBoxFuture<'a, ()> {
async move {
loop {
self.watch_lid_inner().await;
}
}
.boxed_local()
}
/// Watches the lid device for reports.
async fn watch_lid_inner(&self) {
match self.report_event.wait_handle(zx::Signals::USER_0, zx::Time::INFINITE) {
Err(e) => error!("Could not wait for lid event: {:?}", e),
_ => match self.check_report().await {
Ok(()) => (),
Err(e) => {
self.inspect.read_errors.add(1);
self.inspect.last_read_error.set(format!("{}", e).as_str());
error!("Failed to read lid report: {}", e)
}
},
};
}
/// Reads the report from the lid sensor and sends shutdown signal if lid is closed.
async fn check_report(&self) -> Result<(), Error> {
let (status, report, _time) = self.proxy.read_report().await?;
let status = zx::Status::from_raw(status);
if status!= zx::Status::OK {
return Err(format_err!("Error reading report {}", status));
}
if report.len()!= 1 {
return Err(format_err!("Expected single byte report, found {:?}", report));
}
self.inspect.log_lid_report(format!("{:?}", report));
let report = report[0];
if report == LID_CLOSED {
info!("Lid closed. Shutting down...");
self.send_message(
&self.system_shutdown_node,
&Message::SystemShutdown(ShutdownRequest::PowerOff),
)
.await
.map_err(|e| format_err!("Failed to shut down the system: {:?}", e))?;
}
Ok(())
}
}
#[async_trait(?Send)]
impl Node for LidShutdown {
fn name(&self) -> String {
"LidShutdown".to_string()
}
async fn handle_message(&self, _msg: &Message) -> Result<MessageReturn, PowerManagerError> {
Err(PowerManagerError::Unsupported)
}
}
struct InspectData {
lid_reports: RefCell<BoundedListNode>,
read_errors: inspect::UintProperty,
last_read_error: inspect::StringProperty,
}
impl InspectData {
/// Number of inspect samples to store in the `lid_reports` BoundedListNode.
// Store the last 60 lid reports
const NUM_INSPECT_LID_REPORTS: usize = 60;
fn new(parent: &inspect::Node, name: String) -> Self {
// Create a local root node and properties
let root = parent.create_child(name);
let lid_reports = RefCell::new(BoundedListNode::new(
root.create_child("lid_reports"),
Self::NUM_INSPECT_LID_REPORTS,
));
let read_errors = root.create_uint("read_lid_report_error_count", 0);
let last_read_error = root.create_string("last_read_error", "");
// Pass ownership of the new node to the parent node, otherwise it'll be dropped
parent.record(root);
InspectData { lid_reports, read_errors, last_read_error }
}
fn log_lid_report(&self, lid_report: String) {
inspect_log!(self.lid_reports.borrow_mut(), lid_report: lid_report);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::mock_node::{create_dummy_node, MessageMatcher, MockNodeMaker};
use crate::{msg_eq, msg_ok_return};
use fidl_fuchsia_hardware_input as finput;
use fuchsia_async as fasync;
use fuchsia_inspect::testing::TreeAssertion;
use fuchsia_zircon::HandleBased;
use inspect::assert_data_tree;
const LID_OPEN: u8 = 0x1;
/// Spawns a new task that acts as a fake device driver for testing purposes. The driver only
/// handles requests for ReadReport - trying to send any other requests to it is a bug.
/// Each ReadReport responds with the |lid_report| specified.
fn setup_fake_driver(lid_report: u8) -> LidProxy {
let (proxy, mut stream) = fidl::endpoints::create_proxy_and_stream::<LidMarker>().unwrap();
fasync::Task::local(async move {
while let Ok(req) = stream.try_next().await {
match req {
Some(finput::DeviceRequest::ReadReport { responder }) => {
let _ = responder.send(zx::Status::OK.into_raw(), &[lid_report], 0 as i64);
}
_ => assert!(false),
}
}
})
.detach();
proxy
}
/// Tests that well-formed configuration JSON does not panic the `new_from_json` function.
#[fasync::run_singlethreaded(test)]
async fn test_new_from_json() {
let json_data = json::json!({
"type": "LidShutdown",
"name": "lid_shutdown",
"dependencies": {
"system_shutdown_node": "shutdown",
},
});
let mut nodes: HashMap<String, Rc<dyn Node>> = HashMap::new();
nodes.insert("shutdown".to_string(), create_dummy_node());
let _ = LidShutdownBuilder::new_from_json(json_data, &nodes);
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid closed report, it triggers a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_triggered_shutdown() | event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker goes out of scope, it verifies the ShutdownNode received the shutdown
// request.
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid open report, it does NOT trigger a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_event_handling() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![], // the shutdown node is not expected to receive any messages
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_OPEN),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker will verify that the ShutdownNode receives no messages until it goes
// out of scope.
}
/// Tests that an unsupported message is handled gracefully and an error is returned.
#[fasync::run_singlethreaded(test)]
async fn test_unsupported_msg() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
zx::Event::create().unwrap(),
shutdown_node,
)
.build(&node_futures)
.await
.unwrap();
match node.handle_message(&Message::GetCpuLoads).await {
Err(PowerManagerError::Unsupported) => {}
e => panic!("Unexpected return value: {:?}", e),
}
}
/// Tests for the presence and correctness of dynamically-added inspect data
#[fasync::run_singlethreaded(test)]
async fn test_inspect_data() {
let lid_state = LID_OPEN;
let inspector = inspect::Inspector::new();
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let event = zx::Event::create().unwrap();
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(lid_state),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.with_inspect_root(inspector.root())
.build(&node_futures)
.await
.unwrap();
// The node will read the current temperature and log the sample into Inspect. Read enough
// samples to test that the correct number of samples are logged and older ones are dropped.
for _ in 0..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
}
let mut root = TreeAssertion::new("LidShutdown", false);
let mut lid_reports = TreeAssertion::new("lid_reports", true);
// Since we read 10 more samples than our limit allows, the first 10 should be dropped. So
// test that the sample numbering starts at 10 and continues for the expected number of
// samples.
for i in 10..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
let mut sample_child = TreeAssertion::new(&i.to_string(), true);
sample_child
.add_property_assertion("lid_report", Box::new(format!("{:?}", [lid_state])));
sample_child.add_property_assertion("@time", Box::new(inspect::testing::AnyProperty));
lid_reports.add_child_assertion(sample_child);
}
root.add_child_assertion(lid_reports);
assert_data_tree!(inspector, root: { root, });
}
}
| {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![(
msg_eq!(SystemShutdown(ShutdownRequest::PowerOff)),
msg_ok_return!(SystemShutdown),
)],
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
| identifier_body |
lid_shutdown.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::error::PowerManagerError;
use crate::message::{Message, MessageReturn};
use crate::node::Node;
use crate::shutdown_request::ShutdownRequest;
use crate::utils::connect_to_driver;
use anyhow::{format_err, Error};
use async_trait::async_trait;
use fidl_fuchsia_hardware_input::{DeviceMarker as LidMarker, DeviceProxy as LidProxy};
use fuchsia_inspect::{self as inspect, NumericProperty, Property};
use fuchsia_inspect_contrib::{inspect_log, nodes::BoundedListNode};
use fuchsia_vfs_watcher as vfs;
use fuchsia_zircon::{self as zx, AsHandleRef};
use futures::{
future::{FutureExt, LocalBoxFuture},
stream::FuturesUnordered,
TryStreamExt,
};
use io_util::{open_directory_in_namespace, OPEN_RIGHT_READABLE};
use log::*;
use serde_derive::Deserialize;
use serde_json as json;
use std::{
cell::RefCell,
collections::HashMap,
path::{Path, PathBuf},
rc::Rc,
};
/// Node: LidShutdown
///
/// Summary: Responds to lid closed events from devices with a lid sensor by waiting for a report
/// using the input FIDL protocol.
///
/// Handles Messages: N/A
///
/// Sends Messages:
/// - SystemShutdown
///
/// FIDL dependencies:
/// - fuchsia.hardware.input: the node uses this protocol to wait on reports from the
/// lid device
/// The lid sensor is not a real HID device however this descriptor is defined in the lid driver
/// and will be used to send lid HID reports for any ACPI lid sensor.
const HID_LID_DESCRIPTOR: [u8; 9] = [
0x05, 0x01, // Usage Page (Generic Desktop)
0x09, 0x80, // Usage (System Control)
0xA1, 0x01, // Collection (Application)
0x0A, 0xFF, 0x01, // Usage (0x01FF, unique to lid reports)
];
// Lid closed HID report
const LID_CLOSED: u8 = 0x0;
static INPUT_DEVICES_DIRECTORY: &str = "/dev/class/input";
/// Builder for `LidShutdown`. Production code uses `new`/`new_from_json`;
/// tests inject a fake driver proxy and event via `new_with_event_and_proxy`.
pub struct LidShutdownBuilder<'a> {
    // Connection to the lid input device; discovered during `build` when `None`.
    proxy: Option<LidProxy>,
    // Event signaled with |USER_0| when a lid report is available; fetched from
    // the driver during `build` when `None`.
    lid_report_event: Option<zx::Event>,
    // Node that receives the SystemShutdown message when the lid closes.
    system_shutdown_node: Rc<dyn Node>,
    // Inspect root override for tests; defaults to the component inspector root.
    inspect_root: Option<&'a inspect::Node>,
}
impl<'a> LidShutdownBuilder<'a> {
pub fn new(system_shutdown_node: Rc<dyn Node>) -> Self {
LidShutdownBuilder {
proxy: None,
lid_report_event: None,
system_shutdown_node,
inspect_root: None,
}
}
#[cfg(test)]
pub fn new_with_event_and_proxy(
proxy: LidProxy,
lid_report_event: zx::Event,
system_shutdown_node: Rc<dyn Node>,
) -> Self {
Self {
proxy: Some(proxy),
lid_report_event: Some(lid_report_event),
system_shutdown_node,
inspect_root: None,
}
}
pub fn new_from_json(json_data: json::Value, nodes: &HashMap<String, Rc<dyn Node>>) -> Self {
#[derive(Deserialize)]
struct Dependencies {
system_shutdown_node: String,
}
#[derive(Deserialize)]
struct JsonData {
dependencies: Dependencies,
}
let data: JsonData = json::from_value(json_data).unwrap();
Self::new(nodes[&data.dependencies.system_shutdown_node].clone())
}
#[cfg(test)]
pub fn with_inspect_root(mut self, root: &'a inspect::Node) -> Self {
self.inspect_root = Some(root);
self
}
    /// Finalizes the builder: resolves the lid device proxy and report event
    /// (unless injected by a test), wires up Inspect, and pushes the
    /// never-completing lid-watching future onto `futures_out`.
    ///
    /// # Errors
    ///
    /// Returns an error if no lid sensor device is found or the driver's
    /// report event cannot be retrieved.
    pub async fn build<'b>(
        self,
        futures_out: &FuturesUnordered<LocalBoxFuture<'b, ()>>,
    ) -> Result<Rc<LidShutdown>, Error> {
        // Use the proxy injected by tests, or discover the real lid sensor.
        let proxy = match self.proxy {
            Some(proxy) => proxy,
            None => Self::find_lid_sensor().await?,
        };
        // Use the event injected by tests, or fetch it from the driver.
        let report_event = match self.lid_report_event {
            Some(report_event) => report_event,
            None => match proxy.get_reports_event().await {
                Ok((_, report_event)) => report_event,
                Err(_e) => return Err(format_err!("Could not get report event.")),
            },
        };
        // Use the Inspect root injected by tests, or the component-global one.
        let inspect_root = self.inspect_root.unwrap_or(inspect::component::inspector().root());
        let node = Rc::new(LidShutdown {
            proxy,
            report_event,
            system_shutdown_node: self.system_shutdown_node,
            inspect: InspectData::new(inspect_root, "LidShutdown".to_string()),
        });
        futures_out.push(node.clone().watch_lid());
        Ok(node)
    }
/// Checks all the input devices until the lid sensor is found.
async fn find_lid_sensor() -> Result<LidProxy, Error> {
let dir_proxy = open_directory_in_namespace(INPUT_DEVICES_DIRECTORY, OPEN_RIGHT_READABLE)?;
let mut watcher = vfs::Watcher::new(dir_proxy).await?;
while let Some(msg) = watcher.try_next().await? {
match msg.event {
vfs::WatchEvent::EXISTING | vfs::WatchEvent::ADD_FILE => |
_ => (),
}
}
Err(format_err!("No lid device found"))
}
/// Opens the sensor's device file. Returns the device if the correct HID
/// report descriptor is found.
async fn open_sensor(filename: &PathBuf) -> Result<LidProxy, Error> {
let path = Path::new(INPUT_DEVICES_DIRECTORY).join(filename);
let device = connect_to_driver::<LidMarker>(&String::from(
path.to_str().ok_or(format_err!("Could not read path {:?}", path))?,
))
.await?;
if let Ok(device_descriptor) = device.get_report_desc().await {
if device_descriptor.len() < HID_LID_DESCRIPTOR.len() {
return Err(format_err!("Short HID header"));
}
let device_header = &device_descriptor[0..HID_LID_DESCRIPTOR.len()];
if device_header == HID_LID_DESCRIPTOR {
return Ok(device);
} else {
return Err(format_err!("Device is not lid sensor"));
}
}
Err(format_err!("Could not get device HID report descriptor"))
}
}
/// Node that powers off the system when the lid sensor reports "closed".
pub struct LidShutdown {
    // Connection to the lid input device driver.
    proxy: LidProxy,
    /// Event that will signal |USER_0| when a report is in the lid device's report FIFO.
    report_event: zx::Event,
    /// Node to provide the system shutdown functionality via the SystemShutdown message.
    system_shutdown_node: Rc<dyn Node>,
    /// A struct for managing Component Inspection data
    inspect: InspectData,
}
impl LidShutdown {
    /// Returns a never-completing future that repeatedly waits for and
    /// processes lid reports for the lifetime of the node.
    fn watch_lid<'a>(self: Rc<Self>) -> LocalBoxFuture<'a, ()> {
        async move {
            loop {
                self.watch_lid_inner().await;
            }
        }
        .boxed_local()
    }
    /// Watches the lid device for reports.
    ///
    /// Waits until the driver signals |USER_0| on `report_event` (a report is
    /// queued in its FIFO), then reads and handles the report. Read failures
    /// are counted and recorded in Inspect and logged, not propagated.
    // NOTE(review): `wait_handle` is a synchronous wait inside an async fn —
    // presumably acceptable on this component's executor; confirm with the
    // runtime setup.
    async fn watch_lid_inner(&self) {
        match self.report_event.wait_handle(zx::Signals::USER_0, zx::Time::INFINITE) {
            Err(e) => error!("Could not wait for lid event: {:?}", e),
            _ => match self.check_report().await {
                Ok(()) => (),
                Err(e) => {
                    // Surface read failures in Inspect for diagnostics.
                    self.inspect.read_errors.add(1);
                    self.inspect.last_read_error.set(format!("{}", e).as_str());
                    error!("Failed to read lid report: {}", e)
                }
            },
        };
    }
    /// Reads the report from the lid sensor and sends shutdown signal if lid is closed.
    ///
    /// # Errors
    ///
    /// Returns an error if the driver reports a non-OK status, the report is
    /// not exactly one byte, or the shutdown message fails to send.
    async fn check_report(&self) -> Result<(), Error> {
        let (status, report, _time) = self.proxy.read_report().await?;
        let status = zx::Status::from_raw(status);
        if status!= zx::Status::OK {
            return Err(format_err!("Error reading report {}", status));
        }
        // The lid sensor produces single-byte reports (see HID_LID_DESCRIPTOR).
        if report.len()!= 1 {
            return Err(format_err!("Expected single byte report, found {:?}", report));
        }
        // Record every report in Inspect before acting on it.
        self.inspect.log_lid_report(format!("{:?}", report));
        let report = report[0];
        if report == LID_CLOSED {
            info!("Lid closed. Shutting down...");
            self.send_message(
                &self.system_shutdown_node,
                &Message::SystemShutdown(ShutdownRequest::PowerOff),
            )
            .await
            .map_err(|e| format_err!("Failed to shut down the system: {:?}", e))?;
        }
        Ok(())
    }
}
#[async_trait(?Send)]
impl Node for LidShutdown {
    /// Human-readable node name used in logs and message routing.
    fn name(&self) -> String {
        "LidShutdown".to_string()
    }
    /// This node handles no messages; every request yields `Unsupported`.
    async fn handle_message(&self, _msg: &Message) -> Result<MessageReturn, PowerManagerError> {
        Err(PowerManagerError::Unsupported)
    }
}
/// Container for the Inspect nodes/properties this node publishes for diagnostics.
struct InspectData {
    // Rolling log of the most recent lid HID reports (bounded; oldest dropped first).
    // RefCell because logging callers only hold `&self`.
    lid_reports: RefCell<BoundedListNode>,
    // Running count of failed lid-report reads.
    read_errors: inspect::UintProperty,
    // Human-readable text of the most recent read error.
    last_read_error: inspect::StringProperty,
}
impl InspectData {
    /// Capacity of the `lid_reports` rolling log; once full, the oldest
    /// sample is evicted for each newly logged one.
    const NUM_INSPECT_LID_REPORTS: usize = 60;
    /// Builds the Inspect tree under `parent`, rooted at a child named `name`.
    fn new(parent: &inspect::Node, name: String) -> Self {
        let local_root = parent.create_child(name);
        let data = InspectData {
            lid_reports: RefCell::new(BoundedListNode::new(
                local_root.create_child("lid_reports"),
                Self::NUM_INSPECT_LID_REPORTS,
            )),
            read_errors: local_root.create_uint("read_lid_report_error_count", 0),
            last_read_error: local_root.create_string("last_read_error", ""),
        };
        // Transfer ownership of the local root to the parent so it isn't dropped
        // when this scope ends.
        parent.record(local_root);
        data
    }
    /// Appends one lid report sample (with a timestamp) to the rolling log.
    fn log_lid_report(&self, lid_report: String) {
        inspect_log!(self.lid_reports.borrow_mut(), lid_report: lid_report);
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::mock_node::{create_dummy_node, MessageMatcher, MockNodeMaker};
use crate::{msg_eq, msg_ok_return};
use fidl_fuchsia_hardware_input as finput;
use fuchsia_async as fasync;
use fuchsia_inspect::testing::TreeAssertion;
use fuchsia_zircon::HandleBased;
use inspect::assert_data_tree;
const LID_OPEN: u8 = 0x1;
/// Spawns a new task that acts as a fake device driver for testing purposes. The driver only
/// handles requests for ReadReport - trying to send any other requests to it is a bug.
/// Each ReadReport responds with the |lid_report| specified.
fn setup_fake_driver(lid_report: u8) -> LidProxy {
let (proxy, mut stream) = fidl::endpoints::create_proxy_and_stream::<LidMarker>().unwrap();
fasync::Task::local(async move {
while let Ok(req) = stream.try_next().await {
match req {
Some(finput::DeviceRequest::ReadReport { responder }) => {
let _ = responder.send(zx::Status::OK.into_raw(), &[lid_report], 0 as i64);
}
_ => assert!(false),
}
}
})
.detach();
proxy
}
/// Tests that well-formed configuration JSON does not panic the `new_from_json` function.
#[fasync::run_singlethreaded(test)]
async fn test_new_from_json() {
let json_data = json::json!({
"type": "LidShutdown",
"name": "lid_shutdown",
"dependencies": {
"system_shutdown_node": "shutdown",
},
});
let mut nodes: HashMap<String, Rc<dyn Node>> = HashMap::new();
nodes.insert("shutdown".to_string(), create_dummy_node());
let _ = LidShutdownBuilder::new_from_json(json_data, &nodes);
}
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid closed report, it triggers a system shutdown.
    #[fasync::run_singlethreaded(test)]
    async fn test_triggered_shutdown() {
        let mut mock_maker = MockNodeMaker::new();
        // The mock shutdown node expects exactly one PowerOff request.
        let shutdown_node = mock_maker.make(
            "Shutdown",
            vec![(
                msg_eq!(SystemShutdown(ShutdownRequest::PowerOff)),
                msg_ok_return!(SystemShutdown),
            )],
        );
        let event = zx::Event::create().unwrap();
        // Build the node against a fake driver that always reports "lid closed".
        let node = LidShutdownBuilder::new_with_event_and_proxy(
            setup_fake_driver(LID_CLOSED),
            event.duplicate_handle(zx::Rights::BASIC).unwrap(),
            shutdown_node,
        )
        .build(&FuturesUnordered::new())
        .await
        .unwrap();
        // Simulate the driver signaling that a report is ready, then run one
        // iteration of the watch loop.
        event
            .signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
            .expect("Failed to signal event");
        node.watch_lid_inner().await;
        // When mock_maker goes out of scope, it verifies the ShutdownNode received the shutdown
        // request.
    }
/// Tests that when the node receives a signal on its |report_event|, it checks for a lid
/// report and, on reception of a lid open report, it does NOT trigger a system shutdown.
#[fasync::run_singlethreaded(test)]
async fn test_event_handling() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make(
"Shutdown",
vec![], // the shutdown node is not expected to receive any messages
);
let event = zx::Event::create().unwrap();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_OPEN),
event.duplicate_handle(zx::Rights::BASIC).unwrap(),
shutdown_node,
)
.build(&FuturesUnordered::new())
.await
.unwrap();
event
.signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
.expect("Failed to signal event");
node.watch_lid_inner().await;
// When mock_maker will verify that the ShutdownNode receives no messages until it goes
// out of scope.
}
/// Tests that an unsupported message is handled gracefully and an error is returned.
#[fasync::run_singlethreaded(test)]
async fn test_unsupported_msg() {
let mut mock_maker = MockNodeMaker::new();
let shutdown_node = mock_maker.make("Shutdown", vec![]);
let node_futures = FuturesUnordered::new();
let node = LidShutdownBuilder::new_with_event_and_proxy(
setup_fake_driver(LID_CLOSED),
zx::Event::create().unwrap(),
shutdown_node,
)
.build(&node_futures)
.await
.unwrap();
match node.handle_message(&Message::GetCpuLoads).await {
Err(PowerManagerError::Unsupported) => {}
e => panic!("Unexpected return value: {:?}", e),
}
}
/// Tests for the presence and correctness of dynamically-added inspect data
    #[fasync::run_singlethreaded(test)]
    async fn test_inspect_data() {
        let lid_state = LID_OPEN;
        let inspector = inspect::Inspector::new();
        let mut mock_maker = MockNodeMaker::new();
        // LID_OPEN reports must not trigger any shutdown messages.
        let shutdown_node = mock_maker.make("Shutdown", vec![]);
        let event = zx::Event::create().unwrap();
        let node_futures = FuturesUnordered::new();
        let node = LidShutdownBuilder::new_with_event_and_proxy(
            setup_fake_driver(lid_state),
            event.duplicate_handle(zx::Rights::BASIC).unwrap(),
            shutdown_node,
        )
        .with_inspect_root(inspector.root())
        .build(&node_futures)
        .await
        .unwrap();
        // The node will read the current temperature and log the sample into Inspect. Read enough
        // samples to test that the correct number of samples are logged and older ones are dropped.
        for _ in 0..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
            event
                .signal_handle(zx::Signals::NONE, zx::Signals::USER_0)
                .expect("Failed to signal event");
            node.watch_lid_inner().await;
        }
        let mut root = TreeAssertion::new("LidShutdown", false);
        let mut lid_reports = TreeAssertion::new("lid_reports", true);
        // Since we read 10 more samples than our limit allows, the first 10 should be dropped. So
        // test that the sample numbering starts at 10 and continues for the expected number of
        // samples.
        for i in 10..InspectData::NUM_INSPECT_LID_REPORTS + 10 {
            let mut sample_child = TreeAssertion::new(&i.to_string(), true);
            sample_child
                .add_property_assertion("lid_report", Box::new(format!("{:?}", [lid_state])));
            sample_child.add_property_assertion("@time", Box::new(inspect::testing::AnyProperty));
            lid_reports.add_child_assertion(sample_child);
        }
        root.add_child_assertion(lid_reports);
        assert_data_tree!(inspector, root: { root, });
    }
}
| {
match Self::open_sensor(&msg.filename).await {
Ok(device) => return Ok(device),
_ => (),
}
} | conditional_block |
lib.rs | #![deny(
// missing_copy_implementations,
// missing_debug_implementations,
// missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
// #![warn(rust_2018_idioms)]
#![doc(test(attr(deny(
missing_copy_implementations,
missing_debug_implementations,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences,
))))]
#![doc(test(attr(warn(rust_2018_idioms))))]
// Not needed for 2018 edition and conflicts with `rust_2018_idioms`
#![doc(test(no_crate_inject))]
#![doc(html_root_url = "https://docs.rs/serde_with/1.5.0-alpha.1")]
//! [](https://docs.rs/serde_with/)
//! [](https://crates.io/crates/serde_with/)
//! [](https://github.com/jonasbb/serde_with)
//! [](https://codecov.io/gh/jonasbb/serde_with)
//!
//! ---
//!
//! This crate provides custom de/serialization helpers to use in combination with [serde's with-annotation][with-annotation] and with the improved [`serde_as`][]-annotation.
//! Some common use cases are:
//!
//! * De/Serializing a type using the `Display` and `FromStr` traits, e.g., for `u8`, `url::Url`, or `mime::Mime`.
//! Check [`DisplayFromStr`][] or [`serde_with::rust::display_fromstr`][display_fromstr] for details.
//! * Skip serializing all empty `Option` types with [`#[skip_serializing_none]`][skip_serializing_none].
//! * Apply a prefix to each fieldname of a struct, without changing the de/serialize implementations of the struct using [`with_prefix!`][].
//! * Deserialize a comma separated list like `#hash,#tags,#are,#great` into a `Vec<String>`.
//! Check the documentation for [`serde_with::rust::StringWithSeparator::<CommaSeparator>`][StringWithSeparator].
//!
//! Check out the [**user guide**][user guide] to find out more tips and tricks about this crate.
//!
//! # Use `serde_with` in your Project
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies.serde_with]
//! version = "1.5.0-alpha.1"
//! features = [ "..." ]
//! ```
//!
//! The crate contains different features for integration with other common crates.
//! Check the [feature flags][] section for information about all available features.
//!
//! # Examples
//!
//! Annotate your struct or enum to enable the custom de/serializer.
//!
//! ## `DisplayFromStr`
//!
//! ```rust
//! # #[cfg(feature = "macros")]
//! # use serde_derive::{Deserialize, Serialize};
//! # #[cfg(feature = "macros")]
//! # use serde_with::{serde_as, DisplayFromStr};
//! # #[cfg(feature = "macros")]
//! #[serde_as]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! // Serialize with Display, deserialize with FromStr
//! #[serde_as(as = "DisplayFromStr")]
//! bar: u8,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json"))] {
//! // This will serialize
//! # let foo =
//! Foo {bar: 12}
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {"bar": "12"}
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! ## `skip_serializing_none`
//!
//! This situation often occurs with JSON, but other formats also support optional fields.
//! If many fields are optional, putting the annotations on the structs can become tedious.
//!
//! ```rust
//! # #[cfg(feature = "macros")]
//! # use serde_derive::{Deserialize, Serialize};
//! # #[cfg(feature = "macros")]
//! # use serde_with::{skip_serializing_none, DisplayFromStr};
//! # #[cfg(feature = "macros")]
//! #[skip_serializing_none]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! a: Option<usize>,
//! b: Option<usize>,
//! c: Option<usize>,
//! d: Option<usize>,
//! e: Option<usize>,
//! f: Option<usize>,
//! g: Option<usize>,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json"))] {
//! // This will serialize
//! # let foo =
//! Foo {a: None, b: None, c: None, d: Some(4), e: None, f: None, g: Some(7)}
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {"d": 4, "g": 7}
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! ## Advanced `serde_as` usage
//!
//! This example is mainly supposed to highlight the flexibility of the `serde_as`-annotation compared to [serde's with-annotation][with-annotation].
//! More details about `serde_as` can be found in the [user guide][].
//!
//!
//! ```rust
//! # #[cfg(all(feature = "macros", feature = "hex"))]
//! # use {
//! # serde_derive::{Deserialize, Serialize},
//! # serde_with::{serde_as, DisplayFromStr, DurationSeconds, hex::Hex},
//! # std::time::Duration,
//! # std::collections::BTreeMap,
//! # };
//! # #[cfg(all(feature = "macros", feature = "hex"))]
//! #[serde_as]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! // Serialize them into a list of number as seconds
//! #[serde_as(as = "Vec<DurationSeconds>")]
//! durations: Vec<Duration>,
//! // We can treat a Vec like a map with duplicates.
//! // JSON only allows string keys, so convert i32 to strings
//! // The bytes will be hex encoded
//! #[serde_as(as = "BTreeMap<DisplayFromStr, Hex>")]
//! bytes: Vec<(i32, Vec<u8>)>,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json", feature = "hex"))] {
//! // This will serialize
//! # let foo =
//! Foo {
//! durations: vec![Duration::new(5, 0), Duration::new(3600, 0), Duration::new(0, 0)],
//! bytes: vec![
//! (1, vec![0, 1, 2]),
//! (-100, vec![100, 200, 255]),
//! (1, vec![0, 111, 222]),
//! ],
//! }
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {
//! "durations": [5, 3600, 0],
//! "bytes": {
//! "1": "000102",
//! "-100": "64c8ff",
//! "1": "006fde"
//! }
//! }
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! [`DisplayFromStr`]: https://docs.rs/serde_with/*/serde_with/struct.DisplayFromStr.html
//! [`serde_as`]: https://docs.rs/serde_with/*/serde_with/guide/index.html
//! [`with_prefix!`]: https://docs.rs/serde_with/*/serde_with/macro.with_prefix.html
//! [display_fromstr]: https://docs.rs/serde_with/*/serde_with/rust/display_fromstr/index.html
//! [feature flags]: https://docs.rs/serde_with/*/serde_with/guide/feature_flags/index.html
//! [skip_serializing_none]: https://docs.rs/serde_with/*/serde_with/attr.skip_serializing_none.html
//! [StringWithSeparator]: https://docs.rs/serde_with/*/serde_with/rust/struct.StringWithSeparator.html
//! [user guide]: https://docs.rs/serde_with/*/serde_with/guide/index.html
//! [with-annotation]: https://serde.rs/field-attrs.html#with
#[doc(hidden)]
pub extern crate serde;
#[cfg(feature = "chrono")]
pub mod chrono;
pub mod de;
mod duplicate_key_impls;
mod flatten_maybe;
pub mod formats;
#[cfg(feature = "hex")]
pub mod hex;
#[cfg(feature = "json")]
pub mod json;
pub mod rust;
pub mod ser;
mod utils;
#[doc(hidden)]
pub mod with_prefix;
// Taken from shepmaster/snafu
// Originally licensed as MIT+Apache 2
// https://github.com/shepmaster/snafu/blob/fd37d79d4531ed1d3eebffad0d658928eb860cfe/src/lib.rs#L121-L165
#[cfg(feature = "guide")]
macro_rules! generate_guide {
(pub mod $name:ident; $($rest:tt)*) => {
generate_guide!(@gen ".", pub mod $name { } $($rest)*);
};
(pub mod $name:ident { $($children:tt)* } $($rest:tt)*) => {
generate_guide!(@gen ".", pub mod $name { $($children)* } $($rest)*);
};
(@gen $prefix:expr, ) => {};
(@gen $prefix:expr, pub mod $name:ident; $($rest:tt)*) => {
generate_guide!(@gen $prefix, pub mod $name { } $($rest)*);
};
(@gen $prefix:expr, @code pub mod $name:ident; $($rest:tt)*) => {
pub mod $name;
generate_guide!(@gen $prefix, $($rest)*);
};
(@gen $prefix:expr, pub mod $name:ident { $($children:tt)* } $($rest:tt)*) => {
doc_comment::doc_comment! {
include_str!(concat!($prefix, "/", stringify!($name), ".md")),
pub mod $name {
generate_guide!(@gen concat!($prefix, "/", stringify!($name)), $($children)*);
}
}
generate_guide!(@gen $prefix, $($rest)*);
};
}
#[cfg(feature = "guide")]
generate_guide! {
pub mod guide {
pub mod migrating;
pub mod feature_flags;
}
}
#[doc(inline)]
pub use crate::{de::DeserializeAs, ser::SerializeAs};
use serde::{ser::Serialize, Deserializer, Serializer};
// Re-Export all proc_macros, as these should be seen as part of the serde_with crate
#[cfg(feature = "macros")]
#[doc(inline)]
pub use serde_with_macros::*;
use std::marker::PhantomData;
/// Separator for string-based collection de/serialization
pub trait Separator {
/// Return the string delimiting two elements in the string-based collection
fn separator() -> &'static str;
}
/// Predefined separator using a single space
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct | ;
impl Separator for SpaceSeparator {
#[inline]
fn separator() -> &'static str {
" "
}
}
/// Predefined separator using a single comma
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct CommaSeparator;
impl Separator for CommaSeparator {
#[inline]
fn separator() -> &'static str {
","
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct As<T>(PhantomData<T>);
impl<T> As<T> {
pub fn serialize<S, I>(value: &I, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: SerializeAs<I>,
{
T::serialize_as(value, serializer)
}
pub fn deserialize<'de, D, I>(deserializer: D) -> Result<I, D::Error>
where
T: DeserializeAs<'de, I>,
D: Deserializer<'de>,
{
T::deserialize_as(deserializer)
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct Same;
#[derive(Copy, Clone, Debug, Default)]
pub struct DisplayFromStr;
#[derive(Copy, Clone, Debug, Default)]
pub struct NoneAsEmptyString;
#[derive(Copy, Clone, Debug, Default)]
pub struct DefaultOnError<T>(PhantomData<T>);
#[derive(Copy, Clone, Debug, Default)]
pub struct BytesOrString;
#[derive(Copy, Clone, Debug, Default)]
pub struct DurationSeconds<
FORMAT: formats::Format = u64,
STRICTNESS: formats::Strictness = formats::Strict,
>(PhantomData<(FORMAT, STRICTNESS)>);
#[derive(Copy, Clone, Debug, Default)]
pub struct DurationSecondsWithFrac<
FORMAT: formats::Format = f64,
STRICTNESS: formats::Strictness = formats::Strict,
>(PhantomData<(FORMAT, STRICTNESS)>);
| SpaceSeparator | identifier_name |
lib.rs | #![deny(
// missing_copy_implementations,
// missing_debug_implementations,
// missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences
)]
// #![warn(rust_2018_idioms)]
#![doc(test(attr(deny(
missing_copy_implementations,
missing_debug_implementations,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
variant_size_differences,
))))]
#![doc(test(attr(warn(rust_2018_idioms))))]
// Not needed for 2018 edition and conflicts with `rust_2018_idioms`
#![doc(test(no_crate_inject))]
#![doc(html_root_url = "https://docs.rs/serde_with/1.5.0-alpha.1")]
//! [](https://docs.rs/serde_with/)
//! [](https://crates.io/crates/serde_with/)
//! [](https://github.com/jonasbb/serde_with)
//! [](https://codecov.io/gh/jonasbb/serde_with)
//!
//! ---
//!
//! This crate provides custom de/serialization helpers to use in combination with [serde's with-annotation][with-annotation] and with the improved [`serde_as`][]-annotation.
//! Some common use cases are:
//!
//! * De/Serializing a type using the `Display` and `FromStr` traits, e.g., for `u8`, `url::Url`, or `mime::Mime`.
//! Check [`DisplayFromStr`][] or [`serde_with::rust::display_fromstr`][display_fromstr] for details.
//! * Skip serializing all empty `Option` types with [`#[skip_serializing_none]`][skip_serializing_none].
//! * Apply a prefix to each fieldname of a struct, without changing the de/serialize implementations of the struct using [`with_prefix!`][].
//! * Deserialize a comma separated list like `#hash,#tags,#are,#great` into a `Vec<String>`.
//! Check the documentation for [`serde_with::rust::StringWithSeparator::<CommaSeparator>`][StringWithSeparator].
//!
//! Check out the [**user guide**][user guide] to find out more tips and tricks about this crate.
//!
//! # Use `serde_with` in your Project
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies.serde_with]
//! version = "1.5.0-alpha.1"
//! features = [ "..." ]
//! ```
//!
//! The crate contains different features for integration with other common crates.
//! Check the [feature flags][] section for information about all available features.
//!
//! # Examples
//!
//! Annotate your struct or enum to enable the custom de/serializer.
//!
//! ## `DisplayFromStr`
//!
//! ```rust
//! # #[cfg(feature = "macros")]
//! # use serde_derive::{Deserialize, Serialize};
//! # #[cfg(feature = "macros")]
//! # use serde_with::{serde_as, DisplayFromStr};
//! # #[cfg(feature = "macros")]
//! #[serde_as]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! // Serialize with Display, deserialize with FromStr
//! #[serde_as(as = "DisplayFromStr")]
//! bar: u8,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json"))] {
//! // This will serialize
//! # let foo =
//! Foo {bar: 12}
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {"bar": "12"}
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! ## `skip_serializing_none`
//!
//! This situation often occurs with JSON, but other formats also support optional fields.
//! If many fields are optional, putting the annotations on the structs can become tedious.
//!
//! ```rust
//! # #[cfg(feature = "macros")]
//! # use serde_derive::{Deserialize, Serialize};
//! # #[cfg(feature = "macros")]
//! # use serde_with::{skip_serializing_none, DisplayFromStr};
//! # #[cfg(feature = "macros")]
//! #[skip_serializing_none]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! a: Option<usize>,
//! b: Option<usize>,
//! c: Option<usize>,
//! d: Option<usize>,
//! e: Option<usize>,
//! f: Option<usize>,
//! g: Option<usize>,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json"))] {
//! // This will serialize
//! # let foo =
//! Foo {a: None, b: None, c: None, d: Some(4), e: None, f: None, g: Some(7)}
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {"d": 4, "g": 7}
//! # "#;
//! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! ## Advanced `serde_as` usage
//!
//! This example is mainly supposed to highlight the flexibility of the `serde_as`-annotation compared to [serde's with-annotation][with-annotation].
//! More details about `serde_as` can be found in the [user guide][].
//!
//!
//! ```rust
//! # #[cfg(all(feature = "macros", feature = "hex"))]
//! # use {
//! # serde_derive::{Deserialize, Serialize},
//! # serde_with::{serde_as, DisplayFromStr, DurationSeconds, hex::Hex},
//! # std::time::Duration,
//! # std::collections::BTreeMap,
//! # };
//! # #[cfg(all(feature = "macros", feature = "hex"))]
//! #[serde_as]
//! # #[derive(Debug, Eq, PartialEq)]
//! #[derive(Deserialize, Serialize)]
//! struct Foo {
//! // Serialize them into a list of number as seconds
//! #[serde_as(as = "Vec<DurationSeconds>")]
//! durations: Vec<Duration>,
//! // We can treat a Vec like a map with duplicates.
//! // JSON only allows string keys, so convert i32 to strings
//! // The bytes will be hex encoded
//! #[serde_as(as = "BTreeMap<DisplayFromStr, Hex>")]
//! bytes: Vec<(i32, Vec<u8>)>,
//! }
//!
//! # #[cfg(all(feature = "macros", feature = "json", feature = "hex"))] {
//! // This will serialize
//! # let foo =
//! Foo {
//! durations: vec![Duration::new(5, 0), Duration::new(3600, 0), Duration::new(0, 0)],
//! bytes: vec![
//! (1, vec![0, 1, 2]),
//! (-100, vec![100, 200, 255]),
//! (1, vec![0, 111, 222]),
//! ],
//! }
//! # ;
//!
//! // into this JSON
//! # let json = r#"
//! {
//! "durations": [5, 3600, 0],
//! "bytes": {
//! "1": "000102",
//! "-100": "64c8ff", | //! # assert_eq!(json.replace(" ", "").replace("\n", ""), serde_json::to_string(&foo).unwrap());
//! # assert_eq!(foo, serde_json::from_str(&json).unwrap());
//! # }
//! ```
//!
//! [`DisplayFromStr`]: https://docs.rs/serde_with/*/serde_with/struct.DisplayFromStr.html
//! [`serde_as`]: https://docs.rs/serde_with/*/serde_with/guide/index.html
//! [`with_prefix!`]: https://docs.rs/serde_with/*/serde_with/macro.with_prefix.html
//! [display_fromstr]: https://docs.rs/serde_with/*/serde_with/rust/display_fromstr/index.html
//! [feature flags]: https://docs.rs/serde_with/*/serde_with/guide/feature_flags/index.html
//! [skip_serializing_none]: https://docs.rs/serde_with/*/serde_with/attr.skip_serializing_none.html
//! [StringWithSeparator]: https://docs.rs/serde_with/*/serde_with/rust/struct.StringWithSeparator.html
//! [user guide]: https://docs.rs/serde_with/*/serde_with/guide/index.html
//! [with-annotation]: https://serde.rs/field-attrs.html#with
#[doc(hidden)]
pub extern crate serde;
#[cfg(feature = "chrono")]
pub mod chrono;
pub mod de;
mod duplicate_key_impls;
mod flatten_maybe;
pub mod formats;
#[cfg(feature = "hex")]
pub mod hex;
#[cfg(feature = "json")]
pub mod json;
pub mod rust;
pub mod ser;
mod utils;
#[doc(hidden)]
pub mod with_prefix;
// Taken from shepmaster/snafu
// Originally licensed as MIT+Apache 2
// https://github.com/shepmaster/snafu/blob/fd37d79d4531ed1d3eebffad0d658928eb860cfe/src/lib.rs#L121-L165
#[cfg(feature = "guide")]
macro_rules! generate_guide {
(pub mod $name:ident; $($rest:tt)*) => {
generate_guide!(@gen ".", pub mod $name { } $($rest)*);
};
(pub mod $name:ident { $($children:tt)* } $($rest:tt)*) => {
generate_guide!(@gen ".", pub mod $name { $($children)* } $($rest)*);
};
(@gen $prefix:expr, ) => {};
(@gen $prefix:expr, pub mod $name:ident; $($rest:tt)*) => {
generate_guide!(@gen $prefix, pub mod $name { } $($rest)*);
};
(@gen $prefix:expr, @code pub mod $name:ident; $($rest:tt)*) => {
pub mod $name;
generate_guide!(@gen $prefix, $($rest)*);
};
(@gen $prefix:expr, pub mod $name:ident { $($children:tt)* } $($rest:tt)*) => {
doc_comment::doc_comment! {
include_str!(concat!($prefix, "/", stringify!($name), ".md")),
pub mod $name {
generate_guide!(@gen concat!($prefix, "/", stringify!($name)), $($children)*);
}
}
generate_guide!(@gen $prefix, $($rest)*);
};
}
#[cfg(feature = "guide")]
generate_guide! {
pub mod guide {
pub mod migrating;
pub mod feature_flags;
}
}
#[doc(inline)]
pub use crate::{de::DeserializeAs, ser::SerializeAs};
use serde::{ser::Serialize, Deserializer, Serializer};
// Re-Export all proc_macros, as these should be seen as part of the serde_with crate
#[cfg(feature = "macros")]
#[doc(inline)]
pub use serde_with_macros::*;
use std::marker::PhantomData;
/// Separator for string-based collection de/serialization
pub trait Separator {
/// Return the string delimiting two elements in the string-based collection
fn separator() -> &'static str;
}
/// Predefined separator using a single space
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct SpaceSeparator;
impl Separator for SpaceSeparator {
#[inline]
fn separator() -> &'static str {
" "
}
}
/// Predefined separator using a single comma
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct CommaSeparator;
impl Separator for CommaSeparator {
#[inline]
fn separator() -> &'static str {
","
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct As<T>(PhantomData<T>);
impl<T> As<T> {
pub fn serialize<S, I>(value: &I, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: SerializeAs<I>,
{
T::serialize_as(value, serializer)
}
pub fn deserialize<'de, D, I>(deserializer: D) -> Result<I, D::Error>
where
T: DeserializeAs<'de, I>,
D: Deserializer<'de>,
{
T::deserialize_as(deserializer)
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct Same;
#[derive(Copy, Clone, Debug, Default)]
pub struct DisplayFromStr;
#[derive(Copy, Clone, Debug, Default)]
pub struct NoneAsEmptyString;
#[derive(Copy, Clone, Debug, Default)]
pub struct DefaultOnError<T>(PhantomData<T>);
#[derive(Copy, Clone, Debug, Default)]
pub struct BytesOrString;
#[derive(Copy, Clone, Debug, Default)]
pub struct DurationSeconds<
FORMAT: formats::Format = u64,
STRICTNESS: formats::Strictness = formats::Strict,
>(PhantomData<(FORMAT, STRICTNESS)>);
#[derive(Copy, Clone, Debug, Default)]
pub struct DurationSecondsWithFrac<
FORMAT: formats::Format = f64,
STRICTNESS: formats::Strictness = formats::Strict,
>(PhantomData<(FORMAT, STRICTNESS)>); | //! "1": "006fde"
//! }
//! }
//! # "#; | random_line_split |
notification_client.rs | app.
///
/// In particular, it takes care of running a full decryption sync, in case the
/// event in the notification was impossible to decrypt beforehand.
pub struct NotificationClient {
/// SDK client that uses an in-memory state store.
client: Client,
/// SDK client that uses the same state store as the caller's context.
parent_client: Client,
/// Should we retry decrypting an event, after it was impossible to decrypt
/// on the first attempt?
retry_decryption: bool,
/// Should the encryption sync happening in case the notification event was
/// encrypted use a cross-process lock?
///
/// Only meaningful if `retry_decryption` is true.
with_cross_process_lock: bool,
/// Should we try to filter out the notification event according to the push
/// rules?
filter_by_push_rules: bool,
/// A mutex to serialize requests to sliding sync.
///
/// If several notifications come in at the same time (e.g. network was
/// unreachable because of airplane mode or something similar), then we
/// need to make sure that repeated calls to `get_notification` won't
/// cause multiple requests with the same `conn_id` we're using for
/// notifications. This mutex solves this by sequentializing the requests.
sliding_sync_mutex: AsyncMutex<()>,
}
impl NotificationClient {
const CONNECTION_ID: &str = "notifications";
const LOCK_ID: &str = "notifications";
/// Create a new builder for a notification client.
pub async fn builder(client: Client) -> Result<NotificationClientBuilder, Error> {
NotificationClientBuilder::new(client).await
}
/// Fetches the content of a notification.
///
/// This will first try to get the notification using a short-lived sliding
/// sync, and if the sliding-sync can't find the event, then it'll use a
/// `/context` query to find the event with associated member information.
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
match self.get_notification_with_sliding_sync(room_id, event_id).await? {
NotificationStatus::Event(event) => Ok(Some(event)),
NotificationStatus::EventFilteredOut => Ok(None),
NotificationStatus::EventNotFound => {
self.get_notification_with_context(room_id, event_id).await
}
}
}
/// Run an encryption sync loop, in case an event is still encrypted.
///
/// Will return true if and only:
/// - retry_decryption was enabled,
/// - the event was encrypted,
/// - we successfully ran an encryption sync.
async fn maybe_retry_decryption(
&self,
room: &Room,
raw_event: &Raw<AnySyncTimelineEvent>,
) -> Result<Option<TimelineEvent>, Error> |
// The message is still encrypted, and the client is configured to retry
// decryption.
//
// Spawn an `EncryptionSync` that runs two iterations of the sliding sync loop:
// - the first iteration allows to get SS events as well as send e2ee requests.
// - the second one let the SS proxy forward events triggered by the sending of
// e2ee requests.
//
// Keep timeouts small for both, since we might be short on time.
let with_locking = WithLocking::from(self.with_cross_process_lock);
let encryption_sync = EncryptionSync::new(
Self::LOCK_ID.to_owned(),
self.client.clone(),
Some((Duration::from_secs(3), Duration::from_secs(4))),
with_locking,
)
.await;
// Just log out errors, but don't have them abort the notification processing:
// an undecrypted notification is still better than no
// notifications.
match encryption_sync {
Ok(sync) => match sync.run_fixed_iterations(2).await {
Ok(()) => {
let new_event = room.decrypt_event(raw_event.cast_ref()).await?;
Ok(Some(new_event))
}
Err(err) => {
tracing::warn!(
"error when running encryption_sync in get_notification: {err:#}"
);
Ok(None)
}
},
Err(err) => {
tracing::warn!("error when building encryption_sync in get_notification: {err:#}",);
Ok(None)
}
}
}
/// Try to run a sliding sync (without encryption) to retrieve the event
/// from the notification.
///
/// This works by requesting explicit state that'll be useful for building
/// the `NotificationItem`, and subscribing to the room which the
/// notification relates to.
async fn try_sliding_sync(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<RawNotificationEvent>, Error> {
// Serialize all the calls to this method by taking a lock at the beginning,
// that will be dropped later.
let _guard = self.sliding_sync_mutex.lock().await;
// Set up a sliding sync that only subscribes to the room that had the
// notification, so we can figure out the full event and associated
// information.
let notification = Arc::new(Mutex::new(None));
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let timeline_event_handler =
self.client.add_event_handler(move |raw: Raw<AnySyncTimelineEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() =
Some(RawNotificationEvent::Timeline(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let stripped_member_handler =
self.client.add_event_handler(move |raw: Raw<StrippedRoomMemberEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() = Some(RawNotificationEvent::Invite(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
// Room power levels are necessary to build the push context.
let required_state = vec![
(StateEventType::RoomAvatar, "".to_owned()),
(StateEventType::RoomEncryption, "".to_owned()),
(StateEventType::RoomMember, "$LAZY".to_owned()),
(StateEventType::RoomMember, "$ME".to_owned()),
(StateEventType::RoomCanonicalAlias, "".to_owned()),
(StateEventType::RoomName, "".to_owned()),
(StateEventType::RoomPowerLevels, "".to_owned()),
];
let invites = SlidingSyncList::builder("invites")
.sync_mode(SlidingSyncMode::new_selective().add_range(0..=16))
.timeline_limit(8)
.required_state(required_state.clone())
.filters(Some(assign!(SyncRequestListFilters::default(), {
is_invite: Some(true),
is_tombstoned: Some(false),
not_room_types: vec!["m.space".to_owned()],
})))
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]);
let sync = self
.client
.sliding_sync(Self::CONNECTION_ID)?
.poll_timeout(Duration::from_secs(1))
.network_timeout(Duration::from_secs(3))
.with_account_data_extension(
assign!(AccountDataConfig::default(), { enabled: Some(true) }),
)
.add_list(invites)
.build()
.await?;
sync.subscribe_to_room(
room_id.to_owned(),
Some(assign!(RoomSubscription::default(), {
required_state,
timeline_limit: Some(uint!(16))
})),
);
let mut remaining_attempts = 3;
let stream = sync.sync();
pin_mut!(stream);
loop {
if stream.next().await.is_none() {
// Sliding sync aborted early.
break;
}
if notification.lock().unwrap().is_some() {
// We got the event.
break;
}
remaining_attempts -= 1;
if remaining_attempts == 0 {
// We're out of luck.
break;
}
}
self.client.remove_event_handler(stripped_member_handler);
self.client.remove_event_handler(timeline_event_handler);
let maybe_event = notification.lock().unwrap().take();
Ok(maybe_event)
}
/// Get a full notification, given a room id and event id.
///
/// This will run a small sliding sync to retrieve the content of the event,
/// along with extra data to form a rich notification context.
pub async fn get_notification_with_sliding_sync(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<NotificationStatus, Error> {
tracing::info!("fetching notification event with a sliding sync");
let Some(mut raw_event) = self.try_sliding_sync(room_id, event_id).await? else {
return Ok(NotificationStatus::EventNotFound);
};
// At this point it should have been added by the sync, if it's not, give up.
let Some(room) = self.client.get_room(room_id) else { return Err(Error::UnknownRoom) };
let push_actions = match &raw_event {
RawNotificationEvent::Timeline(timeline_event) => {
// Timeline events may be encrypted, so make sure they get decrypted first.
if let Some(timeline_event) =
self.maybe_retry_decryption(&room, timeline_event).await?
{
raw_event = RawNotificationEvent::Timeline(timeline_event.event.cast());
timeline_event.push_actions
} else {
room.event_push_actions(timeline_event).await?
}
}
RawNotificationEvent::Invite(invite_event) => {
// Invite events can't be encrypted, so they should be in clear text.
room.event_push_actions(invite_event).await?
}
};
if let Some(push_actions) = &push_actions {
if self.filter_by_push_rules &&!push_actions.iter().any(|a| a.should_notify()) {
return Ok(NotificationStatus::EventFilteredOut);
}
}
Ok(NotificationStatus::Event(
NotificationItem::new(&room, &raw_event, push_actions.as_deref(), Vec::new()).await?,
))
}
/// Retrieve a notification using a `/context` query.
///
/// This is for clients that are already running other sliding syncs in the
/// same process, so that most of the contextual information for the
/// notification should already be there. In particular, the room containing
/// the event MUST be known (via a sliding sync for invites, or another
/// sliding sync).
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification_with_context(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
tracing::info!("fetching notification event with a /context query");
// See above comment.
let Some(room) = self.parent_client.get_room(room_id) else {
return Err(Error::UnknownRoom);
};
let (mut timeline_event, state_events) =
room.event_with_context(event_id, true).await?.ok_or(Error::ContextMissingEvent)?;
if let Some(decrypted_event) =
self.maybe_retry_decryption(&room, timeline_event.event.cast_ref()).await?
{
timeline_event = decrypted_event;
}
if self.filter_by_push_rules
&&!timeline_event
.push_actions
.as_ref()
.is_some_and(|actions| actions.iter().any(|a| a.should_notify()))
{
return Ok(None);
}
Ok(Some(
NotificationItem::new(
&room,
&RawNotificationEvent::Timeline(timeline_event.event.cast()),
timeline_event.push_actions.as_deref(),
state_events,
)
.await?,
))
}
}
/// Outcome of resolving a notification via the sliding-sync path.
#[derive(Debug)]
pub enum NotificationStatus {
    /// The event was found and resolved into a full notification item.
    Event(NotificationItem),
    /// The sliding sync couldn't find the event; callers may fall back to a
    /// `/context` query.
    EventNotFound,
    /// The event was found, but the user's push rules say it should not
    /// trigger a notification.
    EventFilteredOut,
}
/// Builder for a `NotificationClient`.
///
/// Fields have the same meaning as in `NotificationClient`.
#[derive(Clone)]
pub struct NotificationClientBuilder {
    /// SDK client that uses an in-memory state store, to be used with the
    /// sliding sync method.
    client: Client,
    /// SDK client that uses the same state store as the caller's context.
    parent_client: Client,
    /// Retry decryption of encrypted notification events (off by default).
    retry_decryption: bool,
    /// Use a cross-process crypto-store lock during decryption retries
    /// (off by default).
    with_cross_process_lock: bool,
    /// Filter notifications through the user's push rules (off by default).
    filter_by_push_rules: bool,
}
impl NotificationClientBuilder {
    /// Creates a builder from the caller's SDK client.
    ///
    /// Derives the dedicated in-memory notification client from
    /// `parent_client`; every toggle starts disabled.
    async fn new(parent_client: Client) -> Result<Self, Error> {
        let client = parent_client.notification_client().await?;
        Ok(Self {
            filter_by_push_rules: false,
            with_cross_process_lock: false,
            retry_decryption: false,
            parent_client,
            client,
        })
    }

    /// Filter out the notification event according to the push rules present in
    /// the event.
    pub fn filter_by_push_rules(self) -> Self {
        Self { filter_by_push_rules: true, ..self }
    }

    /// Automatically retry decryption once, if the notification was received
    /// encrypted.
    ///
    /// The boolean indicates whether we're making use of a cross-process lock
    /// for the crypto-store. This should be set to true, if and only if,
    /// the notification is received in a process that's different from the
    /// main app.
    pub fn retry_decryption(self, with_cross_process_lock: bool) -> Self {
        Self { retry_decryption: true, with_cross_process_lock, ..self }
    }

    /// Finishes configuring the `NotificationClient`.
    pub fn build(self) -> NotificationClient {
        let Self {
            client,
            parent_client,
            retry_decryption,
            with_cross_process_lock,
            filter_by_push_rules,
        } = self;
        NotificationClient {
            client,
            parent_client,
            retry_decryption,
            with_cross_process_lock,
            filter_by_push_rules,
            sliding_sync_mutex: AsyncMutex::new(()),
        }
    }
}
/// A notification event as received from the server, before deserialization.
enum RawNotificationEvent {
    /// A raw event from a room's timeline (possibly still encrypted).
    Timeline(Raw<AnySyncTimelineEvent>),
    /// A raw stripped `m.room.member` event carried by an invite.
    Invite(Raw<StrippedRoomMemberEvent>),
}
/// A deserialized notification event.
#[derive(Debug)]
pub enum NotificationEvent {
    /// An event from a room's timeline.
    Timeline(AnySyncTimelineEvent),
    /// A stripped `m.room.member` event from an invite.
    Invite(StrippedRoomMemberEvent),
}
impl NotificationEvent {
pub fn sender(&self) -> &UserId {
match self {
NotificationEvent::Timeline(ev) => ev.sender(),
NotificationEvent::Invite(ev) => &ev.sender,
}
}
}
/// A notification with its full content.
#[derive(Debug)]
pub struct NotificationItem {
    /// Underlying Ruma event.
    pub event: NotificationEvent,
    /// Display name of the sender.
    ///
    /// May be backfilled from state events provided at construction when the
    /// member info isn't available — TODO confirm against `NotificationItem::new`.
    pub sender_display_name: Option<String>,
    /// Avatar URL of the sender.
    pub sender_avatar_url: Option<String>,
    /// Room display name.
    pub room_display_name: String,
    /// Room avatar URL.
    pub room_avatar_url: Option<String>,
    /// Room canonical alias.
    pub room_canonical_alias: Option<String>,
    /// Is this room encrypted?
    pub is_room_encrypted: Option<bool>,
    /// Is this room considered a direct message?
    pub is_direct_message_room: bool,
    /// Numbers of members who joined the room.
    pub joined_members_count: u64,
    /// Is it a noisy notification? (i.e. does any push action contain a sound
    /// action)
    ///
    /// It is set if and only if the push actions could be determined.
    pub is_noisy: Option<bool>,
}
impl NotificationItem {
async fn new(
room: &Room,
raw_event: &RawNotificationEvent,
push_actions: Option<&[Action]>,
state_events: Vec<Raw<AnyStateEvent>>,
) -> Result<Self, Error> {
let event = match raw_event {
RawNotificationEvent::Timeline(raw_event) => NotificationEvent::Timeline(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
RawNotificationEvent::Invite(raw_event) => NotificationEvent::Invite(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
};
let sender = match room.state() {
RoomState::Invited => room.invite_details().await?.inviter,
_ => room.get_member_no_sync(event.sender()).await?,
};
let (mut sender_display_name, mut sender_avatar_url) = match &sender {
Some(sender) => (
sender.display_name().map(|s| s.to_owned()),
sender.avatar_url().map(|s| s.to_string()),
),
None => (None, None),
};
if sender_display_name.is_none() || sender_avatar_url.is_none() {
let sender_id = event.sender();
for ev in state_events {
let Ok(ev) = ev.deserialize() else {
continue;
| {
if !self.retry_decryption {
return Ok(None);
}
let event: AnySyncTimelineEvent =
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?;
let event_type = event.event_type();
let is_still_encrypted =
matches!(event_type, ruma::events::TimelineEventType::RoomEncrypted);
#[cfg(feature = "unstable-msc3956")]
let is_still_encrypted =
is_still_encrypted || matches!(event_type, ruma::events::TimelineEventType::Encrypted);
if !is_still_encrypted {
return Ok(None);
} | identifier_body |
notification_client.rs | app.
///
/// In particular, it takes care of running a full decryption sync, in case the
/// event in the notification was impossible to decrypt beforehand.
pub struct NotificationClient {
    /// SDK client that uses an in-memory state store.
    ///
    /// This is the client driving the short-lived sliding sync and the
    /// encryption sync used to retry decryption.
    client: Client,
    /// SDK client that uses the same state store as the caller's context.
    ///
    /// Used by the `/context` fallback, so room state gathered by the
    /// caller's own syncs is visible there.
    parent_client: Client,
    /// Should we retry decrypting an event, after it was impossible to decrypt
    /// on the first attempt?
    retry_decryption: bool,
    /// Should the encryption sync happening in case the notification event was
    /// encrypted use a cross-process lock?
    ///
    /// Only meaningful if `retry_decryption` is true.
    with_cross_process_lock: bool,
    /// Should we try to filter out the notification event according to the push
    /// rules?
    filter_by_push_rules: bool,
    /// A mutex to serialize requests to sliding sync.
    ///
    /// If several notifications come in at the same time (e.g. network was
    /// unreachable because of airplane mode or something similar), then we
    /// need to make sure that repeated calls to `get_notification` won't
    /// cause multiple requests with the same `conn_id` we're using for
    /// notifications. This mutex solves this by sequentializing the requests.
    sliding_sync_mutex: AsyncMutex<()>,
}
impl NotificationClient {
    /// `conn_id` used for the short-lived notification sliding sync.
    const CONNECTION_ID: &str = "notifications";
    /// Lock id used for the (optionally cross-process) encryption sync.
    const LOCK_ID: &str = "notifications";

    /// Create a new builder for a notification client.
    pub async fn builder(client: Client) -> Result<NotificationClientBuilder, Error> {
        NotificationClientBuilder::new(client).await
    }

    /// Fetches the content of a notification.
    ///
    /// This will first try to get the notification using a short-lived sliding
    /// sync, and if the sliding-sync can't find the event, then it'll use a
    /// `/context` query to find the event with associated member information.
    ///
    /// An error result means that we couldn't resolve the notification; in that
    /// case, a dummy notification may be displayed instead. A `None` result
    /// means the notification has been filtered out by the user's push
    /// rules.
    pub async fn get_notification(
        &self,
        room_id: &RoomId,
        event_id: &EventId,
    ) -> Result<Option<NotificationItem>, Error> {
        match self.get_notification_with_sliding_sync(room_id, event_id).await? {
            NotificationStatus::Event(event) => Ok(Some(event)),
            NotificationStatus::EventFilteredOut => Ok(None),
            // Sliding sync couldn't locate the event: fall back to /context.
            NotificationStatus::EventNotFound => {
                self.get_notification_with_context(room_id, event_id).await
            }
        }
    }

    /// Run an encryption sync loop, in case an event is still encrypted.
    ///
    /// Returns the decrypted event (`Some(..)`) if and only if:
    /// - retry_decryption was enabled,
    /// - the event was encrypted,
    /// - we successfully ran an encryption sync and decrypted the event.
    ///
    /// Otherwise returns `Ok(None)`; sync failures are logged, not propagated.
    async fn maybe_retry_decryption(
        &self,
        room: &Room,
        raw_event: &Raw<AnySyncTimelineEvent>,
    ) -> Result<Option<TimelineEvent>, Error> {
        if !self.retry_decryption {
            return Ok(None);
        }

        let event: AnySyncTimelineEvent =
            raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?;
        let event_type = event.event_type();

        let is_still_encrypted =
            matches!(event_type, ruma::events::TimelineEventType::RoomEncrypted);
        // With MSC3956, extensible events may use a different encrypted type.
        #[cfg(feature = "unstable-msc3956")]
        let is_still_encrypted =
            is_still_encrypted || matches!(event_type, ruma::events::TimelineEventType::Encrypted);

        if !is_still_encrypted {
            return Ok(None);
        }

        // The message is still encrypted, and the client is configured to retry
        // decryption.
        //
        // Spawn an `EncryptionSync` that runs two iterations of the sliding sync loop:
        // - the first iteration allows to get SS events as well as send e2ee requests.
        // - the second one let the SS proxy forward events triggered by the sending of
        //   e2ee requests.
        //
        // Keep timeouts small for both, since we might be short on time.

        let with_locking = WithLocking::from(self.with_cross_process_lock);

        let encryption_sync = EncryptionSync::new(
            Self::LOCK_ID.to_owned(),
            self.client.clone(),
            // (poll timeout, network timeout) — deliberately tight.
            Some((Duration::from_secs(3), Duration::from_secs(4))),
            with_locking,
        )
        .await;

        // Just log out errors, but don't have them abort the notification processing:
        // an undecrypted notification is still better than no
        // notifications.
        match encryption_sync {
            Ok(sync) => match sync.run_fixed_iterations(2).await {
                Ok(()) => {
                    // Keys may have arrived during the sync; try decrypting again.
                    let new_event = room.decrypt_event(raw_event.cast_ref()).await?;
                    Ok(Some(new_event))
                }
                Err(err) => {
                    tracing::warn!(
                        "error when running encryption_sync in get_notification: {err:#}"
                    );
                    Ok(None)
                }
            },
            Err(err) => {
                tracing::warn!("error when building encryption_sync in get_notification: {err:#}",);
                Ok(None)
            }
        }
    }

    /// Try to run a sliding sync (without encryption) to retrieve the event
    /// from the notification.
    ///
    /// This works by requesting explicit state that'll be useful for building
    /// the `NotificationItem`, and subscribing to the room which the
    /// notification relates to.
    async fn try_sliding_sync(
        &self,
        room_id: &RoomId,
        event_id: &EventId,
    ) -> Result<Option<RawNotificationEvent>, Error> {
        // Serialize all the calls to this method by taking a lock at the beginning,
        // that will be dropped later.
        let _guard = self.sliding_sync_mutex.lock().await;

        // Set up a sliding sync that only subscribes to the room that had the
        // notification, so we can figure out the full event and associated
        // information.

        // Shared slot filled by whichever event handler spots the target event.
        let notification = Arc::new(Mutex::new(None));

        let cloned_notif = notification.clone();
        let target_event_id = event_id.to_owned();
        let timeline_event_handler =
            self.client.add_event_handler(move |raw: Raw<AnySyncTimelineEvent>| async move {
                match raw.get_field::<OwnedEventId>("event_id") {
                    Ok(Some(event_id)) => {
                        if event_id == target_event_id {
                            // found it! There shouldn't be a previous event before, but if there
                            // is, that should be ok to just replace it.
                            *cloned_notif.lock().unwrap() =
                                Some(RawNotificationEvent::Timeline(raw));
                        }
                    }
                    Ok(None) | Err(_) => {
                        tracing::warn!("could not get event id");
                    }
                }
            });

        let cloned_notif = notification.clone();
        let target_event_id = event_id.to_owned();
        // NOTE(review): stripped (invite) state events normally carry no
        // `event_id` field per the Matrix spec — confirm this handler can
        // actually match on it, or whether invites always take another path.
        let stripped_member_handler =
            self.client.add_event_handler(move |raw: Raw<StrippedRoomMemberEvent>| async move {
                match raw.get_field::<OwnedEventId>("event_id") {
                    Ok(Some(event_id)) => {
                        if event_id == target_event_id {
                            // found it! There shouldn't be a previous event before, but if there
                            // is, that should be ok to just replace it.
                            *cloned_notif.lock().unwrap() = Some(RawNotificationEvent::Invite(raw));
                        }
                    }
                    Ok(None) | Err(_) => {
                        tracing::warn!("could not get event id");
                    }
                }
            });

        // Room power levels are necessary to build the push context.
        let required_state = vec![
            (StateEventType::RoomAvatar, "".to_owned()),
            (StateEventType::RoomEncryption, "".to_owned()),
            (StateEventType::RoomMember, "$LAZY".to_owned()),
            (StateEventType::RoomMember, "$ME".to_owned()),
            (StateEventType::RoomCanonicalAlias, "".to_owned()),
            (StateEventType::RoomName, "".to_owned()),
            (StateEventType::RoomPowerLevels, "".to_owned()),
        ];

        // A list for invites, since an invite room may not be known/subscribable yet.
        let invites = SlidingSyncList::builder("invites")
            .sync_mode(SlidingSyncMode::new_selective().add_range(0..=16))
            .timeline_limit(8)
            .required_state(required_state.clone())
            .filters(Some(assign!(SyncRequestListFilters::default(), {
                is_invite: Some(true),
                is_tombstoned: Some(false),
                not_room_types: vec!["m.space".to_owned()],
            })))
            .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]);

        let sync = self
            .client
            .sliding_sync(Self::CONNECTION_ID)?
            .poll_timeout(Duration::from_secs(1))
            .network_timeout(Duration::from_secs(3))
            .with_account_data_extension(
                assign!(AccountDataConfig::default(), { enabled: Some(true) }),
            )
            .add_list(invites)
            .build()
            .await?;

        // Explicitly subscribe to the room the notification belongs to.
        sync.subscribe_to_room(
            room_id.to_owned(),
            Some(assign!(RoomSubscription::default(), {
                required_state,
                timeline_limit: Some(uint!(16))
            })),
        );

        // Run at most 3 sync iterations looking for the event.
        let mut remaining_attempts = 3;
        let stream = sync.sync();
        pin_mut!(stream);

        loop {
            if stream.next().await.is_none() {
                // Sliding sync aborted early.
                break;
            }
            if notification.lock().unwrap().is_some() {
                // We got the event.
                break;
            }
            remaining_attempts -= 1;
            if remaining_attempts == 0 {
                // We're out of luck.
                break;
            }
        }

        // Clean up the temporary handlers before returning.
        self.client.remove_event_handler(stripped_member_handler);
        self.client.remove_event_handler(timeline_event_handler);

        let maybe_event = notification.lock().unwrap().take();
        Ok(maybe_event)
    }

    /// Get a full notification, given a room id and event id.
    ///
    /// This will run a small sliding sync to retrieve the content of the event,
    /// along with extra data to form a rich notification context.
    pub async fn get_notification_with_sliding_sync(
        &self,
        room_id: &RoomId,
        event_id: &EventId,
    ) -> Result<NotificationStatus, Error> {
        tracing::info!("fetching notification event with a sliding sync");

        let Some(mut raw_event) = self.try_sliding_sync(room_id, event_id).await? else {
            return Ok(NotificationStatus::EventNotFound);
        };

        // At this point it should have been added by the sync, if it's not, give up.
        let Some(room) = self.client.get_room(room_id) else { return Err(Error::UnknownRoom) };

        let push_actions = match &raw_event {
            RawNotificationEvent::Timeline(timeline_event) => {
                // Timeline events may be encrypted, so make sure they get decrypted first.
                if let Some(timeline_event) =
                    self.maybe_retry_decryption(&room, timeline_event).await?
                {
                    // Decryption succeeded: swap in the decrypted payload and
                    // reuse the push actions computed during decryption.
                    raw_event = RawNotificationEvent::Timeline(timeline_event.event.cast());
                    timeline_event.push_actions
                } else {
                    room.event_push_actions(timeline_event).await?
                }
            }
            RawNotificationEvent::Invite(invite_event) => {
                // Invite events can't be encrypted, so they should be in clear text.
                room.event_push_actions(invite_event).await?
            }
        };

        if let Some(push_actions) = &push_actions {
            if self.filter_by_push_rules && !push_actions.iter().any(|a| a.should_notify()) {
                return Ok(NotificationStatus::EventFilteredOut);
            }
        }

        Ok(NotificationStatus::Event(
            NotificationItem::new(&room, &raw_event, push_actions.as_deref(), Vec::new()).await?,
        ))
    }

    /// Retrieve a notification using a `/context` query.
    ///
    /// This is for clients that are already running other sliding syncs in the
    /// same process, so that most of the contextual information for the
    /// notification should already be there. In particular, the room containing
    /// the event MUST be known (via a sliding sync for invites, or another
    /// sliding sync).
    ///
    /// An error result means that we couldn't resolve the notification; in that
    /// case, a dummy notification may be displayed instead. A `None` result
    /// means the notification has been filtered out by the user's push
    /// rules.
    pub async fn get_notification_with_context(
        &self,
        room_id: &RoomId,
        event_id: &EventId,
    ) -> Result<Option<NotificationItem>, Error> {
        tracing::info!("fetching notification event with a /context query");

        // See above comment.
        let Some(room) = self.parent_client.get_room(room_id) else {
            return Err(Error::UnknownRoom);
        };

        let (mut timeline_event, state_events) =
            room.event_with_context(event_id, true).await?.ok_or(Error::ContextMissingEvent)?;

        if let Some(decrypted_event) =
            self.maybe_retry_decryption(&room, timeline_event.event.cast_ref()).await?
        {
            timeline_event = decrypted_event;
        }

        // Filter out when push actions are known and none of them notifies.
        if self.filter_by_push_rules
            && !timeline_event
                .push_actions
                .as_ref()
                .is_some_and(|actions| actions.iter().any(|a| a.should_notify()))
        {
            return Ok(None);
        }

        Ok(Some(
            NotificationItem::new(
                &room,
                &RawNotificationEvent::Timeline(timeline_event.event.cast()),
                timeline_event.push_actions.as_deref(),
                state_events,
            )
            .await?,
        ))
    }
}
/// Outcome of resolving a notification via the sliding-sync path.
#[derive(Debug)]
pub enum NotificationStatus {
    /// The event was found and resolved into a full notification item.
    Event(NotificationItem),
    /// The sliding sync couldn't find the event; callers may fall back to a
    /// `/context` query.
    EventNotFound,
    /// The event was found, but the user's push rules say it should not
    /// trigger a notification.
    EventFilteredOut,
}
/// Builder for a `NotificationClient`.
///
/// Fields have the same meaning as in `NotificationClient`.
#[derive(Clone)]
pub struct NotificationClientBuilder {
    /// SDK client that uses an in-memory state store, to be used with the
    /// sliding sync method.
    client: Client,
    /// SDK client that uses the same state store as the caller's context.
    parent_client: Client,
    /// Retry decryption of encrypted notification events (off by default).
    retry_decryption: bool,
    /// Use a cross-process crypto-store lock during decryption retries
    /// (off by default).
    with_cross_process_lock: bool,
    /// Filter notifications through the user's push rules (off by default).
    filter_by_push_rules: bool,
}
impl NotificationClientBuilder {
    /// Creates a builder from the caller's SDK client.
    ///
    /// Derives the dedicated in-memory notification client from
    /// `parent_client`; every toggle starts disabled.
    async fn new(parent_client: Client) -> Result<Self, Error> {
        let client = parent_client.notification_client().await?;
        Ok(Self {
            filter_by_push_rules: false,
            with_cross_process_lock: false,
            retry_decryption: false,
            parent_client,
            client,
        })
    }

    /// Filter out the notification event according to the push rules present in
    /// the event.
    pub fn filter_by_push_rules(self) -> Self {
        Self { filter_by_push_rules: true, ..self }
    }

    /// Automatically retry decryption once, if the notification was received
    /// encrypted.
    ///
    /// The boolean indicates whether we're making use of a cross-process lock
    /// for the crypto-store. This should be set to true, if and only if,
    /// the notification is received in a process that's different from the
    /// main app.
    pub fn retry_decryption(self, with_cross_process_lock: bool) -> Self {
        Self { retry_decryption: true, with_cross_process_lock, ..self }
    }

    /// Finishes configuring the `NotificationClient`.
    pub fn build(self) -> NotificationClient {
        let Self {
            client,
            parent_client,
            retry_decryption,
            with_cross_process_lock,
            filter_by_push_rules,
        } = self;
        NotificationClient {
            client,
            parent_client,
            retry_decryption,
            with_cross_process_lock,
            filter_by_push_rules,
            sliding_sync_mutex: AsyncMutex::new(()),
        }
    }
}
/// A notification event as received from the server, before deserialization.
enum RawNotificationEvent {
    /// A raw event from a room's timeline (possibly still encrypted).
    Timeline(Raw<AnySyncTimelineEvent>),
    /// A raw stripped `m.room.member` event carried by an invite.
    Invite(Raw<StrippedRoomMemberEvent>),
}
/// A deserialized notification event.
#[derive(Debug)]
pub enum NotificationEvent {
    /// An event from a room's timeline.
    Timeline(AnySyncTimelineEvent),
    /// A stripped `m.room.member` event from an invite.
    Invite(StrippedRoomMemberEvent),
}
impl NotificationEvent {
pub fn sender(&self) -> &UserId {
match self {
NotificationEvent::Timeline(ev) => ev.sender(),
NotificationEvent::Invite(ev) => &ev.sender,
}
}
}
/// A notification with its full content.
#[derive(Debug)]
pub struct NotificationItem {
    /// Underlying Ruma event.
    pub event: NotificationEvent,
    /// Display name of the sender.
    ///
    /// May be backfilled from state events provided at construction when the
    /// member info isn't available — TODO confirm against `NotificationItem::new`.
    pub sender_display_name: Option<String>,
    /// Avatar URL of the sender.
    pub sender_avatar_url: Option<String>,
    /// Room display name.
    pub room_display_name: String,
    /// Room avatar URL.
    pub room_avatar_url: Option<String>,
    /// Room canonical alias.
    pub room_canonical_alias: Option<String>,
    /// Is this room encrypted?
    pub is_room_encrypted: Option<bool>,
    /// Is this room considered a direct message?
    pub is_direct_message_room: bool,
    /// Numbers of members who joined the room.
    pub joined_members_count: u64,
    /// Is it a noisy notification? (i.e. does any push action contain a sound
    /// action)
    ///
    /// It is set if and only if the push actions could be determined.
    pub is_noisy: Option<bool>,
}
impl NotificationItem {
async fn new(
room: &Room,
raw_event: &RawNotificationEvent,
push_actions: Option<&[Action]>,
state_events: Vec<Raw<AnyStateEvent>>,
) -> Result<Self, Error> {
let event = match raw_event {
RawNotificationEvent::Timeline(raw_event) => NotificationEvent::Timeline(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
RawNotificationEvent::Invite(raw_event) => NotificationEvent::Invite(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
};
let sender = match room.state() {
RoomState::Invited => room.invite_details().await?.inviter,
_ => room.get_member_no_sync(event.sender()).await?,
};
let (mut sender_display_name, mut sender_avatar_url) = match &sender {
Some(sender) => (
sender.display_name().map(|s| s.to_owned()),
sender.avatar_url().map(|s| s.to_string()),
),
None => (None, None),
};
if sender_display_name.is_none() || sender_avatar_url.is_none() {
let sender_id = event.sender();
for ev in state_events {
let Ok(ev) = ev.deserialize() else {
continue;
| try_sliding_sync | identifier_name |
notification_client.rs | an app.
///
/// In particular, it takes care of running a full decryption sync, in case the
/// event in the notification was impossible to decrypt beforehand.
pub struct NotificationClient {
    /// SDK client that uses an in-memory state store.
    ///
    /// This is the client driving the short-lived sliding sync and the
    /// encryption sync used to retry decryption.
    client: Client,
    /// SDK client that uses the same state store as the caller's context.
    ///
    /// Used by the `/context` fallback, so room state gathered by the
    /// caller's own syncs is visible there.
    parent_client: Client,
    /// Should we retry decrypting an event, after it was impossible to decrypt
    /// on the first attempt?
    retry_decryption: bool,
    /// Should the encryption sync happening in case the notification event was
    /// encrypted use a cross-process lock?
    ///
    /// Only meaningful if `retry_decryption` is true.
    with_cross_process_lock: bool,
    /// Should we try to filter out the notification event according to the push
    /// rules?
    filter_by_push_rules: bool,
    /// A mutex to serialize requests to sliding sync.
    ///
    /// If several notifications come in at the same time (e.g. network was
    /// unreachable because of airplane mode or something similar), then we
    /// need to make sure that repeated calls to `get_notification` won't
    /// cause multiple requests with the same `conn_id` we're using for
    /// notifications. This mutex solves this by sequentializing the requests.
    sliding_sync_mutex: AsyncMutex<()>,
}
impl NotificationClient {
const CONNECTION_ID: &str = "notifications";
const LOCK_ID: &str = "notifications";
/// Create a new builder for a notification client.
pub async fn builder(client: Client) -> Result<NotificationClientBuilder, Error> {
NotificationClientBuilder::new(client).await
}
/// Fetches the content of a notification.
///
/// This will first try to get the notification using a short-lived sliding
/// sync, and if the sliding-sync can't find the event, then it'll use a
/// `/context` query to find the event with associated member information.
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
match self.get_notification_with_sliding_sync(room_id, event_id).await? {
NotificationStatus::Event(event) => Ok(Some(event)),
NotificationStatus::EventFilteredOut => Ok(None),
NotificationStatus::EventNotFound => {
self.get_notification_with_context(room_id, event_id).await
}
}
}
/// Run an encryption sync loop, in case an event is still encrypted.
///
/// Will return true if and only:
/// - retry_decryption was enabled,
/// - the event was encrypted,
/// - we successfully ran an encryption sync.
async fn maybe_retry_decryption(
&self,
room: &Room,
raw_event: &Raw<AnySyncTimelineEvent>,
) -> Result<Option<TimelineEvent>, Error> {
if!self.retry_decryption {
return Ok(None);
}
let event: AnySyncTimelineEvent =
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?;
let event_type = event.event_type();
let is_still_encrypted =
matches!(event_type, ruma::events::TimelineEventType::RoomEncrypted);
#[cfg(feature = "unstable-msc3956")]
let is_still_encrypted =
is_still_encrypted || matches!(event_type, ruma::events::TimelineEventType::Encrypted);
if!is_still_encrypted {
return Ok(None);
}
// The message is still encrypted, and the client is configured to retry
// decryption.
//
// Spawn an `EncryptionSync` that runs two iterations of the sliding sync loop:
// - the first iteration allows to get SS events as well as send e2ee requests.
// - the second one let the SS proxy forward events triggered by the sending of
// e2ee requests.
//
// Keep timeouts small for both, since we might be short on time.
let with_locking = WithLocking::from(self.with_cross_process_lock);
let encryption_sync = EncryptionSync::new(
Self::LOCK_ID.to_owned(),
self.client.clone(),
Some((Duration::from_secs(3), Duration::from_secs(4))),
with_locking,
)
.await;
// Just log out errors, but don't have them abort the notification processing:
// an undecrypted notification is still better than no
// notifications.
match encryption_sync {
Ok(sync) => match sync.run_fixed_iterations(2).await {
Ok(()) => {
let new_event = room.decrypt_event(raw_event.cast_ref()).await?;
Ok(Some(new_event))
}
Err(err) => {
tracing::warn!(
"error when running encryption_sync in get_notification: {err:#}"
);
Ok(None)
}
},
Err(err) => {
tracing::warn!("error when building encryption_sync in get_notification: {err:#}",);
Ok(None)
}
}
}
/// Try to run a sliding sync (without encryption) to retrieve the event
/// from the notification.
///
/// This works by requesting explicit state that'll be useful for building
/// the `NotificationItem`, and subscribing to the room which the
/// notification relates to.
async fn try_sliding_sync(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<RawNotificationEvent>, Error> {
// Serialize all the calls to this method by taking a lock at the beginning,
// that will be dropped later.
let _guard = self.sliding_sync_mutex.lock().await;
// Set up a sliding sync that only subscribes to the room that had the
// notification, so we can figure out the full event and associated
// information.
let notification = Arc::new(Mutex::new(None));
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let timeline_event_handler =
self.client.add_event_handler(move |raw: Raw<AnySyncTimelineEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() =
Some(RawNotificationEvent::Timeline(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
let cloned_notif = notification.clone();
let target_event_id = event_id.to_owned();
let stripped_member_handler =
self.client.add_event_handler(move |raw: Raw<StrippedRoomMemberEvent>| async move {
match raw.get_field::<OwnedEventId>("event_id") {
Ok(Some(event_id)) => {
if event_id == target_event_id {
// found it! There shouldn't be a previous event before, but if there
// is, that should be ok to just replace it.
*cloned_notif.lock().unwrap() = Some(RawNotificationEvent::Invite(raw));
}
}
Ok(None) | Err(_) => {
tracing::warn!("could not get event id");
}
}
});
// Room power levels are necessary to build the push context.
let required_state = vec![
(StateEventType::RoomAvatar, "".to_owned()),
(StateEventType::RoomEncryption, "".to_owned()),
(StateEventType::RoomMember, "$LAZY".to_owned()),
(StateEventType::RoomMember, "$ME".to_owned()),
(StateEventType::RoomCanonicalAlias, "".to_owned()),
(StateEventType::RoomName, "".to_owned()),
(StateEventType::RoomPowerLevels, "".to_owned()),
];
let invites = SlidingSyncList::builder("invites")
.sync_mode(SlidingSyncMode::new_selective().add_range(0..=16))
.timeline_limit(8)
.required_state(required_state.clone())
.filters(Some(assign!(SyncRequestListFilters::default(), {
is_invite: Some(true),
is_tombstoned: Some(false),
not_room_types: vec!["m.space".to_owned()],
})))
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]);
let sync = self
.client
.sliding_sync(Self::CONNECTION_ID)?
.poll_timeout(Duration::from_secs(1))
.network_timeout(Duration::from_secs(3))
.with_account_data_extension(
assign!(AccountDataConfig::default(), { enabled: Some(true) }),
)
.add_list(invites)
.build()
.await?;
sync.subscribe_to_room(
room_id.to_owned(),
Some(assign!(RoomSubscription::default(), {
required_state,
timeline_limit: Some(uint!(16))
})),
);
let mut remaining_attempts = 3;
let stream = sync.sync();
pin_mut!(stream);
loop {
if stream.next().await.is_none() {
// Sliding sync aborted early.
break;
}
if notification.lock().unwrap().is_some() {
// We got the event.
break;
}
remaining_attempts -= 1;
if remaining_attempts == 0 {
// We're out of luck.
break;
}
}
self.client.remove_event_handler(stripped_member_handler);
self.client.remove_event_handler(timeline_event_handler);
let maybe_event = notification.lock().unwrap().take();
Ok(maybe_event)
}
/// Get a full notification, given a room id and event id.
///
/// This will run a small sliding sync to retrieve the content of the event,
/// along with extra data to form a rich notification context.
pub async fn get_notification_with_sliding_sync(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<NotificationStatus, Error> {
tracing::info!("fetching notification event with a sliding sync");
let Some(mut raw_event) = self.try_sliding_sync(room_id, event_id).await? else {
return Ok(NotificationStatus::EventNotFound);
};
// At this point it should have been added by the sync, if it's not, give up.
let Some(room) = self.client.get_room(room_id) else { return Err(Error::UnknownRoom) };
let push_actions = match &raw_event {
RawNotificationEvent::Timeline(timeline_event) => {
// Timeline events may be encrypted, so make sure they get decrypted first.
if let Some(timeline_event) = | timeline_event.push_actions
} else {
room.event_push_actions(timeline_event).await?
}
}
RawNotificationEvent::Invite(invite_event) => {
// Invite events can't be encrypted, so they should be in clear text.
room.event_push_actions(invite_event).await?
}
};
if let Some(push_actions) = &push_actions {
if self.filter_by_push_rules &&!push_actions.iter().any(|a| a.should_notify()) {
return Ok(NotificationStatus::EventFilteredOut);
}
}
Ok(NotificationStatus::Event(
NotificationItem::new(&room, &raw_event, push_actions.as_deref(), Vec::new()).await?,
))
}
/// Retrieve a notification using a `/context` query.
///
/// This is for clients that are already running other sliding syncs in the
/// same process, so that most of the contextual information for the
/// notification should already be there. In particular, the room containing
/// the event MUST be known (via a sliding sync for invites, or another
/// sliding sync).
///
/// An error result means that we couldn't resolve the notification; in that
/// case, a dummy notification may be displayed instead. A `None` result
/// means the notification has been filtered out by the user's push
/// rules.
pub async fn get_notification_with_context(
&self,
room_id: &RoomId,
event_id: &EventId,
) -> Result<Option<NotificationItem>, Error> {
tracing::info!("fetching notification event with a /context query");
// See above comment.
let Some(room) = self.parent_client.get_room(room_id) else {
return Err(Error::UnknownRoom);
};
let (mut timeline_event, state_events) =
room.event_with_context(event_id, true).await?.ok_or(Error::ContextMissingEvent)?;
if let Some(decrypted_event) =
self.maybe_retry_decryption(&room, timeline_event.event.cast_ref()).await?
{
timeline_event = decrypted_event;
}
if self.filter_by_push_rules
&&!timeline_event
.push_actions
.as_ref()
.is_some_and(|actions| actions.iter().any(|a| a.should_notify()))
{
return Ok(None);
}
Ok(Some(
NotificationItem::new(
&room,
&RawNotificationEvent::Timeline(timeline_event.event.cast()),
timeline_event.push_actions.as_deref(),
state_events,
)
.await?,
))
}
}
/// Outcome of resolving a notification via the sliding-sync path.
#[derive(Debug)]
pub enum NotificationStatus {
    /// The event was found and resolved into a full notification item.
    Event(NotificationItem),
    /// The sliding sync couldn't find the event; callers may fall back to a
    /// `/context` query.
    EventNotFound,
    /// The event was found, but the user's push rules say it should not
    /// trigger a notification.
    EventFilteredOut,
}
/// Builder for a `NotificationClient`.
///
/// Fields have the same meaning as in `NotificationClient`.
#[derive(Clone)]
pub struct NotificationClientBuilder {
    /// SDK client that uses an in-memory state store, to be used with the
    /// sliding sync method.
    client: Client,
    /// SDK client that uses the same state store as the caller's context.
    parent_client: Client,
    /// Retry decryption of encrypted notification events (off by default).
    retry_decryption: bool,
    /// Use a cross-process crypto-store lock during decryption retries
    /// (off by default).
    with_cross_process_lock: bool,
    /// Filter notifications through the user's push rules (off by default).
    filter_by_push_rules: bool,
}
impl NotificationClientBuilder {
    /// Creates a builder from the caller's client; the dedicated
    /// notification client is derived from it. All options start disabled.
    async fn new(parent_client: Client) -> Result<Self, Error> {
        let client = parent_client.notification_client().await?;
        Ok(Self {
            client,
            parent_client,
            retry_decryption: false,
            with_cross_process_lock: false,
            filter_by_push_rules: false,
        })
    }

    /// Drop notification events that the user's push rules mark as
    /// non-notifying.
    pub fn filter_by_push_rules(self) -> Self {
        Self { filter_by_push_rules: true, ..self }
    }

    /// Automatically retry decryption once, if the notification was received
    /// encrypted.
    ///
    /// `with_cross_process_lock` must be true if, and only if, the
    /// notification is received in a process that's different from the main
    /// app, so that the crypto store is protected by a cross-process lock.
    pub fn retry_decryption(self, with_cross_process_lock: bool) -> Self {
        Self { retry_decryption: true, with_cross_process_lock, ..self }
    }

    /// Finishes configuring the `NotificationClient`.
    pub fn build(self) -> NotificationClient {
        let Self {
            client,
            parent_client,
            retry_decryption,
            with_cross_process_lock,
            filter_by_push_rules,
        } = self;
        NotificationClient {
            client,
            parent_client,
            with_cross_process_lock,
            filter_by_push_rules,
            retry_decryption,
            sliding_sync_mutex: AsyncMutex::new(()),
        }
    }
}
/// Raw (not yet deserialized) event a notification is built from.
enum RawNotificationEvent {
/// A sync timeline event wrapped in its raw JSON form.
Timeline(Raw<AnySyncTimelineEvent>),
/// A stripped room-member event in raw form, used for invites.
Invite(Raw<StrippedRoomMemberEvent>),
}
/// Deserialized event backing a notification.
#[derive(Debug)]
pub enum NotificationEvent {
/// A regular sync timeline event.
Timeline(AnySyncTimelineEvent),
/// A stripped room-member event (room invite).
Invite(StrippedRoomMemberEvent),
}
impl NotificationEvent {
pub fn sender(&self) -> &UserId {
match self {
NotificationEvent::Timeline(ev) => ev.sender(),
NotificationEvent::Invite(ev) => &ev.sender,
}
}
}
/// A notification with its full content.
#[derive(Debug)]
pub struct NotificationItem {
/// Underlying Ruma event.
pub event: NotificationEvent,
/// Display name of the sender.
pub sender_display_name: Option<String>,
/// Avatar URL of the sender.
pub sender_avatar_url: Option<String>,
/// Room display name.
pub room_display_name: String,
/// Room avatar URL.
pub room_avatar_url: Option<String>,
/// Room canonical alias.
pub room_canonical_alias: Option<String>,
/// Is this room encrypted? `None` when encryption state is unknown.
pub is_room_encrypted: Option<bool>,
/// Is this room considered a direct message?
pub is_direct_message_room: bool,
/// Number of members who have joined the room.
pub joined_members_count: u64,
/// Is it a noisy notification? (i.e. does any push action contain a sound
/// action)
///
/// It is set if and only if the push actions could be determined.
pub is_noisy: Option<bool>,
}
impl NotificationItem {
async fn new(
room: &Room,
raw_event: &RawNotificationEvent,
push_actions: Option<&[Action]>,
state_events: Vec<Raw<AnyStateEvent>>,
) -> Result<Self, Error> {
let event = match raw_event {
RawNotificationEvent::Timeline(raw_event) => NotificationEvent::Timeline(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
RawNotificationEvent::Invite(raw_event) => NotificationEvent::Invite(
raw_event.deserialize().map_err(|_| Error::InvalidRumaEvent)?,
),
};
let sender = match room.state() {
RoomState::Invited => room.invite_details().await?.inviter,
_ => room.get_member_no_sync(event.sender()).await?,
};
let (mut sender_display_name, mut sender_avatar_url) = match &sender {
Some(sender) => (
sender.display_name().map(|s| s.to_owned()),
sender.avatar_url().map(|s| s.to_string()),
),
None => (None, None),
};
if sender_display_name.is_none() || sender_avatar_url.is_none() {
let sender_id = event.sender();
for ev in state_events {
let Ok(ev) = ev.deserialize() else {
continue;
}; | self.maybe_retry_decryption(&room, timeline_event).await?
{
raw_event = RawNotificationEvent::Timeline(timeline_event.event.cast()); | random_line_split |
main.rs | use std::env;
use std::fs;
extern crate rgsl;
use std::rc::Rc;
use std::cell::RefCell;
use plotters::prelude::*;
use std::time::Instant;
use std::path::Path;
use std::io::{Error, ErrorKind};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use image::{imageops::FilterType, ImageFormat};
#[macro_use]
extern crate lazy_static;
lazy_static!{
// Refractive index of air (dimensionless), n1 in Snell's law.
static ref AIR_REFRACTIVE_INDEX:f64 = 1f64;
// Refractive index of the cornea (dimensionless), n2 in Snell's law.
static ref CORNEA_REFRACTIVE_INDEX:f64 = 1.37;
}
// Clones the listed bindings and moves the clones into the following
// closure, so the originals remain usable at the call site. This is the
// common `Rc`/`Arc` capture pattern used by the solver callbacks below.
macro_rules! clone {
// Internal rule: a `_` closure parameter is passed through unchanged.
(@param _) => ( _ );
// Internal rule: a named closure parameter is made mutable in the body.
(@param $x:ident) => ( mut $x );
// Zero-argument closure: clone each named binding, then `move` it in.
($($n:ident),+ => move || $body:expr) => (
{
$( let $n = $n.clone(); )+
move || $body
}
);
// Closure with parameters: same cloning, parameters run through `@param`.
($($n:ident),+ => move |$($p:tt),+| $body:expr) => (
{
$( let $n = $n.clone(); )+
move |$(clone!(@param $p),)+| $body
}
);
}
/// Residual function for the parabola model `y = a*x^2 + b*x + c`.
///
/// `x` holds the current parameter estimate (a, b, c); each residual written
/// into `f` is the model prediction minus the observed `data.y[i]`.
fn exp_f(x: &rgsl::VectorF64, f: &mut rgsl::VectorF64, data: &Data) -> rgsl::Value {
    let (a, b, c) = (x.get(0), x.get(1), x.get(2));
    for (i, (&xi, &yi)) in data.x.iter().zip(data.y.iter()).enumerate() {
        // Model Yi = a * x^2 + b * x + c; store the residual Yi - yi.
        let predicted = a * xi.powi(2) + b * xi + c;
        f.set(i, predicted - yi);
    }
    rgsl::Value::Success
}
/// Jacobian of the residuals: `d r_i / d(a, b, c) = (x_i^2, x_i, 1)`.
///
/// The model is linear in the parameters, so the Jacobian depends only on
/// the sample abscissas, not on the current estimate `x`.
fn exp_df(x: &rgsl::VectorF64, J: &mut rgsl::MatrixF64, data: &Data) -> rgsl::Value {
    let _ = x; // unused: the Jacobian of a linear-in-parameters model is constant
    for (row, &xi) in data.x.iter().enumerate() {
        J.set(row, 0, xi.powi(2));
        J.set(row, 1, xi);
        J.set(row, 2, 1f64);
    }
    rgsl::Value::Success
}
/// Prints one solver iteration: the current (a, b, c) estimate and the
/// Euclidean norm of the residual vector |f(x)|.
fn print_state(iter: usize, s: &rgsl::MultiFitFdfSolver) {
println!("iter: {} x = {} {} {} |f(x)| = {}", iter,
s.x().get(0), s.x().get(1), s.x().get(2), rgsl::blas::level1::dnrm2(&s.f()));
}
/// Sample points that the parabola is fitted to.
#[derive(Debug)]
pub struct Data{
// x coordinates of the samples (presumably millimetres — TODO confirm).
pub x: Vec<f64>,
// y coordinates of the samples; same length as `x`.
pub y: Vec<f64>,
// Number of samples; callers keep this equal to `x.len()` / `y.len()`.
n: usize
}
/// Reads `filename` and returns its contents split on '\n'.
///
/// Note: a trailing newline yields a final empty string, and '\r' from
/// Windows line endings is kept at the end of each line (same behavior as
/// the original `split('\n')`).
///
/// # Panics
/// Panics if the file cannot be read, including the underlying I/O error
/// in the message.
fn read_file_into_lines(filename: &str) -> Vec<String> {
    // `unwrap_or_else` avoids building the panic message on the happy path
    // (clippy::expect_fun_call) and preserves the I/O error for diagnosis.
    let contents = fs::read_to_string(filename)
        .unwrap_or_else(|e| panic!("can't read file {}: {}", filename, e));
    contents.split('\n').map(str::to_string).collect()
}
/// Returns true when (x, y) lies strictly inside the plotting area,
/// i.e. x in (-8, 8) and y in (0, 16); both bounds are exclusive.
fn bound_in_axis(x: f32, y: f32) -> bool {
    // Use f32 constants directly instead of integer locals cast on every
    // comparison; the exclusive bounds of the original are preserved.
    const X_LOW: f32 = -8.0;
    const X_HIGH: f32 = 8.0;
    const Y_LOW: f32 = 0.0;
    const Y_HIGH: f32 = 16.0;
    x > X_LOW && x < X_HIGH && y > Y_LOW && y < Y_HIGH
}
/// Renders the fitted parabola, the cornea sample points, the pupil-margin
/// points, vertical "inbound ray" lines through both pupil margins, and the
/// tangent/normal lines at the points where those rays hit the parabola.
///
/// Both chart axes are reversed in `build_ranged` (x: 8 -> -8, y: 16 -> 0),
/// presumably to match image coordinates — TODO confirm.
/// `image_path` is currently unused: the background-image overlay below is
/// commented out.
fn plot_parabola(data: &Data, params: &rgsl::VectorF64, additional_points: Vec<Vec<f64>>, fig_path: &String, image_path: &String){
let root = BitMapBackend::new(fig_path, (1024, 1024)).into_drawing_area();
root.fill(&WHITE).unwrap();
let mut chart = ChartBuilder::on(&root)
.caption("Optical Distortion Correction", ("sans-serif", 10).into_font())
.margin(5)
.x_label_area_size(30)
.y_label_area_size(30)
.build_ranged(8f32..-8f32, 16f32..0f32).unwrap();
//chart.configure_mesh().draw().unwrap();
// Plot background image
//let (w, h) = chart.plotting_area().dim_in_pixel();
//println!("plotting area: {}, {}", w, h);
//let image = image::load(
//BufReader::new(File::open(image_path).unwrap()),
//ImageFormat::Bmp,
//).unwrap()
//.resize_exact(16, 16, FilterType::Nearest);
//let elem: BitMapElement<_> = ((-8.0, 8.0), image).into();
//chart.draw_series(std::iter::once(elem)).unwrap();
// Draw Overlay points
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
// Fitted parabola coefficients: y = a*x^2 + b*x + c.
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
// Draw parabola
chart.draw_series(LineSeries::new(
(x_low..=x_high).map(|x| {x as f32}).map(|x| (x, x.powi(2) * a as f32 + x * b as f32 + c as f32)),
&RED,
)).unwrap()
.label(format!("y = {:.3}x^2 + {:.3}x + {:.3}", a, b, c)).legend(|(x, y)| PathElement::new(vec![(x, y), (x + 2, y)], &RED));
// Draw cornea points
chart.draw_series(PointSeries::of_element(data.x.iter().zip(data.y.iter()).map(|(x, y)| (*x as f32, *y as f32)), 2, ShapeStyle::from(&RED).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Plot pupil margins
chart.draw_series(PointSeries::of_element(additional_points.iter().map(|v| (v[0] as f32, v[1] as f32)), 3, ShapeStyle::from(&BLUE).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Draw inbound ray
// Project each pupil-margin x onto the parabola to find where the
// vertical ray crosses the fitted surface.
let pm0_x = additional_points[0][0];
let pm0_boundary = vec![pm0_x, a * pm0_x.powi(2) + b * pm0_x + c];
let pm1_x = additional_points[1][0];
let pm1_boundary = vec![pm1_x, a * pm1_x.powi(2) + b * pm1_x + c];
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm0_x as f32, y)),
&BLUE)).unwrap();
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm1_x as f32, y)),
&BLUE)).unwrap();
// Draw tangent line
// Slope at pm0 is dy/dx = 2a*x + b; the line is drawn over x in
// [pm0_x - 2, pm0_x + 2] sampled at 0.1 steps and clipped to the axes.
let tangent_line_0_k = 2f64 * a * pm0_x + b;
let tangent_line_0_b = pm0_boundary[1] - tangent_line_0_k * pm0_x;
let mut tangent_line_low = ((pm0_x - 2f64) * 10f64) as i32;
let mut tangent_line_high = ((pm0_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k as f32 * x + tangent_line_0_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Normal at pm0 (slope -1/k), perpendicular to the tangent above.
let tangent_line_0_k_vert = -1f64 / tangent_line_0_k;
let tangent_line_0_b_vert = pm0_boundary[1] - (-1f64 / tangent_line_0_k) * pm0_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k_vert as f32 * x + tangent_line_0_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Draw tangent line
// Same tangent/normal construction for the second pupil margin.
let tangent_line_1_k = 2f64 * a * pm1_x + b;
let tangent_line_1_b = pm1_boundary[1] - tangent_line_1_k * pm1_x;
tangent_line_low = ((pm1_x - 2f64) * 10f64) as i32;
tangent_line_high = ((pm1_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k as f32 * x + tangent_line_1_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_1_k_vert = -1f64 / tangent_line_1_k;
let tangent_line_1_b_vert = pm1_boundary[1] - (-1f64 / tangent_line_1_k) * pm1_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k_vert as f32 * x + tangent_line_1_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Finally draw the legend box for the parabola label registered above.
chart
.configure_series_labels()
.background_style(&WHITE.mix(0.8))
.border_style(&BLACK)
.draw().unwrap();
}
fn curve_fitting(data: Rc<Data>) -> Result<rgsl::MultiFitFdfSolver, Error>{
//Initialize Parameters
let num_params = 3;
let num_instances = data.n;
let mut status = rgsl::Value::Success;
let mut J = match rgsl::MatrixF64::new(num_params, num_params){
Some(mat) => mat,
None => return Err(Error::new(ErrorKind::Other, "Can't create Jacobian matrix"))
};
let mut params_init = [0f64, -1f64, 100f64];
let mut params = rgsl::VectorView::from_array(&mut params_init);
rgsl::RngType::env_setup();
let rng_type = rgsl::rng::default();
let mut rng = match rgsl::Rng::new(&rng_type){
Some(r) => r,
None => return Err(Error::new(ErrorKind::Other, "Can't create rng"))
};
let solver_type = rgsl::MultiFitFdfSolverType::lmsder();
let mut func = rgsl::MultiFitFunctionFdf::new(num_instances, num_params);
let expb_f = clone!(data => move |x, f| {
exp_f(&x, &mut f, &*data)
});
func.f = Some(Box::new(expb_f));
let expb_df = clone!(data => move |x, J| {
exp_df(&x, &mut J, &*data)
});
func.df = Some(Box::new(expb_df));
let expb_fdf = clone!(data => move |x, f, J| {
exp_f(&x, &mut f, &*data);
exp_df(&x, &mut J, &*data);
rgsl::Value::Success
});
func.fdf = Some(Box::new(expb_fdf));
// Create a solver
let mut solver = match rgsl::MultiFitFdfSolver::new(&solver_type, num_instances, num_params){
Some(s) => s,
None => return Err(Error::new(ErrorKind::Other, "can't create solver"))
};
solver.set(&mut func, ¶ms.vector());
let mut iter = 0;
loop {
iter += 1;
status = solver.iterate();
println!("status = {}", rgsl::error::str_error(status));
print_state(iter, &solver);
if status!= rgsl::Value::Success {
//return Err(Error::new(ErrorKind::TimedOut, "Reconstruction failed"));
break;
}
//println!("dx: {:?}", &solver.dx());
//println!("position: {:?}", &s.position());
status = rgsl::multifit::test_delta(&solver.dx(), &solver.x(), 1e-4, 1e-4);
if status!= rgsl::Value::Continue || iter >= 500 {
break;
}
}
println!("Done");
println!("params: {:?}", &solver.x());
Ok(solver)
}
/// Applies a refraction correction to points imaged through the cornea.
///
/// `params` holds the fitted parabola coefficients (a, b, c) of the cornea
/// surface; each entry of `points` is a 2-element [x, y] coordinate of a
/// point observed beneath that surface. Each ray is assumed to travel
/// vertically until it hits the parabola; the bend angle uses the
/// small-angle form of Snell's law (theta_2 = n1 * theta_1 / n2 —
/// presumably a paraxial approximation, TODO confirm), and the in-tissue
/// path length is rescaled by 1 / n_cornea.
fn apply_optical_distortion_correction(params: &rgsl::VectorF64, points: &Vec<Vec<f64>>) -> Vec<Vec<f64>>{
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
let mut new_points: Vec<Vec<f64>> = Vec::new();
for point in points.iter(){
// Surface slope at the ray's x position: dy/dx = 2a*x + b.
let k1 = 2f64 * a * point[0] + b;
// Angle of incidence relative to the vertical ray.
let theta_1 = k1.atan();
let mut theta_2 = 0f64;
let mut theta = 0f64;
let mut new_point: Vec<f64> = Vec::new();
if theta_1 > 0f64{
// Small-angle Snell: theta_2 = n_air * theta_1 / n_cornea; the ray
// is deflected by the difference theta_1 - theta_2.
theta_2 = theta_1 * *AIR_REFRACTIVE_INDEX/ *CORNEA_REFRACTIVE_INDEX;
theta = theta_1 - theta_2;
}else{
// Mirror-symmetric case for a negative slope (n_air = 1 is implied
// here, so the omitted multiplier does not change the result).
theta_2 = theta_1.abs() / *CORNEA_REFRACTIVE_INDEX;
theta = -1f64 * (theta_1.abs() - theta_2);
}
println!("theta: {}", theta);
// Intersection of the vertical ray with the fitted cornea surface.
let boundary_point = vec![point[0], a * point[0].powi(2) + b * point[0] + c];
// Geometric depth below the surface, corrected for optical path length.
let new_length = (boundary_point[1] - point[1]) / *CORNEA_REFRACTIVE_INDEX;
// Walk from the boundary along the refracted direction.
new_point.push(boundary_point[0] + new_length * theta.sin());
new_point.push(boundary_point[1] - new_length * theta.cos());
println!("old: {:?}, new: {:?}", point, new_point);
new_points.push(new_point);
}
new_points
}
/*
fn run_on_folder(folder: &str){
let mut total_time_ms = 0;
let mut iter = 0;
for entry in fs::read_dir(folder).unwrap(){
let entry = entry.unwrap();
let file_path = entry.path();
if!file_path.is_dir(){
let lines = read_file_into_lines(file_path.to_str().unwrap());
println!("process: {:?}", file_path);
// Create Data
let mut data = Rc::new(RefCell::new(Data{
x: Vec::<f64>::new(),
y: Vec::<f64>::new(),
n: 0
}));
for (i, line) in lines.iter().enumerate(){
let mut data = *data.borrow_mut();
let numbers: Vec<f64> = line.split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
//println!("data: {:?}", numbers);
if numbers.len() > 1{
data.x.push(*numbers.get(0).unwrap());
data.y.push(*numbers.get(1).unwrap());
data.n += 1;
}
}
//println!("Data: {:?}", data);
if data.borrow().n >= 3{
let fig_path: String = format!("./figs/{}.png", file_path.file_name().unwrap().to_str().unwrap());
let now = Instant::now();
let curve_params = curve_fitting(data);
total_time_ms += now.elapsed().as_micros();
iter += 1;
}
}
}
println!("run {} iterations", iter);
println!("average time: {} microsecond", total_time_ms/ iter);
}
*/
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
if path.is_dir(){
//run_on_folder(&path.to_str().unwrap());
println!("TODO: fix bug in run folder");
}else{
let lines = read_file_into_lines(path.to_str().unwrap());
let mut total_time_micros = 0;
let mut iter = 0;
let mut file = File::create("cross_corrected.keypoint").unwrap();
file.write_all(b"scanId\teyeId\tbscanId\tpm0xmm\tpm0ymm\tpm0zmm\tpm1xmm\tpm1ymm\tpm1zmm\n");
for line in lines.iter().skip(1){
let elements: Vec<&str> = line.split(",").collect();
if elements.len() > 8 | let corrected_points = apply_optical_distortion_correction(&solver.x(), &add_points);
if elements[2] == "x"{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
corrected_points[0][0], 0, corrected_points[0][1],
corrected_points[1][0], 0, corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}else{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
-1.53, corrected_points[0][0], corrected_points[0][1],
-1.53, corrected_points[1][0], corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}
//println!("{:?}", corrected_points);
total_time_micros += now.elapsed().as_micros();
iter += 1;
add_points.extend(corrected_points);
let image_path = format!("./resource/TestMotility/OD/{}/0{}.bmp", elements[2], elements[0]);
println!("image: {}", image_path);
let fig_path: String = format!("./figs_mm/{}_{}.png", elements[0], elements[2]);
//plot_parabola(&data, &solver.x(), add_points, &fig_path, &image_path);
}
}
else{
println!("total elements: {}", elements.len());
println!("Can't process {}", line);
}
}
println!("Total iteration: {}", iter);
println!("Average time: {}", total_time_micros / iter);
}
}
| {
let pm0: Vec<f64> = vec![elements[3], elements[4]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let pm1: Vec<f64> = vec![elements[5], elements[6]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let cp_x: Vec<f64> = elements[7].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let cp_y: Vec<f64> = elements[8].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let num_kp = cp_y.len();
let mut data = Rc::new(Data{
x: cp_x,
y: cp_y,
n: num_kp
});
//println!("{:?}", data.x);
if data.n >= 3{
let now = Instant::now();
let solver = match curve_fitting(Rc::clone(&data)){
Ok(p) => p,
Err(e) => panic!("can't reconstruct curve")
};
println!("{:?}", solver.x());
let mut add_points = vec![pm0, pm1]; | conditional_block |
main.rs | use std::env;
use std::fs;
extern crate rgsl;
use std::rc::Rc;
use std::cell::RefCell;
use plotters::prelude::*;
use std::time::Instant;
use std::path::Path;
use std::io::{Error, ErrorKind};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use image::{imageops::FilterType, ImageFormat};
#[macro_use]
extern crate lazy_static;
lazy_static!{
static ref AIR_REFRACTIVE_INDEX:f64 = 1f64;
static ref CORNEA_REFRACTIVE_INDEX:f64 = 1.37;
}
macro_rules! clone {
(@param _) => ( _ );
(@param $x:ident) => ( mut $x );
($($n:ident),+ => move || $body:expr) => (
{
$( let $n = $n.clone(); )+
move || $body
}
);
($($n:ident),+ => move |$($p:tt),+| $body:expr) => (
{
$( let $n = $n.clone(); )+
move |$(clone!(@param $p),)+| $body
}
);
}
fn exp_f(x: &rgsl::VectorF64, f: &mut rgsl::VectorF64, data: &Data) -> rgsl::Value {
let a = x.get(0);
let b = x.get(1);
let c = x.get(2);
for (i, (x, y)) in data.x.iter().zip(data.y.iter()).enumerate(){
/* Model Yi = a * x^2 + b * x + c*/
let yi = a * x.powi(2) + b * x + c;
f.set(i, yi - y);
}
rgsl::Value::Success
}
fn exp_df(x: &rgsl::VectorF64, J: &mut rgsl::MatrixF64, data: &Data) -> rgsl::Value {
for (i, x) in data.x.iter().enumerate(){
/* Jacobian matrix J(i,j) = dfi / dxj, */
/* where fi = (Yi - yi)/sigma[i], */
/* Yi = A * exp(-lambda * i) + b */
/* and the xj are the parameters (A,lambda,b) */
J.set(i, 0, x.powi(2));
J.set(i, 1, *x);
J.set(i, 2, 1f64);
}
rgsl::Value::Success
}
fn print_state(iter: usize, s: &rgsl::MultiFitFdfSolver) {
println!("iter: {} x = {} {} {} |f(x)| = {}", iter,
s.x().get(0), s.x().get(1), s.x().get(2), rgsl::blas::level1::dnrm2(&s.f()));
}
#[derive(Debug)]
pub struct Data{
pub x: Vec<f64>,
pub y: Vec<f64>,
n: usize
}
/// Reads `filename` and returns its contents split on '\n'.
///
/// Note: a trailing newline yields a final empty string, and '\r' from
/// Windows line endings is kept at the end of each line (same behavior as
/// the original `split('\n')`).
///
/// # Panics
/// Panics if the file cannot be read, including the underlying I/O error
/// in the message.
fn read_file_into_lines(filename: &str) -> Vec<String> {
    // `unwrap_or_else` avoids building the panic message on the happy path
    // (clippy::expect_fun_call) and preserves the I/O error for diagnosis.
    let contents = fs::read_to_string(filename)
        .unwrap_or_else(|e| panic!("can't read file {}: {}", filename, e));
    contents.split('\n').map(str::to_string).collect()
}
/// Returns true when (x, y) lies strictly inside the plotting area,
/// i.e. x in (-8, 8) and y in (0, 16); both bounds are exclusive.
fn bound_in_axis(x: f32, y: f32) -> bool {
    // Use f32 constants directly instead of integer locals cast on every
    // comparison; the exclusive bounds of the original are preserved.
    const X_LOW: f32 = -8.0;
    const X_HIGH: f32 = 8.0;
    const Y_LOW: f32 = 0.0;
    const Y_HIGH: f32 = 16.0;
    x > X_LOW && x < X_HIGH && y > Y_LOW && y < Y_HIGH
}
fn plot_parabola(data: &Data, params: &rgsl::VectorF64, additional_points: Vec<Vec<f64>>, fig_path: &String, image_path: &String){
let root = BitMapBackend::new(fig_path, (1024, 1024)).into_drawing_area();
root.fill(&WHITE).unwrap();
let mut chart = ChartBuilder::on(&root)
.caption("Optical Distortion Correction", ("sans-serif", 10).into_font())
.margin(5)
.x_label_area_size(30)
.y_label_area_size(30)
.build_ranged(8f32..-8f32, 16f32..0f32).unwrap();
//chart.configure_mesh().draw().unwrap();
// Plot background image
//let (w, h) = chart.plotting_area().dim_in_pixel();
//println!("plotting area: {}, {}", w, h);
//let image = image::load(
//BufReader::new(File::open(image_path).unwrap()),
//ImageFormat::Bmp,
//).unwrap()
//.resize_exact(16, 16, FilterType::Nearest);
//let elem: BitMapElement<_> = ((-8.0, 8.0), image).into();
//chart.draw_series(std::iter::once(elem)).unwrap();
// Draw Overlay points
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
// Draw parabola
chart.draw_series(LineSeries::new(
(x_low..=x_high).map(|x| {x as f32}).map(|x| (x, x.powi(2) * a as f32 + x * b as f32 + c as f32)),
&RED,
)).unwrap()
.label(format!("y = {:.3}x^2 + {:.3}x + {:.3}", a, b, c)).legend(|(x, y)| PathElement::new(vec![(x, y), (x + 2, y)], &RED));
// Draw cornea points
chart.draw_series(PointSeries::of_element(data.x.iter().zip(data.y.iter()).map(|(x, y)| (*x as f32, *y as f32)), 2, ShapeStyle::from(&RED).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Plot pupil margins
chart.draw_series(PointSeries::of_element(additional_points.iter().map(|v| (v[0] as f32, v[1] as f32)), 3, ShapeStyle::from(&BLUE).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Draw inbound ray
let pm0_x = additional_points[0][0];
let pm0_boundary = vec![pm0_x, a * pm0_x.powi(2) + b * pm0_x + c];
let pm1_x = additional_points[1][0];
let pm1_boundary = vec![pm1_x, a * pm1_x.powi(2) + b * pm1_x + c];
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm0_x as f32, y)),
&BLUE)).unwrap();
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm1_x as f32, y)),
&BLUE)).unwrap();
// Draw tangent line
let tangent_line_0_k = 2f64 * a * pm0_x + b;
let tangent_line_0_b = pm0_boundary[1] - tangent_line_0_k * pm0_x;
let mut tangent_line_low = ((pm0_x - 2f64) * 10f64) as i32;
let mut tangent_line_high = ((pm0_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k as f32 * x + tangent_line_0_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_0_k_vert = -1f64 / tangent_line_0_k;
let tangent_line_0_b_vert = pm0_boundary[1] - (-1f64 / tangent_line_0_k) * pm0_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k_vert as f32 * x + tangent_line_0_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Draw tangent line
let tangent_line_1_k = 2f64 * a * pm1_x + b;
let tangent_line_1_b = pm1_boundary[1] - tangent_line_1_k * pm1_x;
tangent_line_low = ((pm1_x - 2f64) * 10f64) as i32;
tangent_line_high = ((pm1_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k as f32 * x + tangent_line_1_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_1_k_vert = -1f64 / tangent_line_1_k;
let tangent_line_1_b_vert = pm1_boundary[1] - (-1f64 / tangent_line_1_k) * pm1_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k_vert as f32 * x + tangent_line_1_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
chart
.configure_series_labels()
.background_style(&WHITE.mix(0.8))
.border_style(&BLACK)
.draw().unwrap();
}
fn curve_fitting(data: Rc<Data>) -> Result<rgsl::MultiFitFdfSolver, Error>{
//Initialize Parameters
let num_params = 3;
let num_instances = data.n;
let mut status = rgsl::Value::Success;
let mut J = match rgsl::MatrixF64::new(num_params, num_params){
Some(mat) => mat,
None => return Err(Error::new(ErrorKind::Other, "Can't create Jacobian matrix"))
};
let mut params_init = [0f64, -1f64, 100f64];
let mut params = rgsl::VectorView::from_array(&mut params_init);
rgsl::RngType::env_setup();
let rng_type = rgsl::rng::default();
let mut rng = match rgsl::Rng::new(&rng_type){
Some(r) => r,
None => return Err(Error::new(ErrorKind::Other, "Can't create rng"))
};
let solver_type = rgsl::MultiFitFdfSolverType::lmsder();
let mut func = rgsl::MultiFitFunctionFdf::new(num_instances, num_params);
let expb_f = clone!(data => move |x, f| {
exp_f(&x, &mut f, &*data)
});
func.f = Some(Box::new(expb_f));
let expb_df = clone!(data => move |x, J| {
exp_df(&x, &mut J, &*data)
});
func.df = Some(Box::new(expb_df));
let expb_fdf = clone!(data => move |x, f, J| {
exp_f(&x, &mut f, &*data);
exp_df(&x, &mut J, &*data);
rgsl::Value::Success
});
func.fdf = Some(Box::new(expb_fdf));
// Create a solver
let mut solver = match rgsl::MultiFitFdfSolver::new(&solver_type, num_instances, num_params){
Some(s) => s,
None => return Err(Error::new(ErrorKind::Other, "can't create solver"))
};
solver.set(&mut func, ¶ms.vector());
let mut iter = 0;
loop {
iter += 1;
status = solver.iterate();
println!("status = {}", rgsl::error::str_error(status));
print_state(iter, &solver);
if status!= rgsl::Value::Success {
//return Err(Error::new(ErrorKind::TimedOut, "Reconstruction failed"));
break;
}
//println!("dx: {:?}", &solver.dx());
//println!("position: {:?}", &s.position());
status = rgsl::multifit::test_delta(&solver.dx(), &solver.x(), 1e-4, 1e-4);
if status!= rgsl::Value::Continue || iter >= 500 {
break;
}
}
println!("Done");
println!("params: {:?}", &solver.x());
Ok(solver)
}
fn apply_optical_distortion_correction(params: &rgsl::VectorF64, points: &Vec<Vec<f64>>) -> Vec<Vec<f64>>{
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
let mut new_points: Vec<Vec<f64>> = Vec::new();
for point in points.iter(){
let k1 = 2f64 * a * point[0] + b;
let theta_1 = k1.atan();
let mut theta_2 = 0f64;
let mut theta = 0f64;
let mut new_point: Vec<f64> = Vec::new();
if theta_1 > 0f64{
theta_2 = theta_1 * *AIR_REFRACTIVE_INDEX/ *CORNEA_REFRACTIVE_INDEX;
theta = theta_1 - theta_2;
}else{
theta_2 = theta_1.abs() / *CORNEA_REFRACTIVE_INDEX;
theta = -1f64 * (theta_1.abs() - theta_2);
}
println!("theta: {}", theta);
let boundary_point = vec![point[0], a * point[0].powi(2) + b * point[0] + c];
let new_length = (boundary_point[1] - point[1]) / *CORNEA_REFRACTIVE_INDEX;
new_point.push(boundary_point[0] + new_length * theta.sin());
new_point.push(boundary_point[1] - new_length * theta.cos());
println!("old: {:?}, new: {:?}", point, new_point);
new_points.push(new_point);
}
new_points
}
/*
fn run_on_folder(folder: &str){
let mut total_time_ms = 0;
let mut iter = 0;
for entry in fs::read_dir(folder).unwrap(){
let entry = entry.unwrap();
let file_path = entry.path();
if!file_path.is_dir(){
let lines = read_file_into_lines(file_path.to_str().unwrap());
println!("process: {:?}", file_path);
// Create Data
let mut data = Rc::new(RefCell::new(Data{
x: Vec::<f64>::new(),
y: Vec::<f64>::new(),
n: 0
}));
for (i, line) in lines.iter().enumerate(){
let mut data = *data.borrow_mut();
let numbers: Vec<f64> = line.split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
//println!("data: {:?}", numbers);
if numbers.len() > 1{
data.x.push(*numbers.get(0).unwrap());
data.y.push(*numbers.get(1).unwrap());
data.n += 1;
}
}
//println!("Data: {:?}", data);
if data.borrow().n >= 3{
let fig_path: String = format!("./figs/{}.png", file_path.file_name().unwrap().to_str().unwrap());
let now = Instant::now();
let curve_params = curve_fitting(data); | println!("run {} iterations", iter);
println!("average time: {} microsecond", total_time_ms/ iter);
}
*/
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
if path.is_dir(){
//run_on_folder(&path.to_str().unwrap());
println!("TODO: fix bug in run folder");
}else{
let lines = read_file_into_lines(path.to_str().unwrap());
let mut total_time_micros = 0;
let mut iter = 0;
let mut file = File::create("cross_corrected.keypoint").unwrap();
file.write_all(b"scanId\teyeId\tbscanId\tpm0xmm\tpm0ymm\tpm0zmm\tpm1xmm\tpm1ymm\tpm1zmm\n");
for line in lines.iter().skip(1){
let elements: Vec<&str> = line.split(",").collect();
if elements.len() > 8{
let pm0: Vec<f64> = vec![elements[3], elements[4]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let pm1: Vec<f64> = vec![elements[5], elements[6]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let cp_x: Vec<f64> = elements[7].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let cp_y: Vec<f64> = elements[8].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let num_kp = cp_y.len();
let mut data = Rc::new(Data{
x: cp_x,
y: cp_y,
n: num_kp
});
//println!("{:?}", data.x);
if data.n >= 3{
let now = Instant::now();
let solver = match curve_fitting(Rc::clone(&data)){
Ok(p) => p,
Err(e) => panic!("can't reconstruct curve")
};
println!("{:?}", solver.x());
let mut add_points = vec![pm0, pm1];
let corrected_points = apply_optical_distortion_correction(&solver.x(), &add_points);
if elements[2] == "x"{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
corrected_points[0][0], 0, corrected_points[0][1],
corrected_points[1][0], 0, corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}else{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
-1.53, corrected_points[0][0], corrected_points[0][1],
-1.53, corrected_points[1][0], corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}
//println!("{:?}", corrected_points);
total_time_micros += now.elapsed().as_micros();
iter += 1;
add_points.extend(corrected_points);
let image_path = format!("./resource/TestMotility/OD/{}/0{}.bmp", elements[2], elements[0]);
println!("image: {}", image_path);
let fig_path: String = format!("./figs_mm/{}_{}.png", elements[0], elements[2]);
//plot_parabola(&data, &solver.x(), add_points, &fig_path, &image_path);
}
}else{
println!("total elements: {}", elements.len());
println!("Can't process {}", line);
}
}
println!("Total iteration: {}", iter);
println!("Average time: {}", total_time_micros / iter);
}
} | total_time_ms += now.elapsed().as_micros();
iter += 1;
}
}
} | random_line_split |
main.rs | use std::env;
use std::fs;
extern crate rgsl;
use std::rc::Rc;
use std::cell::RefCell;
use plotters::prelude::*;
use std::time::Instant;
use std::path::Path;
use std::io::{Error, ErrorKind};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use image::{imageops::FilterType, ImageFormat};
#[macro_use]
extern crate lazy_static;
lazy_static!{
static ref AIR_REFRACTIVE_INDEX:f64 = 1f64;
static ref CORNEA_REFRACTIVE_INDEX:f64 = 1.37;
}
macro_rules! clone {
(@param _) => ( _ );
(@param $x:ident) => ( mut $x );
($($n:ident),+ => move || $body:expr) => (
{
$( let $n = $n.clone(); )+
move || $body
}
);
($($n:ident),+ => move |$($p:tt),+| $body:expr) => (
{
$( let $n = $n.clone(); )+
move |$(clone!(@param $p),)+| $body
}
);
}
fn exp_f(x: &rgsl::VectorF64, f: &mut rgsl::VectorF64, data: &Data) -> rgsl::Value {
let a = x.get(0);
let b = x.get(1);
let c = x.get(2);
for (i, (x, y)) in data.x.iter().zip(data.y.iter()).enumerate(){
/* Model Yi = a * x^2 + b * x + c*/
let yi = a * x.powi(2) + b * x + c;
f.set(i, yi - y);
}
rgsl::Value::Success
}
fn exp_df(x: &rgsl::VectorF64, J: &mut rgsl::MatrixF64, data: &Data) -> rgsl::Value {
for (i, x) in data.x.iter().enumerate(){
/* Jacobian matrix J(i,j) = dfi / dxj, */
/* where fi = (Yi - yi)/sigma[i], */
/* Yi = A * exp(-lambda * i) + b */
/* and the xj are the parameters (A,lambda,b) */
J.set(i, 0, x.powi(2));
J.set(i, 1, *x);
J.set(i, 2, 1f64);
}
rgsl::Value::Success
}
fn print_state(iter: usize, s: &rgsl::MultiFitFdfSolver) {
println!("iter: {} x = {} {} {} |f(x)| = {}", iter,
s.x().get(0), s.x().get(1), s.x().get(2), rgsl::blas::level1::dnrm2(&s.f()));
}
#[derive(Debug)]
pub struct Data{
pub x: Vec<f64>,
pub y: Vec<f64>,
n: usize
}
fn read_file_into_lines(filename: &str)-> Vec<String>{
let contents = fs::read_to_string(
filename).expect(&format!("can't read file {}", filename));
let rows: Vec<String> = contents.split('\n').map(|s| s.to_string()).collect();
rows
}
fn bound_in_axis(x: f32, y: f32) -> bool{
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
x > x_low as f32 && x < x_high as f32 && y > y_low as f32 && y < y_high as f32
}
fn plot_parabola(data: &Data, params: &rgsl::VectorF64, additional_points: Vec<Vec<f64>>, fig_path: &String, image_path: &String){
let root = BitMapBackend::new(fig_path, (1024, 1024)).into_drawing_area();
root.fill(&WHITE).unwrap();
let mut chart = ChartBuilder::on(&root)
.caption("Optical Distortion Correction", ("sans-serif", 10).into_font())
.margin(5)
.x_label_area_size(30)
.y_label_area_size(30)
.build_ranged(8f32..-8f32, 16f32..0f32).unwrap();
//chart.configure_mesh().draw().unwrap();
// Plot background image
//let (w, h) = chart.plotting_area().dim_in_pixel();
//println!("plotting area: {}, {}", w, h);
//let image = image::load(
//BufReader::new(File::open(image_path).unwrap()),
//ImageFormat::Bmp,
//).unwrap()
//.resize_exact(16, 16, FilterType::Nearest);
//let elem: BitMapElement<_> = ((-8.0, 8.0), image).into();
//chart.draw_series(std::iter::once(elem)).unwrap();
// Draw Overlay points
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
// Draw parabola
chart.draw_series(LineSeries::new(
(x_low..=x_high).map(|x| {x as f32}).map(|x| (x, x.powi(2) * a as f32 + x * b as f32 + c as f32)),
&RED,
)).unwrap()
.label(format!("y = {:.3}x^2 + {:.3}x + {:.3}", a, b, c)).legend(|(x, y)| PathElement::new(vec![(x, y), (x + 2, y)], &RED));
// Draw cornea points
chart.draw_series(PointSeries::of_element(data.x.iter().zip(data.y.iter()).map(|(x, y)| (*x as f32, *y as f32)), 2, ShapeStyle::from(&RED).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Plot pupil margins
chart.draw_series(PointSeries::of_element(additional_points.iter().map(|v| (v[0] as f32, v[1] as f32)), 3, ShapeStyle::from(&BLUE).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Draw inbound ray
let pm0_x = additional_points[0][0];
let pm0_boundary = vec![pm0_x, a * pm0_x.powi(2) + b * pm0_x + c];
let pm1_x = additional_points[1][0];
let pm1_boundary = vec![pm1_x, a * pm1_x.powi(2) + b * pm1_x + c];
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm0_x as f32, y)),
&BLUE)).unwrap();
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm1_x as f32, y)),
&BLUE)).unwrap();
// Draw tangent line
let tangent_line_0_k = 2f64 * a * pm0_x + b;
let tangent_line_0_b = pm0_boundary[1] - tangent_line_0_k * pm0_x;
let mut tangent_line_low = ((pm0_x - 2f64) * 10f64) as i32;
let mut tangent_line_high = ((pm0_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k as f32 * x + tangent_line_0_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_0_k_vert = -1f64 / tangent_line_0_k;
let tangent_line_0_b_vert = pm0_boundary[1] - (-1f64 / tangent_line_0_k) * pm0_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k_vert as f32 * x + tangent_line_0_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Draw tangent line
let tangent_line_1_k = 2f64 * a * pm1_x + b;
let tangent_line_1_b = pm1_boundary[1] - tangent_line_1_k * pm1_x;
tangent_line_low = ((pm1_x - 2f64) * 10f64) as i32;
tangent_line_high = ((pm1_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k as f32 * x + tangent_line_1_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_1_k_vert = -1f64 / tangent_line_1_k;
let tangent_line_1_b_vert = pm1_boundary[1] - (-1f64 / tangent_line_1_k) * pm1_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k_vert as f32 * x + tangent_line_1_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
chart
.configure_series_labels()
.background_style(&WHITE.mix(0.8))
.border_style(&BLACK)
.draw().unwrap();
}
fn curve_fitting(data: Rc<Data>) -> Result<rgsl::MultiFitFdfSolver, Error>{
//Initialize Parameters
let num_params = 3;
let num_instances = data.n;
let mut status = rgsl::Value::Success;
let mut J = match rgsl::MatrixF64::new(num_params, num_params){
Some(mat) => mat,
None => return Err(Error::new(ErrorKind::Other, "Can't create Jacobian matrix"))
};
let mut params_init = [0f64, -1f64, 100f64];
let mut params = rgsl::VectorView::from_array(&mut params_init);
rgsl::RngType::env_setup();
let rng_type = rgsl::rng::default();
let mut rng = match rgsl::Rng::new(&rng_type){
Some(r) => r,
None => return Err(Error::new(ErrorKind::Other, "Can't create rng"))
};
let solver_type = rgsl::MultiFitFdfSolverType::lmsder();
let mut func = rgsl::MultiFitFunctionFdf::new(num_instances, num_params);
let expb_f = clone!(data => move |x, f| {
exp_f(&x, &mut f, &*data)
});
func.f = Some(Box::new(expb_f));
let expb_df = clone!(data => move |x, J| {
exp_df(&x, &mut J, &*data)
});
func.df = Some(Box::new(expb_df));
let expb_fdf = clone!(data => move |x, f, J| {
exp_f(&x, &mut f, &*data);
exp_df(&x, &mut J, &*data);
rgsl::Value::Success
});
func.fdf = Some(Box::new(expb_fdf));
// Create a solver
let mut solver = match rgsl::MultiFitFdfSolver::new(&solver_type, num_instances, num_params){
Some(s) => s,
None => return Err(Error::new(ErrorKind::Other, "can't create solver"))
};
solver.set(&mut func, ¶ms.vector());
let mut iter = 0;
loop {
iter += 1;
status = solver.iterate();
println!("status = {}", rgsl::error::str_error(status));
print_state(iter, &solver);
if status!= rgsl::Value::Success {
//return Err(Error::new(ErrorKind::TimedOut, "Reconstruction failed"));
break;
}
//println!("dx: {:?}", &solver.dx());
//println!("position: {:?}", &s.position());
status = rgsl::multifit::test_delta(&solver.dx(), &solver.x(), 1e-4, 1e-4);
if status!= rgsl::Value::Continue || iter >= 500 {
break;
}
}
println!("Done");
println!("params: {:?}", &solver.x());
Ok(solver)
}
fn apply_optical_distortion_correction(params: &rgsl::VectorF64, points: &Vec<Vec<f64>>) -> Vec<Vec<f64>> | println!("theta: {}", theta);
let boundary_point = vec![point[0], a * point[0].powi(2) + b * point[0] + c];
let new_length = (boundary_point[1] - point[1]) / *CORNEA_REFRACTIVE_INDEX;
new_point.push(boundary_point[0] + new_length * theta.sin());
new_point.push(boundary_point[1] - new_length * theta.cos());
println!("old: {:?}, new: {:?}", point, new_point);
new_points.push(new_point);
}
new_points
}
/*
fn run_on_folder(folder: &str){
let mut total_time_ms = 0;
let mut iter = 0;
for entry in fs::read_dir(folder).unwrap(){
let entry = entry.unwrap();
let file_path = entry.path();
if!file_path.is_dir(){
let lines = read_file_into_lines(file_path.to_str().unwrap());
println!("process: {:?}", file_path);
// Create Data
let mut data = Rc::new(RefCell::new(Data{
x: Vec::<f64>::new(),
y: Vec::<f64>::new(),
n: 0
}));
for (i, line) in lines.iter().enumerate(){
let mut data = *data.borrow_mut();
let numbers: Vec<f64> = line.split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
//println!("data: {:?}", numbers);
if numbers.len() > 1{
data.x.push(*numbers.get(0).unwrap());
data.y.push(*numbers.get(1).unwrap());
data.n += 1;
}
}
//println!("Data: {:?}", data);
if data.borrow().n >= 3{
let fig_path: String = format!("./figs/{}.png", file_path.file_name().unwrap().to_str().unwrap());
let now = Instant::now();
let curve_params = curve_fitting(data);
total_time_ms += now.elapsed().as_micros();
iter += 1;
}
}
}
println!("run {} iterations", iter);
println!("average time: {} microsecond", total_time_ms/ iter);
}
*/
fn main() {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
if path.is_dir(){
//run_on_folder(&path.to_str().unwrap());
println!("TODO: fix bug in run folder");
}else{
let lines = read_file_into_lines(path.to_str().unwrap());
let mut total_time_micros = 0;
let mut iter = 0;
let mut file = File::create("cross_corrected.keypoint").unwrap();
file.write_all(b"scanId\teyeId\tbscanId\tpm0xmm\tpm0ymm\tpm0zmm\tpm1xmm\tpm1ymm\tpm1zmm\n");
for line in lines.iter().skip(1){
let elements: Vec<&str> = line.split(",").collect();
if elements.len() > 8{
let pm0: Vec<f64> = vec![elements[3], elements[4]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let pm1: Vec<f64> = vec![elements[5], elements[6]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let cp_x: Vec<f64> = elements[7].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let cp_y: Vec<f64> = elements[8].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let num_kp = cp_y.len();
let mut data = Rc::new(Data{
x: cp_x,
y: cp_y,
n: num_kp
});
//println!("{:?}", data.x);
if data.n >= 3{
let now = Instant::now();
let solver = match curve_fitting(Rc::clone(&data)){
Ok(p) => p,
Err(e) => panic!("can't reconstruct curve")
};
println!("{:?}", solver.x());
let mut add_points = vec![pm0, pm1];
let corrected_points = apply_optical_distortion_correction(&solver.x(), &add_points);
if elements[2] == "x"{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
corrected_points[0][0], 0, corrected_points[0][1],
corrected_points[1][0], 0, corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}else{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
-1.53, corrected_points[0][0], corrected_points[0][1],
-1.53, corrected_points[1][0], corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}
//println!("{:?}", corrected_points);
total_time_micros += now.elapsed().as_micros();
iter += 1;
add_points.extend(corrected_points);
let image_path = format!("./resource/TestMotility/OD/{}/0{}.bmp", elements[2], elements[0]);
println!("image: {}", image_path);
let fig_path: String = format!("./figs_mm/{}_{}.png", elements[0], elements[2]);
//plot_parabola(&data, &solver.x(), add_points, &fig_path, &image_path);
}
}else{
println!("total elements: {}", elements.len());
println!("Can't process {}", line);
}
}
println!("Total iteration: {}", iter);
println!("Average time: {}", total_time_micros / iter);
}
}
| {
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
let mut new_points: Vec<Vec<f64>> = Vec::new();
for point in points.iter(){
let k1 = 2f64 * a * point[0] + b;
let theta_1 = k1.atan();
let mut theta_2 = 0f64;
let mut theta = 0f64;
let mut new_point: Vec<f64> = Vec::new();
if theta_1 > 0f64{
theta_2 = theta_1 * *AIR_REFRACTIVE_INDEX/ *CORNEA_REFRACTIVE_INDEX;
theta = theta_1 - theta_2;
}else{
theta_2 = theta_1.abs() / *CORNEA_REFRACTIVE_INDEX;
theta = -1f64 * (theta_1.abs() - theta_2);
} | identifier_body |
main.rs | use std::env;
use std::fs;
extern crate rgsl;
use std::rc::Rc;
use std::cell::RefCell;
use plotters::prelude::*;
use std::time::Instant;
use std::path::Path;
use std::io::{Error, ErrorKind};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use image::{imageops::FilterType, ImageFormat};
#[macro_use]
extern crate lazy_static;
lazy_static!{
static ref AIR_REFRACTIVE_INDEX:f64 = 1f64;
static ref CORNEA_REFRACTIVE_INDEX:f64 = 1.37;
}
macro_rules! clone {
(@param _) => ( _ );
(@param $x:ident) => ( mut $x );
($($n:ident),+ => move || $body:expr) => (
{
$( let $n = $n.clone(); )+
move || $body
}
);
($($n:ident),+ => move |$($p:tt),+| $body:expr) => (
{
$( let $n = $n.clone(); )+
move |$(clone!(@param $p),)+| $body
}
);
}
fn exp_f(x: &rgsl::VectorF64, f: &mut rgsl::VectorF64, data: &Data) -> rgsl::Value {
let a = x.get(0);
let b = x.get(1);
let c = x.get(2);
for (i, (x, y)) in data.x.iter().zip(data.y.iter()).enumerate(){
/* Model Yi = a * x^2 + b * x + c*/
let yi = a * x.powi(2) + b * x + c;
f.set(i, yi - y);
}
rgsl::Value::Success
}
fn exp_df(x: &rgsl::VectorF64, J: &mut rgsl::MatrixF64, data: &Data) -> rgsl::Value {
for (i, x) in data.x.iter().enumerate(){
/* Jacobian matrix J(i,j) = dfi / dxj, */
/* where fi = (Yi - yi)/sigma[i], */
/* Yi = A * exp(-lambda * i) + b */
/* and the xj are the parameters (A,lambda,b) */
J.set(i, 0, x.powi(2));
J.set(i, 1, *x);
J.set(i, 2, 1f64);
}
rgsl::Value::Success
}
fn print_state(iter: usize, s: &rgsl::MultiFitFdfSolver) {
println!("iter: {} x = {} {} {} |f(x)| = {}", iter,
s.x().get(0), s.x().get(1), s.x().get(2), rgsl::blas::level1::dnrm2(&s.f()));
}
#[derive(Debug)]
pub struct Data{
pub x: Vec<f64>,
pub y: Vec<f64>,
n: usize
}
fn read_file_into_lines(filename: &str)-> Vec<String>{
let contents = fs::read_to_string(
filename).expect(&format!("can't read file {}", filename));
let rows: Vec<String> = contents.split('\n').map(|s| s.to_string()).collect();
rows
}
fn bound_in_axis(x: f32, y: f32) -> bool{
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
x > x_low as f32 && x < x_high as f32 && y > y_low as f32 && y < y_high as f32
}
fn plot_parabola(data: &Data, params: &rgsl::VectorF64, additional_points: Vec<Vec<f64>>, fig_path: &String, image_path: &String){
let root = BitMapBackend::new(fig_path, (1024, 1024)).into_drawing_area();
root.fill(&WHITE).unwrap();
let mut chart = ChartBuilder::on(&root)
.caption("Optical Distortion Correction", ("sans-serif", 10).into_font())
.margin(5)
.x_label_area_size(30)
.y_label_area_size(30)
.build_ranged(8f32..-8f32, 16f32..0f32).unwrap();
//chart.configure_mesh().draw().unwrap();
// Plot background image
//let (w, h) = chart.plotting_area().dim_in_pixel();
//println!("plotting area: {}, {}", w, h);
//let image = image::load(
//BufReader::new(File::open(image_path).unwrap()),
//ImageFormat::Bmp,
//).unwrap()
//.resize_exact(16, 16, FilterType::Nearest);
//let elem: BitMapElement<_> = ((-8.0, 8.0), image).into();
//chart.draw_series(std::iter::once(elem)).unwrap();
// Draw Overlay points
let x_low = -8;
let x_high = 8;
let y_low = 0;
let y_high = 16;
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
// Draw parabola
chart.draw_series(LineSeries::new(
(x_low..=x_high).map(|x| {x as f32}).map(|x| (x, x.powi(2) * a as f32 + x * b as f32 + c as f32)),
&RED,
)).unwrap()
.label(format!("y = {:.3}x^2 + {:.3}x + {:.3}", a, b, c)).legend(|(x, y)| PathElement::new(vec![(x, y), (x + 2, y)], &RED));
// Draw cornea points
chart.draw_series(PointSeries::of_element(data.x.iter().zip(data.y.iter()).map(|(x, y)| (*x as f32, *y as f32)), 2, ShapeStyle::from(&RED).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Plot pupil margins
chart.draw_series(PointSeries::of_element(additional_points.iter().map(|v| (v[0] as f32, v[1] as f32)), 3, ShapeStyle::from(&BLUE).filled(),
&|coord, size, style| {
EmptyElement::at(coord)
+ Circle::new((0, 0), size, style)
})).unwrap();
// Draw inbound ray
let pm0_x = additional_points[0][0];
let pm0_boundary = vec![pm0_x, a * pm0_x.powi(2) + b * pm0_x + c];
let pm1_x = additional_points[1][0];
let pm1_boundary = vec![pm1_x, a * pm1_x.powi(2) + b * pm1_x + c];
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm0_x as f32, y)),
&BLUE)).unwrap();
chart.draw_series(LineSeries::new(
(y_low..=y_high+8).map(|y| {y as f32}).map(|y| (pm1_x as f32, y)),
&BLUE)).unwrap();
// Draw tangent line
let tangent_line_0_k = 2f64 * a * pm0_x + b;
let tangent_line_0_b = pm0_boundary[1] - tangent_line_0_k * pm0_x;
let mut tangent_line_low = ((pm0_x - 2f64) * 10f64) as i32;
let mut tangent_line_high = ((pm0_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k as f32 * x + tangent_line_0_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_0_k_vert = -1f64 / tangent_line_0_k;
let tangent_line_0_b_vert = pm0_boundary[1] - (-1f64 / tangent_line_0_k) * pm0_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_0_k_vert as f32 * x + tangent_line_0_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
// Draw tangent line
let tangent_line_1_k = 2f64 * a * pm1_x + b;
let tangent_line_1_b = pm1_boundary[1] - tangent_line_1_k * pm1_x;
tangent_line_low = ((pm1_x - 2f64) * 10f64) as i32;
tangent_line_high = ((pm1_x + 2f64) * 10f64) as i32;
//println!("{}, {}", tangent_line_low, tangent_line_high);
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k as f32 * x + tangent_line_1_b as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
let tangent_line_1_k_vert = -1f64 / tangent_line_1_k;
let tangent_line_1_b_vert = pm1_boundary[1] - (-1f64 / tangent_line_1_k) * pm1_x;
chart.draw_series(LineSeries::new(
(tangent_line_low..=tangent_line_high).map(|x| x as f32 / 10f32).map(|x| (x, tangent_line_1_k_vert as f32 * x + tangent_line_1_b_vert as f32))
.filter(
|(x, y)| bound_in_axis(*x, *y))
,&BLACK
)).unwrap();
chart
.configure_series_labels()
.background_style(&WHITE.mix(0.8))
.border_style(&BLACK)
.draw().unwrap();
}
fn curve_fitting(data: Rc<Data>) -> Result<rgsl::MultiFitFdfSolver, Error>{
//Initialize Parameters
let num_params = 3;
let num_instances = data.n;
let mut status = rgsl::Value::Success;
let mut J = match rgsl::MatrixF64::new(num_params, num_params){
Some(mat) => mat,
None => return Err(Error::new(ErrorKind::Other, "Can't create Jacobian matrix"))
};
let mut params_init = [0f64, -1f64, 100f64];
let mut params = rgsl::VectorView::from_array(&mut params_init);
rgsl::RngType::env_setup();
let rng_type = rgsl::rng::default();
let mut rng = match rgsl::Rng::new(&rng_type){
Some(r) => r,
None => return Err(Error::new(ErrorKind::Other, "Can't create rng"))
};
let solver_type = rgsl::MultiFitFdfSolverType::lmsder();
let mut func = rgsl::MultiFitFunctionFdf::new(num_instances, num_params);
let expb_f = clone!(data => move |x, f| {
exp_f(&x, &mut f, &*data)
});
func.f = Some(Box::new(expb_f));
let expb_df = clone!(data => move |x, J| {
exp_df(&x, &mut J, &*data)
});
func.df = Some(Box::new(expb_df));
let expb_fdf = clone!(data => move |x, f, J| {
exp_f(&x, &mut f, &*data);
exp_df(&x, &mut J, &*data);
rgsl::Value::Success
});
func.fdf = Some(Box::new(expb_fdf));
// Create a solver
let mut solver = match rgsl::MultiFitFdfSolver::new(&solver_type, num_instances, num_params){
Some(s) => s,
None => return Err(Error::new(ErrorKind::Other, "can't create solver"))
};
solver.set(&mut func, ¶ms.vector());
let mut iter = 0;
loop {
iter += 1;
status = solver.iterate();
println!("status = {}", rgsl::error::str_error(status));
print_state(iter, &solver);
if status!= rgsl::Value::Success {
//return Err(Error::new(ErrorKind::TimedOut, "Reconstruction failed"));
break;
}
//println!("dx: {:?}", &solver.dx());
//println!("position: {:?}", &s.position());
status = rgsl::multifit::test_delta(&solver.dx(), &solver.x(), 1e-4, 1e-4);
if status!= rgsl::Value::Continue || iter >= 500 {
break;
}
}
println!("Done");
println!("params: {:?}", &solver.x());
Ok(solver)
}
fn apply_optical_distortion_correction(params: &rgsl::VectorF64, points: &Vec<Vec<f64>>) -> Vec<Vec<f64>>{
let a = params.get(0);
let b = params.get(1);
let c = params.get(2);
let mut new_points: Vec<Vec<f64>> = Vec::new();
for point in points.iter(){
let k1 = 2f64 * a * point[0] + b;
let theta_1 = k1.atan();
let mut theta_2 = 0f64;
let mut theta = 0f64;
let mut new_point: Vec<f64> = Vec::new();
if theta_1 > 0f64{
theta_2 = theta_1 * *AIR_REFRACTIVE_INDEX/ *CORNEA_REFRACTIVE_INDEX;
theta = theta_1 - theta_2;
}else{
theta_2 = theta_1.abs() / *CORNEA_REFRACTIVE_INDEX;
theta = -1f64 * (theta_1.abs() - theta_2);
}
println!("theta: {}", theta);
let boundary_point = vec![point[0], a * point[0].powi(2) + b * point[0] + c];
let new_length = (boundary_point[1] - point[1]) / *CORNEA_REFRACTIVE_INDEX;
new_point.push(boundary_point[0] + new_length * theta.sin());
new_point.push(boundary_point[1] - new_length * theta.cos());
println!("old: {:?}, new: {:?}", point, new_point);
new_points.push(new_point);
}
new_points
}
/*
fn run_on_folder(folder: &str){
let mut total_time_ms = 0;
let mut iter = 0;
for entry in fs::read_dir(folder).unwrap(){
let entry = entry.unwrap();
let file_path = entry.path();
if!file_path.is_dir(){
let lines = read_file_into_lines(file_path.to_str().unwrap());
println!("process: {:?}", file_path);
// Create Data
let mut data = Rc::new(RefCell::new(Data{
x: Vec::<f64>::new(),
y: Vec::<f64>::new(),
n: 0
}));
for (i, line) in lines.iter().enumerate(){
let mut data = *data.borrow_mut();
let numbers: Vec<f64> = line.split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
//println!("data: {:?}", numbers);
if numbers.len() > 1{
data.x.push(*numbers.get(0).unwrap());
data.y.push(*numbers.get(1).unwrap());
data.n += 1;
}
}
//println!("Data: {:?}", data);
if data.borrow().n >= 3{
let fig_path: String = format!("./figs/{}.png", file_path.file_name().unwrap().to_str().unwrap());
let now = Instant::now();
let curve_params = curve_fitting(data);
total_time_ms += now.elapsed().as_micros();
iter += 1;
}
}
}
println!("run {} iterations", iter);
println!("average time: {} microsecond", total_time_ms/ iter);
}
*/
fn | () {
let args: Vec<String> = env::args().collect();
let path = Path::new(&args[1]);
if path.is_dir(){
//run_on_folder(&path.to_str().unwrap());
println!("TODO: fix bug in run folder");
}else{
let lines = read_file_into_lines(path.to_str().unwrap());
let mut total_time_micros = 0;
let mut iter = 0;
let mut file = File::create("cross_corrected.keypoint").unwrap();
file.write_all(b"scanId\teyeId\tbscanId\tpm0xmm\tpm0ymm\tpm0zmm\tpm1xmm\tpm1ymm\tpm1zmm\n");
for line in lines.iter().skip(1){
let elements: Vec<&str> = line.split(",").collect();
if elements.len() > 8{
let pm0: Vec<f64> = vec![elements[3], elements[4]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let pm1: Vec<f64> = vec![elements[5], elements[6]].iter().filter_map(|e| e.parse::<f64>().ok()).collect();
let cp_x: Vec<f64> = elements[7].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let cp_y: Vec<f64> = elements[8].split(" ").filter_map(|v| v.parse::<f64>().ok()).collect();
let num_kp = cp_y.len();
let mut data = Rc::new(Data{
x: cp_x,
y: cp_y,
n: num_kp
});
//println!("{:?}", data.x);
if data.n >= 3{
let now = Instant::now();
let solver = match curve_fitting(Rc::clone(&data)){
Ok(p) => p,
Err(e) => panic!("can't reconstruct curve")
};
println!("{:?}", solver.x());
let mut add_points = vec![pm0, pm1];
let corrected_points = apply_optical_distortion_correction(&solver.x(), &add_points);
if elements[2] == "x"{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
corrected_points[0][0], 0, corrected_points[0][1],
corrected_points[1][0], 0, corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}else{
let output_str = format!("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n", elements[0],
elements[1], elements[2],
-1.53, corrected_points[0][0], corrected_points[0][1],
-1.53, corrected_points[1][0], corrected_points[1][1]);
file.write_all(output_str.as_bytes()).unwrap();
}
//println!("{:?}", corrected_points);
total_time_micros += now.elapsed().as_micros();
iter += 1;
add_points.extend(corrected_points);
let image_path = format!("./resource/TestMotility/OD/{}/0{}.bmp", elements[2], elements[0]);
println!("image: {}", image_path);
let fig_path: String = format!("./figs_mm/{}_{}.png", elements[0], elements[2]);
//plot_parabola(&data, &solver.x(), add_points, &fig_path, &image_path);
}
}else{
println!("total elements: {}", elements.len());
println!("Can't process {}", line);
}
}
println!("Total iteration: {}", iter);
println!("Average time: {}", total_time_micros / iter);
}
}
| main | identifier_name |
runner.rs | use graph::Graph;
use modules;
use num_cpus;
use runtime::{Environment, Runtime};
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::error::Error;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::sync::mpsc;
use std::thread;
use task::Task;
use term;
#[derive(Clone)]
pub struct EnvironmentSpec {
/// Script path.
path: PathBuf,
/// Script directory.
directory: PathBuf,
/// Module include paths.
include_paths: Vec<PathBuf>,
/// Global environment variables.
variables: Vec<(String, String)>,
/// Indicates if actually running tasks should be skipped.
dry_run: bool,
/// Indicates if up-to-date tasks should be run anyway.
always_run: bool,
/// Indicates task errors should be ignored.
keep_going: bool,
}
impl EnvironmentSpec {
/// Creates an environment from the environment specification.
pub fn create(&self) -> Result<Runtime, Box<Error>> {
// Prepare a new environment.
let environment = try!(Environment::new(self.path.clone()));
let runtime = Runtime::new(environment);
// Open standard library functions.
runtime.state().open_libs();
// Register modules.
modules::register_all(&runtime);
// Set include paths.
for path in &self.include_paths {
runtime.include_path(&path);
}
// Set the OS
runtime.state().push_string(if cfg!(windows) {
"windows"
} else {
"unix"
});
runtime.state().set_global("OS");
// Set configured variables.
for &(ref name, ref value) in &self.variables {
runtime.state().push(value.clone());
runtime.state().set_global(&name);
}
// Load the script.
try!(runtime.load());
Ok(runtime)
}
}
/// A task runner object that holds the state for defined tasks, dependencies, and the scripting
/// runtime.
pub struct Runner {
/// The current DAG for tasks.
graph: Graph,
/// The number of threads to use.
jobs: usize,
/// Environment specification.
spec: EnvironmentSpec,
/// Runtime local owned by the master thread.
runtime: Option<Runtime>,
}
impl Runner {
/// Creates a new runner instance.
pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> {
// By default, set the number of jobs to be one less than the number of available CPU cores.
let jobs = cmp::max(1, num_cpus::get() - 1);
let path = path.into();
let directory: PathBuf = match path.parent() {
Some(path) => path.into(),
None => {
return Err("failed to parse script directory".into());
}
};
Ok(Runner {
graph: Graph::new(),
jobs: jobs as usize,
spec: EnvironmentSpec {
path: path.into(),
directory: directory,
include_paths: Vec::new(),
variables: Vec::new(),
dry_run: false,
always_run: false,
keep_going: false,
},
runtime: None,
})
}
pub fn path(&self) -> &Path {
&self.spec.path
}
pub fn directory(&self) -> &Path {
&self.spec.directory
}
/// Sets "dry run" mode.
///
/// When in "dry run" mode, running tasks will operate as normal, except that no task's actions
/// will be actually run.
pub fn dry_run(&mut self) {
self.spec.dry_run = true;
}
/// Run all tasks even if they are up-to-date.
pub fn always_run(&mut self) {
self.spec.always_run = true;
}
/// Run all tasks even if they throw errors.
pub fn keep_going(&mut self) {
self.spec.keep_going = true;
}
/// Sets the number of threads to use to run tasks.
pub fn jobs(&mut self, jobs: usize) {
self.jobs = jobs;
}
/// Adds a path to Lua's require path for modules.
pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) {
self.spec.include_paths.push(path.into());
}
/// Sets a variable value.
pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) {
self.spec.variables.push((name.as_ref().to_string(), value.into()));
}
/// Load the script.
pub fn load(&mut self) -> Result<(), Box<Error>> {
if self.runtime.is_none() {
self.runtime = Some(try!(self.spec.create()));
}
Ok(())
}
/// Prints the list of named tasks for a script.
pub fn print_task_list(&mut self) {
let mut tasks = self.runtime().environment().tasks();
tasks.sort_by(|a, b| a.name().cmp(b.name()));
let mut out = term::stdout().unwrap();
println!("Available tasks:");
for task in tasks {
out.fg(term::color::BRIGHT_GREEN).unwrap();
write!(out, " {:16}", task.name()).unwrap();
out.reset().unwrap();
if let Some(ref description) = task.description() {
write!(out, "{}", description).unwrap();
}
writeln!(out, "").unwrap();
}
if let Some(ref default) = self.runtime().environment().default_task() {
println!("");
println!("Default task: {}", default);
}
}
/// Run the default task.
pub fn run_default(&mut self) -> Result<(), Box<Error>> {
if let Some(ref name) = self.runtime().environment().default_task() {
let tasks = vec![name];
self.run(&tasks)
} else {
Err("no default task defined".into())
}
}
/// Runs the specified list of tasks.
///
/// Tasks are run in parallel when possible during execution. The maximum number of parallel
/// jobs can be set with the `jobs()` method.
pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> {
// Resolve all tasks given.
for task in tasks {
try!(self.resolve_task(task));
}
// Determine the schedule of tasks to execute.
let mut queue = try!(self.graph.solve(!self.spec.always_run));
let task_count = queue.len();
let thread_count = cmp::min(self.jobs, task_count);
debug!("running {} task(s) across {} thread(s)",
task_count,
thread_count);
// Spawn one thread for each job.
let mut threads = Vec::new();
let mut free_threads: HashSet<usize> = HashSet::new();
let mut channels = Vec::new();
let (sender, receiver) = mpsc::channel::<Result<usize, usize>>();
// Spawn `jobs` number of threads (but no more than the task count!).
for thread_id in 0..thread_count {
let spec = self.spec.clone();
let thread_sender = sender.clone();
let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0);
channels.push(parent_sender);
threads.push(thread::spawn(move || {
// Prepare a new runtime.
let runtime = spec.create().unwrap_or_else(|e| {
error!("{}", e);
panic!();
});
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
}
// Begin executing tasks!
while let Ok((name, task_id)) = thread_receiver.recv() {
info!("running task '{}' ({} of {})", name, task_id, task_count);
// Lookup the task to run.
let task = {
// Lookup the task to run.
if let Some(task) = runtime.environment().get_task(&name) {
task as Rc<Task>
}
// Find a rule that matches the task name.
else if let Some(rule) = runtime.environment()
.rules()
.iter()
.find(|rule| rule.matches(&name)) {
Rc::new(rule.create_task(name).unwrap()) as Rc<Task>
}
// No matching task.
else {
panic!("no matching task or rule for '{}'", name);
}
};
// Check for dry run.
if!spec.dry_run {
if let Err(e) = task.run() {
// If we ought to keep going, just issue a warning.
if spec.keep_going {
warn!("ignoring error: {}", e);
} else {
error!("{}", e);
thread_sender.send(Err(thread_id)).unwrap();
return;
}
}
} else {
info!("would run task '{}'", task.name());
}
if thread_sender.send(Ok(thread_id)).is_err() {
trace!("thread {} failed to send channel", thread_id);
break;
}
}
}))
}
drop(sender);
// Keep track of tasks completed and tasks in progress.
let mut completed_tasks: HashSet<String> = HashSet::new();
let mut current_tasks: HashMap<usize, String> = HashMap::new();
let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect();
while!queue.is_empty() ||!current_tasks.is_empty() {
// Wait for a thread to request a task.
let result = receiver.recv().unwrap();
// If the thread sent an error, we should stop everything if keep_going isn't enabled.
if let Err(thread_id) = result {
debug!("thread {} errored, waiting for remaining tasks...",
thread_id);
return Err("not all tasks completed successfully".into());
}
let thread_id = result.unwrap();
free_threads.insert(thread_id);
trace!("thread {} is idle", thread_id);
// If the thread was previously running a task, mark it as completed.
if let Some(task) = current_tasks.remove(&thread_id) {
trace!("task '{}' completed", task);
completed_tasks.insert(task);
}
// Attempt to schedule more tasks to run. The most we can schedule is the number of free
// threads, but it is limited by the number of tasks that have their dependencies already
// finished.
'schedule: for _ in 0..free_threads.len() {
// If the queue is empty, we are done.
if queue.is_empty() {
break;
}
// Check the next task in the queue. If any of its dependencies have not yet been
// completed, we cannot schedule it yet.
for dependency in queue.front().unwrap().dependencies() {
// Check that the dependency needs scheduled at all (some are already satisfied),
// and that it hasn't already finished.
if all_tasks.contains(dependency) &&!completed_tasks.contains(dependency) {
// We can't run the next task, so we're done scheduling for now until another
// thread finishes.
break'schedule;
}
}
// Get the available task from the queue.
let task = queue.front().unwrap().clone();
// Pick a free thread to run the task in.
if let Some(thread_id) = free_threads.iter().next().map(|t| *t) {
trace!("scheduling task '{}' on thread {}", task.name(), thread_id);
let data = (task.name().to_string(), task_count - queue.len() + 1);
// Send the task name.
if channels[thread_id].send(data).is_ok() {
current_tasks.insert(thread_id, task.name().to_string());
free_threads.remove(&thread_id);
// Scheduling was successful, so remove the task frome the queue.
queue.pop_front().unwrap();
} else {
trace!("failed to send channel to thread {}", thread_id);
}
} else {
// We can schedule now, but there aren't any free threads. 😢
break;
}
}
}
// Close the input and wait for any remaining threads to finish.
drop(channels);
for (thread_id, thread) in threads.into_iter().enumerate() {
if let Err(e) = thread.join() {
trace!("thread {} closed with panic: {:?}", thread_id, e);
}
}
info!("all tasks up to date");
Ok(())
}
fn resolve_task<S: AsRef<str>>(&mut self, name: S) -> Result<(), Box<Error>> {
if!self.graph.contains(&name) {
// Lookup the task to run.
if let Some(task) = self.runtime().environment().get_task(&name) {
debug!("task '{}' matches named task", name.as_ref());
self.graph.insert(task.clone());
}
// Find a rule that matches the task name.
else if let Some(rule) = self.runtime()
.environment()
.rules()
.iter()
.find(|rule| rule.matches(&name)) {
debug!("task '{}' matches rule '{}'", name.as_ref(), rule.pattern);
// Create a task for the rule and insert it in the graph.
self.graph.insert(Rc::new(rule.create_task(name.as_ref()).unwrap()));
}
// No matching task.
else {
| }
for dependency in self.graph.get(name).unwrap().dependencies() {
if!self.graph.contains(dependency) {
try!(self.resolve_task(dependency));
}
}
Ok(())
}
/// Returns a clone of the master thread's runtime.
///
/// Panics if the script has not been loaded yet (`self.runtime` is `None`
/// until `load()` succeeds).
fn runtime(&self) -> Runtime {
    self.runtime.as_ref().unwrap().clone()
}
}
| return Err(format!("no matching task or rule for '{}'", name.as_ref()).into());
}
| conditional_block |
runner.rs | use graph::Graph;
use modules;
use num_cpus;
use runtime::{Environment, Runtime};
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::error::Error;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::sync::mpsc;
use std::thread;
use task::Task;
use term;
/// A cloneable specification of a script environment.
///
/// `run()` clones one spec into each worker thread, which then calls
/// `create()` to build its own private `Runtime` with identical settings.
#[derive(Clone)]
pub struct EnvironmentSpec {
    /// Script path.
    path: PathBuf,
    /// Script directory.
    directory: PathBuf,
    /// Module include paths.
    include_paths: Vec<PathBuf>,
    /// Global environment variables as (name, value) pairs.
    variables: Vec<(String, String)>,
    /// Indicates if actually running tasks should be skipped.
    dry_run: bool,
    /// Indicates if up-to-date tasks should be run anyway.
    always_run: bool,
    /// Indicates task errors should be ignored (warn and continue).
    keep_going: bool,
}
impl EnvironmentSpec {
    /// Creates an environment from the environment specification.
    ///
    /// Builds a fresh `Runtime`: opens the standard library functions,
    /// registers built-in modules, applies include paths, exposes the host
    /// OS family as the `OS` global ("windows" or "unix"), sets configured
    /// variables as globals, and finally loads the script itself.
    pub fn create(&self) -> Result<Runtime, Box<Error>> {
        // Prepare a new environment.
        let environment = try!(Environment::new(self.path.clone()));
        let runtime = Runtime::new(environment);
        // Open standard library functions.
        runtime.state().open_libs();
        // Register modules.
        modules::register_all(&runtime);
        // Set include paths.
        for path in &self.include_paths {
            runtime.include_path(&path);
        }
        // Push the OS name, then bind it to the `OS` global.
        // (push + set_global pairs must stay in this order.)
        runtime.state().push_string(if cfg!(windows) {
            "windows"
        } else {
            "unix"
        });
        runtime.state().set_global("OS");
        // Set configured variables.
        for &(ref name, ref value) in &self.variables {
            runtime.state().push(value.clone());
            runtime.state().set_global(&name);
        }
        // Load the script.
        try!(runtime.load());
        Ok(runtime)
    }
}
/// A task runner object that holds the state for defined tasks, dependencies, and the scripting
/// runtime.
pub struct Runner {
    /// The current DAG for tasks.
    graph: Graph,
    /// The number of worker threads to use when running tasks.
    jobs: usize,
    /// Environment specification, cloned into each worker thread.
    spec: EnvironmentSpec,
    /// Runtime local owned by the master thread; `None` until `load()` is called.
    runtime: Option<Runtime>,
}
impl Runner {
/// Creates a new runner for the script at the given path.
///
/// The job count defaults to one less than the number of logical CPU
/// cores (but at least 1), leaving a core free for the master thread.
///
/// Returns an error if the script path has no parent directory.
pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> {
    // By default, set the number of jobs to be one less than the number of
    // available CPU cores (num_cpus::get() is always >= 1).
    let jobs = cmp::max(1, num_cpus::get() - 1);
    let path = path.into();
    // Bind the parent to a distinct name instead of shadowing `path`.
    let directory: PathBuf = match path.parent() {
        Some(parent) => parent.into(),
        None => {
            return Err("failed to parse script directory".into());
        }
    };
    Ok(Runner {
        graph: Graph::new(),
        // `jobs` is already a usize; no cast needed.
        jobs: jobs,
        spec: EnvironmentSpec {
            // `path` is already a PathBuf; the extra `.into()` was a no-op.
            path: path,
            directory: directory,
            include_paths: Vec::new(),
            variables: Vec::new(),
            dry_run: false,
            always_run: false,
            keep_going: false,
        },
        runtime: None,
    })
}
/// Returns the path to the script file.
pub fn path(&self) -> &Path {
    &self.spec.path
}
/// Returns the directory containing the script file.
pub fn directory(&self) -> &Path {
    &self.spec.directory
}
/// Sets "dry run" mode.
///
/// When in "dry run" mode, running tasks will operate as normal, except that no task's actions
/// will be actually run.
pub fn dry_run(&mut self) {
self.spec.dry_run = true;
}
/// Run all tasks even if they are up-to-date.
pub fn | (&mut self) {
self.spec.always_run = true;
}
/// Run all tasks even if they throw errors.
///
/// With this set, a failing task is downgraded to a warning in `run()`
/// instead of aborting the whole run.
pub fn keep_going(&mut self) {
    self.spec.keep_going = true;
}
/// Sets the number of threads to use to run tasks.
pub fn jobs(&mut self, jobs: usize) {
self.jobs = jobs;
}
/// Adds a path to Lua's require path for modules.
///
/// Include paths are applied to every runtime created from the spec.
pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) {
    self.spec.include_paths.push(path.into());
}
/// Sets a variable value.
///
/// The variable is exposed as a global in every runtime created from the
/// spec.
pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) {
    self.spec.variables.push((name.as_ref().to_string(), value.into()));
}
/// Load the script.
///
/// Creates the master runtime from the environment spec on first call;
/// subsequent calls are no-ops.
pub fn load(&mut self) -> Result<(), Box<Error>> {
    if self.runtime.is_none() {
        self.runtime = Some(try!(self.spec.create()));
    }
    Ok(())
}
/// Prints the list of named tasks for a script.
///
/// Tasks are listed alphabetically with their descriptions (names colored
/// green), followed by the default task, if one is configured.
pub fn print_task_list(&mut self) {
    let mut tasks = self.runtime().environment().tasks();
    tasks.sort_by(|a, b| a.name().cmp(b.name()));
    // NOTE(review): term::stdout() returns None when stdout is not a
    // terminal, so this unwrap can panic when output is piped — confirm.
    let mut out = term::stdout().unwrap();
    println!("Available tasks:");
    for task in tasks {
        out.fg(term::color::BRIGHT_GREEN).unwrap();
        write!(out, " {:16}", task.name()).unwrap();
        out.reset().unwrap();
        if let Some(ref description) = task.description() {
            write!(out, "{}", description).unwrap();
        }
        writeln!(out, "").unwrap();
    }
    if let Some(ref default) = self.runtime().environment().default_task() {
        println!("");
        println!("Default task: {}", default);
    }
}
/// Runs the task configured as the script's default.
///
/// Fails with an error if the script does not define a default task.
pub fn run_default(&mut self) -> Result<(), Box<Error>> {
    match self.runtime().environment().default_task() {
        Some(name) => self.run(&[name]),
        None => Err("no default task defined".into()),
    }
}
/// Runs the specified list of tasks.
///
/// Tasks are run in parallel when possible during execution. The maximum number of parallel
/// jobs can be set with the `jobs()` method.
///
/// Protocol: each worker reports itself idle over the shared channel
/// (`Ok(thread_id)`) after start-up and after each task; the scheduler
/// replies with the next `(task name, task number)` over that worker's
/// private rendezvous channel. A worker sends `Err(thread_id)` and exits
/// when a task fails and `keep_going` is not set.
pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> {
    // Resolve all tasks given, pulling them and their dependencies into the graph.
    for task in tasks {
        try!(self.resolve_task(task));
    }
    // Determine the schedule of tasks to execute (dependency order).
    let mut queue = try!(self.graph.solve(!self.spec.always_run));
    let task_count = queue.len();
    let thread_count = cmp::min(self.jobs, task_count);
    debug!("running {} task(s) across {} thread(s)",
           task_count,
           thread_count);
    // Spawn one thread for each job.
    let mut threads = Vec::new();
    let mut free_threads: HashSet<usize> = HashSet::new();
    let mut channels = Vec::new();
    let (sender, receiver) = mpsc::channel::<Result<usize, usize>>();
    // Spawn `jobs` number of threads (but no more than the task count!).
    for thread_id in 0..thread_count {
        let spec = self.spec.clone();
        let thread_sender = sender.clone();
        // Zero-capacity channel: the scheduler blocks until the worker accepts.
        let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0);
        channels.push(parent_sender);
        threads.push(thread::spawn(move || {
            // Prepare a new runtime, private to this thread.
            let runtime = spec.create().unwrap_or_else(|e| {
                error!("{}", e);
                panic!();
            });
            // Report ready for work.
            if thread_sender.send(Ok(thread_id)).is_err() {
                trace!("thread {} failed to send channel", thread_id);
            }
            // Begin executing tasks!
            while let Ok((name, task_id)) = thread_receiver.recv() {
                info!("running task '{}' ({} of {})", name, task_id, task_count);
                // Lookup the task to run.
                let task = {
                    // First, check for a task registered under this exact name.
                    if let Some(task) = runtime.environment().get_task(&name) {
                        task as Rc<Task>
                    }
                    // Find a rule that matches the task name.
                    else if let Some(rule) = runtime.environment()
                        .rules()
                        .iter()
                        .find(|rule| rule.matches(&name)) {
                        Rc::new(rule.create_task(name).unwrap()) as Rc<Task>
                    }
                    // No matching task.
                    else {
                        panic!("no matching task or rule for '{}'", name);
                    }
                };
                // Check for dry run.
                if !spec.dry_run {
                    if let Err(e) = task.run() {
                        // If we ought to keep going, just issue a warning.
                        if spec.keep_going {
                            warn!("ignoring error: {}", e);
                        } else {
                            error!("{}", e);
                            thread_sender.send(Err(thread_id)).unwrap();
                            return;
                        }
                    }
                } else {
                    info!("would run task '{}'", task.name());
                }
                // Report idle again; stop if the scheduler has gone away.
                if thread_sender.send(Ok(thread_id)).is_err() {
                    trace!("thread {} failed to send channel", thread_id);
                    break;
                }
            }
        }))
    }
    // Drop our copy of the sender so recv() disconnects once all workers exit.
    drop(sender);
    // Keep track of tasks completed and tasks in progress.
    let mut completed_tasks: HashSet<String> = HashSet::new();
    let mut current_tasks: HashMap<usize, String> = HashMap::new();
    let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect();
    while !queue.is_empty() || !current_tasks.is_empty() {
        // Wait for a thread to request a task.
        let result = receiver.recv().unwrap();
        // If the thread sent an error, we should stop everything if keep_going isn't enabled.
        if let Err(thread_id) = result {
            // NOTE(review): despite the log message, this returns without
            // joining the worker threads — confirm this is intended.
            debug!("thread {} errored, waiting for remaining tasks...",
                   thread_id);
            return Err("not all tasks completed successfully".into());
        }
        let thread_id = result.unwrap();
        free_threads.insert(thread_id);
        trace!("thread {} is idle", thread_id);
        // If the thread was previously running a task, mark it as completed.
        if let Some(task) = current_tasks.remove(&thread_id) {
            trace!("task '{}' completed", task);
            completed_tasks.insert(task);
        }
        // Attempt to schedule more tasks to run. The most we can schedule is the number of free
        // threads, but it is limited by the number of tasks that have their dependencies already
        // finished.
        'schedule: for _ in 0..free_threads.len() {
            // If the queue is empty, we are done.
            if queue.is_empty() {
                break;
            }
            // Check the next task in the queue. If any of its dependencies have not yet been
            // completed, we cannot schedule it yet.
            for dependency in queue.front().unwrap().dependencies() {
                // Check that the dependency needs scheduled at all (some are already satisfied),
                // and that it hasn't already finished.
                if all_tasks.contains(dependency) && !completed_tasks.contains(dependency) {
                    // We can't run the next task, so we're done scheduling for now until another
                    // thread finishes.
                    break 'schedule;
                }
            }
            // Get the available task from the queue.
            let task = queue.front().unwrap().clone();
            // Pick a free thread to run the task in.
            if let Some(thread_id) = free_threads.iter().next().map(|t| *t) {
                trace!("scheduling task '{}' on thread {}", task.name(), thread_id);
                let data = (task.name().to_string(), task_count - queue.len() + 1);
                // Send the task name.
                if channels[thread_id].send(data).is_ok() {
                    current_tasks.insert(thread_id, task.name().to_string());
                    free_threads.remove(&thread_id);
                    // Scheduling was successful, so remove the task from the queue.
                    queue.pop_front().unwrap();
                } else {
                    trace!("failed to send channel to thread {}", thread_id);
                }
            } else {
                // We can schedule now, but there aren't any free threads. 😢
                break;
            }
        }
    }
    // Close the input and wait for any remaining threads to finish.
    drop(channels);
    for (thread_id, thread) in threads.into_iter().enumerate() {
        if let Err(e) = thread.join() {
            trace!("thread {} closed with panic: {:?}", thread_id, e);
        }
    }
    info!("all tasks up to date");
    Ok(())
}
/// Ensures the named task (and, recursively, its dependencies) is present
/// in the task graph.
///
/// An explicitly defined task wins over a rule whose pattern matches the
/// name; if neither exists, resolution fails with an error.
fn resolve_task<S: AsRef<str>>(&mut self, name: S) -> Result<(), Box<Error>> {
    if !self.graph.contains(&name) {
        // Lookup the task to run.
        if let Some(task) = self.runtime().environment().get_task(&name) {
            debug!("task '{}' matches named task", name.as_ref());
            self.graph.insert(task.clone());
        }
        // Find a rule that matches the task name.
        else if let Some(rule) = self.runtime()
            .environment()
            .rules()
            .iter()
            .find(|rule| rule.matches(&name)) {
            debug!("task '{}' matches rule '{}'", name.as_ref(), rule.pattern);
            // Create a task for the rule and insert it in the graph.
            self.graph.insert(Rc::new(rule.create_task(name.as_ref()).unwrap()));
        }
        // No matching task.
        else {
            return Err(format!("no matching task or rule for '{}'", name.as_ref()).into());
        }
    }
    // Recursively resolve any dependencies not yet in the graph. The
    // `contains` check also keeps a dependency cycle from recursing forever.
    for dependency in self.graph.get(name).unwrap().dependencies() {
        if !self.graph.contains(dependency) {
            try!(self.resolve_task(dependency));
        }
    }
    Ok(())
}
/// Returns a clone of the master thread's runtime.
///
/// Panics with a descriptive message if `load()` has not been called yet;
/// the bare `unwrap()` gave no hint about the misuse.
fn runtime(&self) -> Runtime {
    self.runtime
        .as_ref()
        .expect("script not loaded; call load() first")
        .clone()
}
}
| always_run | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.