file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 values) |
---|---|---|---|---|
imp.rs | use super::{
anyhow, env, env_logger, remove_file, DateTime, PathBuf, Receiver, RefCell, Releaser, Result,
Url, Utc, Version, UPDATE_INTERVAL,
};
use crate::Updater;
use std::cell::Cell;
use std::cell::Ref;
use std::cell::RefMut;
use std::path::Path;
use std::sync::mpsc;
pub(super) const LATEST_UPDATE_INFO_CACHE_FN_ASYNC: &str = "last_check_status_async.json";
// Payload that the worker thread will send back
type ReleasePayloadResult = Result<Option<UpdateInfo>>;
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct UpdaterState {
pub(super) last_check: Cell<Option<DateTime<Utc>>>,
current_version: Version,
avail_release: RefCell<Option<UpdateInfo>>,
#[serde(skip, default = "default_interval")]
update_interval: i64,
#[serde(skip)]
worker_state: RefCell<Option<MPSCState>>,
}
impl UpdaterState {
pub(super) fn current_version(&self) -> &Version {
&self.current_version
}
pub(super) fn set_version(&mut self, v: Version) {
self.current_version = v;
}
pub(super) fn latest_avail_version(&self) -> Option<Version> {
self.avail_release
.borrow()
.as_ref()
.map(|ui| ui.version().clone())
}
pub(super) fn borrow_worker(&self) -> Ref<'_, Option<MPSCState>> {
self.worker_state.borrow()
}
pub(super) fn borrow_worker_mut(&self) -> RefMut<'_, Option<MPSCState>> {
self.worker_state.borrow_mut()
}
pub(super) fn download_url(&self) -> Option<Url> {
self.avail_release
.borrow()
.as_ref()
.map(|info| info.downloadable_url.clone())
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub(super) struct UpdateInfo {
// Latest version available from github or releaser
pub version: Version,
pub fetched_at: Option<DateTime<Utc>>,
// Link to use to download the above version
pub downloadable_url: Url,
}
impl UpdateInfo {
pub fn new(v: Version, url: Url) -> Self {
UpdateInfo {
version: v,
fetched_at: None,
downloadable_url: url,
}
}
pub(super) fn version(&self) -> &Version {
&self.version
}
pub(super) fn fetched_at(&self) -> Option<&DateTime<Utc>> {
self.fetched_at.as_ref()
}
pub(super) fn set_fetched_at(&mut self, date_time: DateTime<Utc>) {
self.fetched_at = Some(date_time);
}
}
#[derive(Debug)]
pub(super) struct MPSCState {
// First successful call on rx.recv() will cache the results into this field
recvd_payload: RefCell<Option<ReleasePayloadResult>>,
// Receiver end of communication channel with worker thread
rx: RefCell<Option<Receiver<ReleasePayloadResult>>>,
}
impl MPSCState {
pub(super) fn new(rx: mpsc::Receiver<ReleasePayloadResult>) -> Self {
MPSCState {
recvd_payload: RefCell::new(None),
rx: RefCell::new(Some(rx)),
}
}
}
impl<T> Updater<T>
where
T: Releaser + Send + 'static,
{
pub(super) fn load_or_new(r: T) -> Result<Self> {
let _ = env_logger::try_init();
if let Ok(mut saved_state) = Self::load() {
// Use the version that the workflow reports through the environment variable.
// This version takes priority over what we may have saved last time.
let env_ver = env::workflow_version().and_then(|v| Version::parse(&v).ok());
if let Some(v) = env_ver {
saved_state.current_version = v;
}
Ok(Updater {
state: saved_state,
releaser: RefCell::new(r),
})
} else {
let current_version = env::workflow_version()
.map_or_else(|| Ok(Version::new(0, 0, 0)), |v| Version::parse(&v))?;
let state = UpdaterState {
current_version,
last_check: Cell::new(None),
avail_release: RefCell::new(None),
worker_state: RefCell::new(None),
update_interval: UPDATE_INTERVAL,
};
let updater = Updater {
state,
releaser: RefCell::new(r),
};
updater.save()?;
Ok(updater)
}
}
pub(super) fn last_check(&self) -> Option<DateTime<Utc>> {
self.state.last_check.get()
}
pub(super) fn set_last_check(&self, t: DateTime<Utc>) {
self.state.last_check.set(Some(t));
}
pub(super) fn update_interval(&self) -> i64 {
self.state.update_interval
}
pub(super) fn set_update_interval(&mut self, t: i64) |
fn load() -> Result<UpdaterState> {
let data_file_path = Self::build_data_fn()?;
crate::Data::load_from_file(data_file_path)
.ok_or_else(|| anyhow!("cannot load cached state of updater"))
}
// Save updater's state
pub(super) fn save(&self) -> Result<()> {
let data_file_path = Self::build_data_fn()?;
crate::Data::save_to_file(&data_file_path, &self.state).map_err(|e| {
let _r = remove_file(data_file_path);
e
})
}
pub(super) fn start_releaser_worker(
&self,
tx: mpsc::Sender<ReleasePayloadResult>,
p: PathBuf,
) -> Result<()> {
use std::thread;
let releaser = (*self.releaser.borrow()).clone();
thread::Builder::new().spawn(move || {
debug!("other thread: starting in updater thread");
let talk_to_mother = || -> Result<()> {
let (v, url) = releaser.latest_release()?;
let mut info = UpdateInfo::new(v, url);
info.set_fetched_at(Utc::now());
let payload = Some(info);
Self::write_last_check_status(&p, &payload)?;
tx.send(Ok(payload))?;
Ok(())
};
let outcome = talk_to_mother();
debug!("other thread: finished checking releaser status");
if let Err(error) = outcome {
tx.send(Err(error))
.expect("could not send error from thread");
}
})?;
Ok(())
}
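// Flow sketch (explanatory note, not part of the original source; the init() behavior is
// inferred from the error strings used further below): init() presumably creates an mpsc
// channel, hands the Sender to start_releaser_worker(), and stores the Receiver in
// MPSCState. Later, update_ready_async() drains that Receiver (try_recv() vs. recv()
// depending on `try_flag`), caches the payload in `avail_release`, and compares it against
// current_version().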
// write version of latest avail. release (if any) to a cache file
pub(super) fn write_last_check_status(
p: &Path,
updater_info: &Option<UpdateInfo>,
) -> Result<()> {
crate::Data::save_to_file(p, updater_info).map_err(|e| {
let _r = remove_file(p);
e
})
}
// read version of latest avail. release (if any) from a cache file
pub(super) fn read_last_check_status(p: &Path) -> Result<Option<UpdateInfo>> {
crate::Data::load_from_file(p).ok_or_else(|| anyhow!("no data in given path"))
}
pub(super) fn build_data_fn() -> Result<PathBuf> {
let workflow_name = env::workflow_name()
.unwrap_or_else(|| "YouForgotTo/フ:NameYourOwnWork}flowッ".to_string())
.chars()
.map(|c| if c.is_ascii_alphanumeric() { c } else { '_' })
.collect::<String>();
env::workflow_cache()
.ok_or_else(|| {
anyhow!("missing env variable for cache dir. forgot to set workflow bundle id?")
})
.and_then(|mut data_path| {
env::workflow_uid()
.ok_or_else(|| anyhow!("missing env variable for uid"))
.map(|ref uid| {
let filename = [uid, "-", workflow_name.as_str(), "-updater.json"].concat();
data_path.push(filename);
data_path
})
})
}
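// Worked example (hypothetical values, for illustration only): with workflow_uid() ==
// Some("user.1234"), workflow_name() == Some("My Workflow"), and workflow_cache() ==
// Some("/cache"), every non-alphanumeric character in the name is mapped to '_', so
// build_data_fn() would return "/cache/user.1234-My_Workflow-updater.json".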
pub(super) fn update_ready_async(&self, try_flag: bool) -> Result<bool> {
self.state
.worker_state
.borrow()
.as_ref()
.ok_or_else(|| anyhow!("you need to use init() method first."))
.and_then(|mpsc| {
if mpsc.recvd_payload.borrow().is_none() {
// No payload received yet, try to talk to worker thread
mpsc.rx
.borrow()
.as_ref()
.ok_or_else(|| anyhow!("you need to use init() correctly!"))
.and_then(|rx| {
let rr = if try_flag {
// don't block while trying to receive
rx.try_recv().map_err(|e| anyhow!(e.to_string()))
} else {
// block while waiting to receive
rx.recv().map_err(|e| anyhow!(e.to_string()))
};
rr.and_then(|msg| {
let msg_status = msg.map(|update_info| {
// received good message, update cache for received payload
*self.state.avail_release.borrow_mut() = update_info.clone();
// update last_check if received info is newer than last_check
update_info.as_ref().map(|ui| {
ui.fetched_at().map(|fetched_time| {
if self.last_check().is_none()
|| self.last_check().as_ref().unwrap()
< fetched_time
{
self.set_last_check(*fetched_time);
}
})
});
*mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info));
});
// save state regardless of content of msg
self.save()?;
msg_status?;
Ok(())
})
})?;
}
Ok(())
})?;
Ok(self
.state
.avail_release
.borrow()
.as_ref()
.map_or(false, |release| *self.current_version() < release.version))
}
#[allow(dead_code)]
#[deprecated(note = "update_ready_async is deprecated. use init()")]
pub(super) fn _update_ready_async(&self) -> Result<bool> {
let worker_state = self.state.worker_state.borrow();
assert!(worker_state.is_some(), "you need to use init first");
let mpsc = worker_state.as_ref().expect("no worker_state");
if mpsc.recvd_payload.borrow().is_none() {
let rx_option = mpsc.rx.borrow();
let rx = rx_option.as_ref().unwrap();
let rr = rx.recv();
if rr.is_ok() {
let msg = rr.as_ref().unwrap();
if msg.is_ok() {
let update_info = msg.as_ref().unwrap();
*self.state.avail_release.borrow_mut() = update_info.clone();
*mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info.clone()));
} else {
return Err(anyhow!(format!("{:?}", msg.as_ref().unwrap_err())));
}
self.save()?;
} else {
eprintln!("{:?}", rr);
return Err(anyhow!(format!("{:?}", rr)));
}
}
if let Some(ref updater_info) = *self.state.avail_release.borrow() {
if *self.current_version() < updater_info.version {
Ok(true)
} else {
Ok(false)
}
} else {
Ok(false)
}
}
#[allow(dead_code)]
#[deprecated(note = "update_ready_sync is deprecated. use init()")]
pub(super) fn _update_ready_sync(&self) -> Result<bool> {
// A None value for last_check indicates that the workflow is being run for the first time.
// Thus we update last_check to now and just save the updater state without asking the
// Releaser to do a remote call/check for us, since we assume that the user just downloaded
// the workflow.
const LATEST_UPDATE_INFO_CACHE_FN: &str = "last_check_status.json";
// file for status of last update check
let p = Self::build_data_fn()?.with_file_name(LATEST_UPDATE_INFO_CACHE_FN);
// make a network call to see if a newer version is avail.
// save the result of call to cache file.
let ask_releaser_for_update = || -> Result<bool> {
let (v, url) = self.releaser.borrow().latest_release()?;
let update_avail = *self.current_version() < v;
let now = Utc::now();
let payload = {
let mut info = UpdateInfo::new(v, url);
info.set_fetched_at(now);
Some(info)
};
self.set_last_check(now);
Self::write_last_check_status(&p, &payload)?;
*self.state.avail_release.borrow_mut() = payload;
self.save()?;
Ok(update_avail)
};
// if first time checking, just update the updater's timestamp, no network call
if self.last_check().is_none() {
self.set_last_check(Utc::now());
self.save()?;
Ok(false)
} else if self.due_to_check() {
// it's time to talk to remote server
ask_releaser_for_update()
} else {
Self::read_last_check_status(&p)
.map(|last_check_status| {
last_check_status.map_or(false, |last_update_info| {
*self.current_version() < last_update_info.version
})
//.unwrap_or(false)
})
.or(Ok(false))
}
}
}
pub(super) fn default_interval() -> i64 {
UPDATE_INTERVAL
}
| {
self.state.update_interval = t;
} | identifier_body |
imp.rs | use super::{
anyhow, env, env_logger, remove_file, DateTime, PathBuf, Receiver, RefCell, Releaser, Result,
Url, Utc, Version, UPDATE_INTERVAL,
};
use crate::Updater;
use std::cell::Cell;
use std::cell::Ref;
use std::cell::RefMut;
use std::path::Path;
use std::sync::mpsc;
pub(super) const LATEST_UPDATE_INFO_CACHE_FN_ASYNC: &str = "last_check_status_async.json";
// Payload that the worker thread will send back
type ReleasePayloadResult = Result<Option<UpdateInfo>>;
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct UpdaterState {
pub(super) last_check: Cell<Option<DateTime<Utc>>>,
current_version: Version,
avail_release: RefCell<Option<UpdateInfo>>,
#[serde(skip, default = "default_interval")]
update_interval: i64,
#[serde(skip)]
worker_state: RefCell<Option<MPSCState>>,
}
impl UpdaterState {
pub(super) fn current_version(&self) -> &Version {
&self.current_version
}
pub(super) fn set_version(&mut self, v: Version) {
self.current_version = v;
}
pub(super) fn latest_avail_version(&self) -> Option<Version> {
self.avail_release
.borrow()
.as_ref()
.map(|ui| ui.version().clone())
}
pub(super) fn borrow_worker(&self) -> Ref<'_, Option<MPSCState>> {
self.worker_state.borrow()
}
pub(super) fn borrow_worker_mut(&self) -> RefMut<'_, Option<MPSCState>> {
self.worker_state.borrow_mut()
}
pub(super) fn download_url(&self) -> Option<Url> {
self.avail_release
.borrow()
.as_ref()
.map(|info| info.downloadable_url.clone())
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub(super) struct UpdateInfo {
// Latest version available from github or releaser
pub version: Version,
pub fetched_at: Option<DateTime<Utc>>,
// Link to use to download the above version
pub downloadable_url: Url,
}
impl UpdateInfo {
pub fn new(v: Version, url: Url) -> Self {
UpdateInfo {
version: v,
fetched_at: None,
downloadable_url: url,
}
}
pub(super) fn version(&self) -> &Version {
&self.version
}
pub(super) fn fetched_at(&self) -> Option<&DateTime<Utc>> {
self.fetched_at.as_ref()
}
pub(super) fn set_fetched_at(&mut self, date_time: DateTime<Utc>) {
self.fetched_at = Some(date_time);
}
}
#[derive(Debug)]
pub(super) struct MPSCState {
// First successful call on rx.recv() will cache the results into this field
recvd_payload: RefCell<Option<ReleasePayloadResult>>,
// Receiver end of communication channel with worker thread
rx: RefCell<Option<Receiver<ReleasePayloadResult>>>,
}
impl MPSCState {
pub(super) fn new(rx: mpsc::Receiver<ReleasePayloadResult>) -> Self {
MPSCState {
recvd_payload: RefCell::new(None),
rx: RefCell::new(Some(rx)),
}
}
}
impl<T> Updater<T>
where
T: Releaser + Send + 'static,
{
pub(super) fn load_or_new(r: T) -> Result<Self> {
let _ = env_logger::try_init();
if let Ok(mut saved_state) = Self::load() {
// Use the version that the workflow reports through the environment variable.
// This version takes priority over what we may have saved last time.
let env_ver = env::workflow_version().and_then(|v| Version::parse(&v).ok());
if let Some(v) = env_ver {
saved_state.current_version = v;
}
Ok(Updater {
state: saved_state,
releaser: RefCell::new(r),
})
} else |
}
pub(super) fn last_check(&self) -> Option<DateTime<Utc>> {
self.state.last_check.get()
}
pub(super) fn set_last_check(&self, t: DateTime<Utc>) {
self.state.last_check.set(Some(t));
}
pub(super) fn update_interval(&self) -> i64 {
self.state.update_interval
}
pub(super) fn set_update_interval(&mut self, t: i64) {
self.state.update_interval = t;
}
fn load() -> Result<UpdaterState> {
let data_file_path = Self::build_data_fn()?;
crate::Data::load_from_file(data_file_path)
.ok_or_else(|| anyhow!("cannot load cached state of updater"))
}
// Save updater's state
pub(super) fn save(&self) -> Result<()> {
let data_file_path = Self::build_data_fn()?;
crate::Data::save_to_file(&data_file_path, &self.state).map_err(|e| {
let _r = remove_file(data_file_path);
e
})
}
pub(super) fn start_releaser_worker(
&self,
tx: mpsc::Sender<ReleasePayloadResult>,
p: PathBuf,
) -> Result<()> {
use std::thread;
let releaser = (*self.releaser.borrow()).clone();
thread::Builder::new().spawn(move || {
debug!("other thread: starting in updater thread");
let talk_to_mother = || -> Result<()> {
let (v, url) = releaser.latest_release()?;
let mut info = UpdateInfo::new(v, url);
info.set_fetched_at(Utc::now());
let payload = Some(info);
Self::write_last_check_status(&p, &payload)?;
tx.send(Ok(payload))?;
Ok(())
};
let outcome = talk_to_mother();
debug!("other thread: finished checking releaser status");
if let Err(error) = outcome {
tx.send(Err(error))
.expect("could not send error from thread");
}
})?;
Ok(())
}
// write version of latest avail. release (if any) to a cache file
pub(super) fn write_last_check_status(
p: &Path,
updater_info: &Option<UpdateInfo>,
) -> Result<()> {
crate::Data::save_to_file(p, updater_info).map_err(|e| {
let _r = remove_file(p);
e
})
}
// read version of latest avail. release (if any) from a cache file
pub(super) fn read_last_check_status(p: &Path) -> Result<Option<UpdateInfo>> {
crate::Data::load_from_file(p).ok_or_else(|| anyhow!("no data in given path"))
}
pub(super) fn build_data_fn() -> Result<PathBuf> {
let workflow_name = env::workflow_name()
.unwrap_or_else(|| "YouForgotTo/フ:NameYourOwnWork}flowッ".to_string())
.chars()
.map(|c| if c.is_ascii_alphanumeric() { c } else { '_' })
.collect::<String>();
env::workflow_cache()
.ok_or_else(|| {
anyhow!("missing env variable for cache dir. forgot to set workflow bundle id?")
})
.and_then(|mut data_path| {
env::workflow_uid()
.ok_or_else(|| anyhow!("missing env variable for uid"))
.map(|ref uid| {
let filename = [uid, "-", workflow_name.as_str(), "-updater.json"].concat();
data_path.push(filename);
data_path
})
})
}
pub(super) fn update_ready_async(&self, try_flag: bool) -> Result<bool> {
self.state
.worker_state
.borrow()
.as_ref()
.ok_or_else(|| anyhow!("you need to use init() method first."))
.and_then(|mpsc| {
if mpsc.recvd_payload.borrow().is_none() {
// No payload received yet, try to talk to worker thread
mpsc.rx
.borrow()
.as_ref()
.ok_or_else(|| anyhow!("you need to use init() correctly!"))
.and_then(|rx| {
let rr = if try_flag {
// don't block while trying to receive
rx.try_recv().map_err(|e| anyhow!(e.to_string()))
} else {
// block while waiting to receive
rx.recv().map_err(|e| anyhow!(e.to_string()))
};
rr.and_then(|msg| {
let msg_status = msg.map(|update_info| {
// received good message, update cache for received payload
*self.state.avail_release.borrow_mut() = update_info.clone();
// update last_check if received info is newer than last_check
update_info.as_ref().map(|ui| {
ui.fetched_at().map(|fetched_time| {
if self.last_check().is_none()
|| self.last_check().as_ref().unwrap()
< fetched_time
{
self.set_last_check(*fetched_time);
}
})
});
*mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info));
});
// save state regardless of content of msg
self.save()?;
msg_status?;
Ok(())
})
})?;
}
Ok(())
})?;
Ok(self
.state
.avail_release
.borrow()
.as_ref()
.map_or(false, |release| *self.current_version() < release.version))
}
#[allow(dead_code)]
#[deprecated(note = "update_ready_async is deprecated. use init()")]
pub(super) fn _update_ready_async(&self) -> Result<bool> {
let worker_state = self.state.worker_state.borrow();
assert!(worker_state.is_some(), "you need to use init first");
let mpsc = worker_state.as_ref().expect("no worker_state");
if mpsc.recvd_payload.borrow().is_none() {
let rx_option = mpsc.rx.borrow();
let rx = rx_option.as_ref().unwrap();
let rr = rx.recv();
if rr.is_ok() {
let msg = rr.as_ref().unwrap();
if msg.is_ok() {
let update_info = msg.as_ref().unwrap();
*self.state.avail_release.borrow_mut() = update_info.clone();
*mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info.clone()));
} else {
return Err(anyhow!(format!("{:?}", msg.as_ref().unwrap_err())));
}
self.save()?;
} else {
eprintln!("{:?}", rr);
return Err(anyhow!(format!("{:?}", rr)));
}
}
if let Some(ref updater_info) = *self.state.avail_release.borrow() {
if *self.current_version() < updater_info.version {
Ok(true)
} else {
Ok(false)
}
} else {
Ok(false)
}
}
#[allow(dead_code)]
#[deprecated(note = "update_ready_sync is deprecated. use init()")]
pub(super) fn _update_ready_sync(&self) -> Result<bool> {
// A None value for last_check indicates that the workflow is being run for the first time.
// Thus we update last_check to now and just save the updater state without asking the
// Releaser to do a remote call/check for us, since we assume that the user just downloaded
// the workflow.
const LATEST_UPDATE_INFO_CACHE_FN: &str = "last_check_status.json";
// file for status of last update check
let p = Self::build_data_fn()?.with_file_name(LATEST_UPDATE_INFO_CACHE_FN);
// make a network call to see if a newer version is avail.
// save the result of call to cache file.
let ask_releaser_for_update = || -> Result<bool> {
let (v, url) = self.releaser.borrow().latest_release()?;
let update_avail = *self.current_version() < v;
let now = Utc::now();
let payload = {
let mut info = UpdateInfo::new(v, url);
info.set_fetched_at(now);
Some(info)
};
self.set_last_check(now);
Self::write_last_check_status(&p, &payload)?;
*self.state.avail_release.borrow_mut() = payload;
self.save()?;
Ok(update_avail)
};
// if first time checking, just update the updater's timestamp, no network call
if self.last_check().is_none() {
self.set_last_check(Utc::now());
self.save()?;
Ok(false)
} else if self.due_to_check() {
// it's time to talk to remote server
ask_releaser_for_update()
} else {
Self::read_last_check_status(&p)
.map(|last_check_status| {
last_check_status.map_or(false, |last_update_info| {
*self.current_version() < last_update_info.version
})
//.unwrap_or(false)
})
.or(Ok(false))
}
}
}
pub(super) fn default_interval() -> i64 {
UPDATE_INTERVAL
}
| {
let current_version = env::workflow_version()
.map_or_else(|| Ok(Version::new(0, 0, 0)), |v| Version::parse(&v))?;
let state = UpdaterState {
current_version,
last_check: Cell::new(None),
avail_release: RefCell::new(None),
worker_state: RefCell::new(None),
update_interval: UPDATE_INTERVAL,
};
let updater = Updater {
state,
releaser: RefCell::new(r),
};
updater.save()?;
Ok(updater)
} | conditional_block |
pbms.rs | URL")?;
Ok(())
}
/// Replace the {foo} placeholders in repo paths.
///
/// {version} is replaced with the Fuchsia SDK version string.
/// {sdk.root} is replaced with the SDK directory path.
fn expand_placeholders(uri: &str, version: &str, sdk_root: &str) -> Result<url::Url> {
let expanded = uri.replace("{version}", version).replace("{sdk.root}", sdk_root);
if uri.contains(":") {
Ok(url::Url::parse(&expanded).with_context(|| format!("url parse {:?}", expanded))?)
} else {
// If there's no colon, assume it's a local path.
let base_url = url::Url::parse("file:/").context("parsing minimal file URL")?;
Ok(url::Url::options()
.base_url(Some(&base_url))
.parse(&expanded)
.with_context(|| format!("url parse {:?}", expanded))?)
}
}
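// Usage sketch (hypothetical inputs, not taken from the original tests); in test form it
// would look roughly like:
//     let url = expand_placeholders("gs://bundles/{version}/product_bundles.json",
//                                    "12.20230425.1.1", "/opt/sdk").unwrap();
//     assert_eq!(url.as_str(), "gs://bundles/12.20230425.1.1/product_bundles.json");
//     let local = expand_placeholders("{sdk.root}/product_bundles.json",
//                                      "12.20230425.1.1", "/opt/sdk").unwrap();
//     assert_eq!(local.as_str(), "file:///opt/sdk/product_bundles.json");
// The first input contains a ':' and is parsed directly; the second has none and is
// resolved against the "file:/" base URL.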
/// Get a list of the urls in the CONFIG_METADATA config with the placeholders
/// expanded.
///
/// I.e. run expand_placeholders() on each element in CONFIG_METADATA.
pub(crate) async fn pbm_repo_list(sdk: &ffx_config::Sdk) -> Result<Vec<url::Url>> {
let version = match sdk.get_version() {
SdkVersion::Version(version) => version,
SdkVersion::InTree => "",
SdkVersion::Unknown => bail!("Unable to determine SDK version vs. in-tree"),
};
let sdk_root = sdk.get_path_prefix();
let repos: Vec<String> = ffx_config::get::<Vec<String>, _>(CONFIG_METADATA)
.await
.context("get config CONFIG_METADATA")?;
let repos: Vec<url::Url> = repos
.iter()
.map(|s| {
expand_placeholders(s, &version, &sdk_root.to_string_lossy())
.expect(&format!("URL for repo {:?}", s))
})
.collect();
Ok(repos)
}
/// Retrieve the path portion of a "file:/" url. Non-file-paths return None.
///
/// If the url has no scheme, the whole string is returned.
/// E.g.
/// - "/foo/bar" -> Some("/foo/bar")
/// - "file://foo/bar" -> Some("/foo/bar")
/// - "http://foo/bar" -> None
pub(crate) fn path_from_file_url(product_url: &url::Url) -> Option<PathBuf> {
if product_url.scheme() == "file" {
product_url.to_file_path().ok()
} else {
None
}
}
/// Get a list of product bundle entry names from `path`.
///
/// These are not full product_urls, but just the name that is used in the
/// fragment portion of the URL.
pub(crate) fn pb_names_from_path(path: &Path) -> Result<Vec<String>> {
let mut entries = Entries::new();
entries.add_from_path(path).context("adding from path")?;
Ok(entries
.iter()
.filter_map(|entry| match entry {
Metadata::ProductBundleV1(_) => Some(entry.name().to_string()),
_ => None,
})
.collect::<Vec<String>>())
}
/// Helper function for determining local path.
///
/// If `dir` is true, return a directory path; otherwise this may return a glob (file) path.
pub(crate) async fn local_path_helper(
product_url: &url::Url,
add_dir: &str,
dir: bool,
sdk_root: &Path,
) -> Result<PathBuf> {
assert!(!product_url.fragment().is_none());
if let Some(path) = &path_from_file_url(product_url) {
if dir {
// TODO(fxbug.dev/98009): Unify the file layout between local and remote
// product bundles to avoid this hack.
if path.starts_with(sdk_root) {
Ok(sdk_root.to_path_buf())
} else {
Ok(path.parent().expect("parent of file path").to_path_buf())
}
} else {
Ok(path.to_path_buf())
}
} else {
let url = url_sans_fragment(&product_url)?;
Ok(get_product_dir(&url).await?.join(add_dir))
}
}
/// Retrieve the storage directory path from the config.
pub async fn get_storage_dir() -> Result<PathBuf> {
let storage_path: PathBuf =
ffx_config::get(CONFIG_STORAGE_PATH).await.context("getting CONFIG_STORAGE_PATH")?;
Ok(storage_path)
}
/// Retrieve the product directory path from the config.
///
/// This is the storage path plus a hash of the `product_url` provided.
pub async fn get_product_dir(product_url: &url::Url) -> Result<PathBuf> {
Ok(get_storage_dir().await?.join(pb_dir_name(product_url)))
}
/// Separate the URL on the last "#" character.
///
/// If no "#" is found, use the whole input as the url.
///
/// "file://foo#bar" -> "file://foo"
/// "file://foo" -> "file://foo"
pub(crate) fn url_sans_fragment(product_url: &url::Url) -> Result<url::Url> {
let mut product_url = product_url.to_owned();
product_url.set_fragment(None);
Ok(product_url)
}
/// Helper for `get_product_data()`, see docs there.
pub(crate) async fn get_product_data_from_gcs<I>(
product_url: &url::Url,
local_repo_dir: &std::path::Path,
auth_flow: &AuthFlowChoice,
ui: &I,
) -> Result<()>
where
I: structured_ui::Interface + Sync,
{
tracing::debug!("get_product_data_from_gcs {:?} to {:?}", product_url, local_repo_dir);
assert_eq!(product_url.scheme(), GS_SCHEME);
let product_name = product_url.fragment().expect("URL with trailing product_name fragment.");
let url = url_sans_fragment(product_url)?;
fetch_product_metadata(
&url,
local_repo_dir,
auth_flow,
&mut |_d, _f| Ok(ProgressResponse::Continue),
ui,
)
.await
.context("fetching metadata")?;
let file_path = local_repo_dir.join("product_bundles.json");
if !file_path.is_file() {
bail!("product_bundles.json not found {:?}.", file_path);
}
let mut entries = Entries::new();
entries.add_from_path(&file_path).context("adding entries from gcs")?;
let product_bundle = find_product_bundle(&entries, &Some(product_name.to_string()))
.context("finding product bundle")?;
fetch_data_for_product_bundle_v1(&product_bundle, &url, local_repo_dir, auth_flow, ui).await
}
/// Helper for `get_product_data()`, see docs there.
pub async fn fetch_data_for_product_bundle_v1<I>(
product_bundle: &sdk_metadata::ProductBundleV1,
product_url: &url::Url,
local_repo_dir: &std::path::Path,
auth_flow: &AuthFlowChoice,
ui: &I,
) -> Result<()>
where
I: structured_ui::Interface + Sync,
{
// Handy debugging switch to disable images download.
let temp_dir = TempDir::new_in(&local_repo_dir)?;
let temp_path = temp_dir.path();
if true {
let start = std::time::Instant::now();
tracing::info!(
"Getting product data for {:?} to {:?}",
product_bundle.name,
local_repo_dir
);
let local_dir = temp_path.join("images");
async_fs::create_dir_all(&local_dir).await.context("creating directory")?;
for image in &product_bundle.images {
tracing::debug!("image {:?}", image);
let base_url =
make_remote_url(product_url, &image.base_uri).context("image.base_uri")?;
if !exists_in_gcs(&base_url.as_str(), auth_flow, ui).await? {
tracing::warn!("The base_uri does not exist: {}", base_url);
}
fetch_by_format(
&image.format,
&base_url,
&local_dir,
auth_flow,
&|d, f| {
let mut progress = structured_ui::Progress::builder();
progress.title(&product_bundle.name);
progress.entry("Image data", /*at=*/ 1, /*of=*/ 2, "steps");
progress.entry(&d.name, d.at, d.of, "files");
progress.entry(&f.name, f.at, f.of, "bytes");
ui.present(&structured_ui::Presentation::Progress(progress))?;
Ok(ProgressResponse::Continue)
},
ui,
)
.await
.with_context(|| format!("fetching images for {}.", product_bundle.name))?;
}
tracing::debug!("Total fetch images runtime {} seconds.", start.elapsed().as_secs_f32());
}
// Handy debugging switch to disable packages download.
if true {
let start = std::time::Instant::now();
let local_dir = temp_path.join("packages");
async_fs::create_dir_all(&local_dir).await.context("creating directory")?;
tracing::info!(
"Getting package data for {:?}, local_dir {:?}",
product_bundle.name,
local_dir
);
fetch_package_repository_from_mirrors(
product_url,
&local_dir,
&product_bundle.packages,
auth_flow,
&|d, f| {
let mut progress = structured_ui::Progress::builder();
progress.title(&product_bundle.name);
progress.entry("Package data", /*at=*/ 2, /*at=*/ 2, "steps");
progress.entry(&d.name, d.at, d.of, "files");
progress.entry(&f.name, f.at, f.of, "bytes");
ui.present(&structured_ui::Presentation::Progress(progress))?;
Ok(ProgressResponse::Continue)
},
ui,
)
.await
.with_context(|| {
format!(
"fetch_package_repository_from_mirrors {:?}, local_dir {:?}",
product_url, local_dir
)
})?;
tracing::debug!("Total fetch packages runtime {} seconds.", start.elapsed().as_secs_f32());
}
let final_name = local_repo_dir.join(&product_bundle.name);
tracing::info!("Download of product data for {:?} is complete.", product_bundle.name);
tracing::info!("Renaming temporary directory to {}", final_name.display());
fs::rename(temp_path, final_name).expect("Renaming temp directory failed.");
tracing::info!("Data written to \"{}\".", local_repo_dir.display());
Ok(())
}
/// Generate a (likely) unique name for the URL.
///
/// URLs don't always make good file paths.
pub(crate) fn pb_dir_name(gcs_url: &url::Url) -> String {
let mut gcs_url = gcs_url.to_owned();
gcs_url.set_fragment(None);
use std::collections::hash_map::DefaultHasher;
use std::hash::Hash;
use std::hash::Hasher;
let mut s = DefaultHasher::new();
gcs_url.as_str().hash(&mut s);
let out = s.finish();
tracing::debug!("pb_dir_name {:?}, hash {:?}", gcs_url, out);
format!("{}", out)
}
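// Illustrative note (hypothetical URLs): the fragment is cleared before hashing, so
// "gs://fuchsia/bundles#core.x64" and "gs://fuchsia/bundles#workstation.x64" hash to the
// same directory name; the test_local_path_helper test below relies on exactly this
// behavior for URLs that differ only in their fragment.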
/// Download and expand data.
///
/// For a directory, all files in the directory are downloaded.
/// For a .tgz file, the file is downloaded and expanded.
async fn fetch_by_format<F, I>(
format: &str,
uri: &url::Url,
local_dir: &Path,
auth_flow: &AuthFlowChoice,
progress: &F,
ui: &I,
) -> Result<()>
where
F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult,
I: structured_ui::Interface + Sync,
{
tracing::debug!("fetch_by_format");
match format {
"files" | "tgz" => fetch_bundle_uri(uri, &local_dir, auth_flow, progress, ui).await,
_ =>
// The schema currently defines only "files" or "tgz" (see RFC-100).
// This error could be a typo in the product bundle or a new image
// format has been added and this code needs an update.
{
bail!(
"Unexpected image format ({:?}) in product bundle. \
Supported formats are \"files\" and \"tgz\". \
Please report as a bug.",
format,
)
}
}
}
/// Download data from any of the supported schemes listed in RFC-100, Product
/// Bundle, "bundle_uri".
///
/// Currently: "pattern": "^(?:http|https|gs|file):\/\/"
pub(crate) async fn fetch_bundle_uri<F, I>(
product_url: &url::Url,
local_dir: &Path,
auth_flow: &AuthFlowChoice,
progress: &F,
ui: &I,
) -> Result<()>
where
F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult,
I: structured_ui::Interface + Sync,
{
tracing::debug!("fetch_bundle_uri");
if product_url.scheme() == GS_SCHEME {
fetch_from_gcs(product_url.as_str(), local_dir, auth_flow, progress, ui)
.await
.context("Downloading from GCS.")?;
} else if product_url.scheme() == "http" || product_url.scheme() == "https" {
fetch_from_web(product_url, local_dir, progress, ui)
.await
.context("fetching from http(s)")?;
} else if let Some(_) = &path_from_file_url(product_url) {
// Since the file is already local, no fetch is necessary.
tracing::debug!("Found local file path {:?}", product_url);
} else {
bail!("Unexpected URI scheme in ({:?})", product_url);
}
Ok(())
}
async fn fetch_from_web<F, I>(
product_uri: &url::Url,
local_dir: &Path,
progress: &F,
_ui: &I,
) -> Result<()>
where
F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult,
I: structured_ui::Interface + Sync,
{
tracing::debug!("fetch_from_web");
let name = if let Some((_, name)) = product_uri.path().rsplit_once('/') {
name
} else {
unimplemented!()
};
if name.is_empty() {
unimplemented!("downloading a directory from a web server is not implemented");
}
let res = fuchsia_hyper::new_client()
.get(hyper::Uri::from_maybe_shared(product_uri.to_string())?)
.await
.with_context(|| format!("Requesting {}", product_uri))?;
match res.status() {
StatusCode::OK => {}
StatusCode::NOT_FOUND => {
bail!("{} not found", product_uri);
}
status => {
bail!("Unexpected HTTP status downloading {}: {}", product_uri, status);
}
}
let mut at: u64 = 0;
let length = if res.headers().contains_key(CONTENT_LENGTH) {
res.headers()
.get(CONTENT_LENGTH)
.context("getting content length")?
.to_str()?
.parse::<u64>()
.context("parsing content length")?
} else {
0
};
std::fs::create_dir_all(local_dir)
.with_context(|| format!("Creating {}", local_dir.display()))?;
let path = local_dir.join(name);
let mut file =
File::create(&path).await.with_context(|| format!("Creating {}", path.display()))?;
let mut stream = res.into_body();
let mut of = length;
// Throttle the progress UI updates to avoid burning CPU on changes | let mut throttle = Throttle::from_duration(std::time::Duration::from_millis(500));
let url = product_uri.to_string();
while let Some(chunk) =
stream.try_next().await.with_context(|| format!("Downloading {}", product_uri))?
{
file.write_all(&chunk).await.with_context(|| format!("Writing {}", path.display()))?;
at += chunk.len() as u64;
if at > of {
of = at;
}
if throttle.is_ready() {
match progress(
DirectoryProgress { name: &url, at: 0, of: 1, units: "files" },
FileProgress { name: &url, at, of, units: "bytes" },
)
.context("rendering progress")?
{
ProgressResponse::Cancel => break,
_ => (),
}
}
}
file.close().await.with_context(|| format!("Closing {}", path.display()))?;
Ok(())
}
/// If internal_url is a file scheme, join `product_url` and `internal_url`.
/// Otherwise, return `internal_url`.
pub(crate) fn make_remote_url(product_url: &url::Url, internal_url: &str) -> Result<url::Url> {
let result = if let Some(remainder) = internal_url.strip_prefix("file:/") {
// Note: The product_url must either be a path to the product_bundle.json file or to the
// parent directory (with a trailing slash).
product_url.join(remainder)?
} else {
url::Url::parse(&internal_url).with_context(|| format!("parsing url {:?}", internal_url))?
};
tracing::debug!(
"make_remote_url product_url {:?}, internal_url {:?}, result {:?}",
product_url,
internal_url,
result
);
Ok(result)
}
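// Join-behavior sketch (hypothetical URLs, illustration only); roughly:
//     let base = url::Url::parse("gs://bucket/dir/product_bundle.json").unwrap();
//     let joined = make_remote_url(&base, "file:/images/fuchsia.zbi").unwrap();
//     assert_eq!(joined.as_str(), "gs://bucket/dir/images/fuchsia.zbi");
// A non-file internal_url such as "https://example.com/data.tgz" is parsed and returned as-is.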
#[cfg(test)]
mod tests {
use super::*;
#[fuchsia_async::run_singlethreaded(test)]
async fn test_path_from_file_url() {
let input = url::Url::parse("fake://foo#bar").expect("url");
let output = path_from_file_url(&input);
assert!(output.is_none());
let input = url::Url::parse("file:///../../foo#bar").expect("url");
let output = path_from_file_url(&input);
assert_eq!(output, Some(Path::new("/foo").to_path_buf()));
let input = url::Url::parse("file://foo#bar").expect("url");
let output = path_from_file_url(&input);
assert!(output.is_none());
let input = url::Url::parse("file:///foo#bar").expect("url");
let output = path_from_file_url(&input);
assert_eq!(output, Some(Path::new("/foo").to_path_buf()));
let temp_dir = tempfile::TempDir::new().expect("temp dir");
let base_url = url::Url::from_directory_path(temp_dir.path().join("a/b/c/d")).expect("url");
let input =
url::Url::options().base_url(Some(&base_url)).parse("../../foo#bar").expect("url");
let output = path_from_file_url(&input);
assert_eq!(output, Some(temp_dir.path().join("a/b/foo").to_path_buf()));
}
#[fuchsia_async::run_singlethreaded(test)]
async fn test_url_sans_fragment() {
let input = url::Url::parse("fake://foo#bar").expect("url");
let output = url_sans_fragment(&input).expect("sans fragment");
assert_eq!(output, url::Url::parse("fake://foo").expect("check url"));
let input = url::Url::parse("fake://foo").expect("url");
let output = url_sans_fragment(&input).expect("sans fragment");
assert_eq!(output, url::Url::parse("fake://foo").expect("check url"));
}
// Disabling this test until a test config can be modified without altering
// the local user's config.
#[ignore]
#[fuchsia_async::run_singlethreaded(test)]
async fn test_local_path_helper() {
let sdk_prefix = PathBuf::from("/"); // this is only used for file paths
let url = url::Url::parse("fake://foo#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert!(path.to_string_lossy().ends_with("ffx/pbms/951333825719265977/foo"));
// Note that the hash will be the same even though the fragment is
// different.
let url = url::Url::parse("fake://foo#blah").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert!(path.to_string_lossy().ends_with("ffx/pbms/951333825719265977/foo"));
let url = url::Url::parse("gs://foo/blah/*.json#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert!(path.to_string_lossy().ends_with("ffx/pbms/16042545670964745983/foo"));
let url = url::Url::parse("file:///foo/blah/*.json#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert_eq!(path.to_string_lossy(), "/foo/blah");
let url = url::Url::parse("file:///foo/blah/*.json#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ false, &sdk_prefix).await.expect("dir helper");
assert_eq!(path.to_string_lossy(), "/foo/blah/*.json");
}
#[fuchsia_async::run_singlethreaded(test)]
#[should_panic(expected = "Unexpected image format")]
async fn test_fetch_by_format() {
let url = url::Url::parse("fake://foo").expect("url");
let | // the user will have trouble seeing anyway. Without throttling,
// around 20% of the execution time can be spent updating the
// progress UI. The throttle makes the overhead negligible. | random_line_split |
pbms.rs | ")?;
Ok(())
}
/// Replace the {foo} placeholders in repo paths.
///
/// {version} is replaced with the Fuchsia SDK version string.
/// {sdk.root} is replaced with the SDK directory path.
fn expand_placeholders(uri: &str, version: &str, sdk_root: &str) -> Result<url::Url> {
let expanded = uri.replace("{version}", version).replace("{sdk.root}", sdk_root);
if uri.contains(":") {
Ok(url::Url::parse(&expanded).with_context(|| format!("url parse {:?}", expanded))?)
} else {
// If there's no colon, assume it's a local path.
let base_url = url::Url::parse("file:/").context("parsing minimal file URL")?;
Ok(url::Url::options()
.base_url(Some(&base_url))
.parse(&expanded)
.with_context(|| format!("url parse {:?}", expanded))?)
}
}
/// Get a list of the urls in the CONFIG_METADATA config with the placeholders
/// expanded.
///
/// I.e. run expand_placeholders() on each element in CONFIG_METADATA.
pub(crate) async fn pbm_repo_list(sdk: &ffx_config::Sdk) -> Result<Vec<url::Url>> {
let version = match sdk.get_version() {
SdkVersion::Version(version) => version,
SdkVersion::InTree => "",
SdkVersion::Unknown => bail!("Unable to determine SDK version vs. in-tree"),
};
let sdk_root = sdk.get_path_prefix();
let repos: Vec<String> = ffx_config::get::<Vec<String>, _>(CONFIG_METADATA)
.await
.context("get config CONFIG_METADATA")?;
let repos: Vec<url::Url> = repos
.iter()
.map(|s| {
expand_placeholders(s, &version, &sdk_root.to_string_lossy())
.expect(&format!("URL for repo {:?}", s))
})
.collect();
Ok(repos)
}
/// Retrieve the path portion of a "file:/" url. Non-file-paths return None.
///
/// If the url has no scheme, the whole string is returned.
/// E.g.
/// - "/foo/bar" -> Some("/foo/bar")
/// - "file://foo/bar" -> Some("/foo/bar")
/// - "http://foo/bar" -> None
pub(crate) fn path_from_file_url(product_url: &url::Url) -> Option<PathBuf> {
if product_url.scheme() == "file" {
product_url.to_file_path().ok()
} else {
None
}
}
/// Get a list of product bundle entry names from `path`.
///
/// These are not full product_urls, but just the name that is used in the
/// fragment portion of the URL.
pub(crate) fn pb_names_from_path(path: &Path) -> Result<Vec<String>> {
let mut entries = Entries::new();
entries.add_from_path(path).context("adding from path")?;
Ok(entries
.iter()
.filter_map(|entry| match entry {
Metadata::ProductBundleV1(_) => Some(entry.name().to_string()),
_ => None,
})
.collect::<Vec<String>>())
}
/// Helper function for determining local path.
///
/// If `dir` is true, return a directory path; otherwise this may return a glob (file) path.
pub(crate) async fn local_path_helper(
product_url: &url::Url,
add_dir: &str,
dir: bool,
sdk_root: &Path,
) -> Result<PathBuf> {
assert!(!product_url.fragment().is_none());
if let Some(path) = &path_from_file_url(product_url) {
if dir {
// TODO(fxbug.dev/98009): Unify the file layout between local and remote
// product bundles to avoid this hack.
if path.starts_with(sdk_root) {
Ok(sdk_root.to_path_buf())
} else {
Ok(path.parent().expect("parent of file path").to_path_buf())
}
} else {
Ok(path.to_path_buf())
}
} else {
let url = url_sans_fragment(&product_url)?;
Ok(get_product_dir(&url).await?.join(add_dir))
}
}
/// Retrieve the storage directory path from the config.
pub async fn get_storage_dir() -> Result<PathBuf> {
let storage_path: PathBuf =
ffx_config::get(CONFIG_STORAGE_PATH).await.context("getting CONFIG_STORAGE_PATH")?;
Ok(storage_path)
}
/// Retrieve the product directory path from the config.
///
/// This is the storage path plus a hash of the `product_url` provided.
pub async fn get_product_dir(product_url: &url::Url) -> Result<PathBuf> |
/// Separate the URL on the last "#" character.
///
/// If no "#" is found, use the whole input as the url.
///
/// "file://foo#bar" -> "file://foo"
/// "file://foo" -> "file://foo"
pub(crate) fn url_sans_fragment(product_url: &url::Url) -> Result<url::Url> {
let mut product_url = product_url.to_owned();
product_url.set_fragment(None);
Ok(product_url)
}
/// Helper for `get_product_data()`, see docs there.
pub(crate) async fn get_product_data_from_gcs<I>(
product_url: &url::Url,
local_repo_dir: &std::path::Path,
auth_flow: &AuthFlowChoice,
ui: &I,
) -> Result<()>
where
I: structured_ui::Interface + Sync,
{
tracing::debug!("get_product_data_from_gcs {:?} to {:?}", product_url, local_repo_dir);
assert_eq!(product_url.scheme(), GS_SCHEME);
let product_name = product_url.fragment().expect("URL with trailing product_name fragment.");
let url = url_sans_fragment(product_url)?;
fetch_product_metadata(
&url,
local_repo_dir,
auth_flow,
&mut |_d, _f| Ok(ProgressResponse::Continue),
ui,
)
.await
.context("fetching metadata")?;
let file_path = local_repo_dir.join("product_bundles.json");
if !file_path.is_file() {
bail!("product_bundles.json not found {:?}.", file_path);
}
let mut entries = Entries::new();
entries.add_from_path(&file_path).context("adding entries from gcs")?;
let product_bundle = find_product_bundle(&entries, &Some(product_name.to_string()))
.context("finding product bundle")?;
fetch_data_for_product_bundle_v1(&product_bundle, &url, local_repo_dir, auth_flow, ui).await
}
/// Helper for `get_product_data()`, see docs there.
pub async fn fetch_data_for_product_bundle_v1<I>(
product_bundle: &sdk_metadata::ProductBundleV1,
product_url: &url::Url,
local_repo_dir: &std::path::Path,
auth_flow: &AuthFlowChoice,
ui: &I,
) -> Result<()>
where
I: structured_ui::Interface + Sync,
{
// Handy debugging switch to disable images download.
let temp_dir = TempDir::new_in(&local_repo_dir)?;
let temp_path = temp_dir.path();
if true {
let start = std::time::Instant::now();
tracing::info!(
"Getting product data for {:?} to {:?}",
product_bundle.name,
local_repo_dir
);
let local_dir = temp_path.join("images");
async_fs::create_dir_all(&local_dir).await.context("creating directory")?;
for image in &product_bundle.images {
tracing::debug!("image {:?}", image);
let base_url =
make_remote_url(product_url, &image.base_uri).context("image.base_uri")?;
if !exists_in_gcs(&base_url.as_str(), auth_flow, ui).await? {
tracing::warn!("The base_uri does not exist: {}", base_url);
}
fetch_by_format(
&image.format,
&base_url,
&local_dir,
auth_flow,
&|d, f| {
let mut progress = structured_ui::Progress::builder();
progress.title(&product_bundle.name);
progress.entry("Image data", /*at=*/ 1, /*of=*/ 2, "steps");
progress.entry(&d.name, d.at, d.of, "files");
progress.entry(&f.name, f.at, f.of, "bytes");
ui.present(&structured_ui::Presentation::Progress(progress))?;
Ok(ProgressResponse::Continue)
},
ui,
)
.await
.with_context(|| format!("fetching images for {}.", product_bundle.name))?;
}
tracing::debug!("Total fetch images runtime {} seconds.", start.elapsed().as_secs_f32());
}
// Handy debugging switch to disable packages download.
if true {
let start = std::time::Instant::now();
let local_dir = temp_path.join("packages");
async_fs::create_dir_all(&local_dir).await.context("creating directory")?;
tracing::info!(
"Getting package data for {:?}, local_dir {:?}",
product_bundle.name,
local_dir
);
fetch_package_repository_from_mirrors(
product_url,
&local_dir,
&product_bundle.packages,
auth_flow,
&|d, f| {
let mut progress = structured_ui::Progress::builder();
progress.title(&product_bundle.name);
progress.entry("Package data", /*at=*/ 2, /*at=*/ 2, "steps");
progress.entry(&d.name, d.at, d.of, "files");
progress.entry(&f.name, f.at, f.of, "bytes");
ui.present(&structured_ui::Presentation::Progress(progress))?;
Ok(ProgressResponse::Continue)
},
ui,
)
.await
.with_context(|| {
format!(
"fetch_package_repository_from_mirrors {:?}, local_dir {:?}",
product_url, local_dir
)
})?;
tracing::debug!("Total fetch packages runtime {} seconds.", start.elapsed().as_secs_f32());
}
let final_name = local_repo_dir.join(&product_bundle.name);
tracing::info!("Download of product data for {:?} is complete.", product_bundle.name);
tracing::info!("Renaming temporary directory to {}", final_name.display());
fs::rename(temp_path, final_name).expect("Renaming temp directory failed.");
tracing::info!("Data written to \"{}\".", local_repo_dir.display());
Ok(())
}
/// Generate a (likely) unique name for the URL.
///
/// URLs don't always make good file paths.
pub(crate) fn pb_dir_name(gcs_url: &url::Url) -> String {
let mut gcs_url = gcs_url.to_owned();
gcs_url.set_fragment(None);
use std::collections::hash_map::DefaultHasher;
use std::hash::Hash;
use std::hash::Hasher;
let mut s = DefaultHasher::new();
gcs_url.as_str().hash(&mut s);
let out = s.finish();
tracing::debug!("pb_dir_name {:?}, hash {:?}", gcs_url, out);
format!("{}", out)
}
/// Download and expand data.
///
/// For a directory, all files in the directory are downloaded.
/// For a .tgz file, the file is downloaded and expanded.
async fn fetch_by_format<F, I>(
format: &str,
uri: &url::Url,
local_dir: &Path,
auth_flow: &AuthFlowChoice,
progress: &F,
ui: &I,
) -> Result<()>
where
F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult,
I: structured_ui::Interface + Sync,
{
tracing::debug!("fetch_by_format");
match format {
"files" | "tgz" => fetch_bundle_uri(uri, &local_dir, auth_flow, progress, ui).await,
_ =>
// The schema currently defines only "files" or "tgz" (see RFC-100).
// This error could be a typo in the product bundle or a new image
// format has been added and this code needs an update.
{
bail!(
"Unexpected image format ({:?}) in product bundle. \
Supported formats are \"files\" and \"tgz\". \
Please report as a bug.",
format,
)
}
}
}
/// Download data from any of the supported schemes listed in RFC-100, Product
/// Bundle, "bundle_uri".
///
/// Currently: "pattern": "^(?:http|https|gs|file):\/\/"
pub(crate) async fn fetch_bundle_uri<F, I>(
product_url: &url::Url,
local_dir: &Path,
auth_flow: &AuthFlowChoice,
progress: &F,
ui: &I,
) -> Result<()>
where
F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult,
I: structured_ui::Interface + Sync,
{
tracing::debug!("fetch_bundle_uri");
if product_url.scheme() == GS_SCHEME {
fetch_from_gcs(product_url.as_str(), local_dir, auth_flow, progress, ui)
.await
.context("Downloading from GCS.")?;
} else if product_url.scheme() == "http" || product_url.scheme() == "https" {
fetch_from_web(product_url, local_dir, progress, ui)
.await
.context("fetching from http(s)")?;
} else if let Some(_) = &path_from_file_url(product_url) {
// Since the file is already local, no fetch is necessary.
tracing::debug!("Found local file path {:?}", product_url);
} else {
bail!("Unexpected URI scheme in ({:?})", product_url);
}
Ok(())
}
async fn fetch_from_web<F, I>(
product_uri: &url::Url,
local_dir: &Path,
progress: &F,
_ui: &I,
) -> Result<()>
where
F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult,
I: structured_ui::Interface + Sync,
{
tracing::debug!("fetch_from_web");
let name = if let Some((_, name)) = product_uri.path().rsplit_once('/') {
name
} else {
unimplemented!()
};
if name.is_empty() {
unimplemented!("downloading a directory from a web server is not implemented");
}
let res = fuchsia_hyper::new_client()
.get(hyper::Uri::from_maybe_shared(product_uri.to_string())?)
.await
.with_context(|| format!("Requesting {}", product_uri))?;
match res.status() {
StatusCode::OK => {}
StatusCode::NOT_FOUND => {
bail!("{} not found", product_uri);
}
status => {
bail!("Unexpected HTTP status downloading {}: {}", product_uri, status);
}
}
let mut at: u64 = 0;
let length = if res.headers().contains_key(CONTENT_LENGTH) {
res.headers()
.get(CONTENT_LENGTH)
.context("getting content length")?
.to_str()?
.parse::<u64>()
.context("parsing content length")?
} else {
0
};
std::fs::create_dir_all(local_dir)
.with_context(|| format!("Creating {}", local_dir.display()))?;
let path = local_dir.join(name);
let mut file =
File::create(&path).await.with_context(|| format!("Creating {}", path.display()))?;
let mut stream = res.into_body();
let mut of = length;
// Throttle the progress UI updates to avoid burning CPU on changes
// the user will have trouble seeing anyway. Without throttling,
// around 20% of the execution time can be spent updating the
// progress UI. The throttle makes the overhead negligible.
let mut throttle = Throttle::from_duration(std::time::Duration::from_millis(500));
let url = product_uri.to_string();
while let Some(chunk) =
stream.try_next().await.with_context(|| format!("Downloading {}", product_uri))?
{
file.write_all(&chunk).await.with_context(|| format!("Writing {}", path.display()))?;
at += chunk.len() as u64;
if at > of {
of = at;
}
if throttle.is_ready() {
match progress(
DirectoryProgress { name: &url, at: 0, of: 1, units: "files" },
FileProgress { name: &url, at, of, units: "bytes" },
)
.context("rendering progress")?
{
ProgressResponse::Cancel => break,
_ => (),
}
}
}
file.close().await.with_context(|| format!("Closing {}", path.display()))?;
Ok(())
}
/// If internal_url is a file scheme, join `product_url` and `internal_url`.
/// Otherwise, return `internal_url`.
pub(crate) fn make_remote_url(product_url: &url::Url, internal_url: &str) -> Result<url::Url> {
let result = if let Some(remainder) = internal_url.strip_prefix("file:/") {
// Note: The product_url must either be a path to the product_bundle.json file or to the
// parent directory (with a trailing slash).
product_url.join(remainder)?
} else {
url::Url::parse(&internal_url).with_context(|| format!("parsing url {:?}", internal_url))?
};
tracing::debug!(
"make_remote_url product_url {:?}, internal_url {:?}, result {:?}",
product_url,
internal_url,
result
);
Ok(result)
}
#[cfg(test)]
mod tests {
use super::*;
#[fuchsia_async::run_singlethreaded(test)]
async fn test_path_from_file_url() {
let input = url::Url::parse("fake://foo#bar").expect("url");
let output = path_from_file_url(&input);
assert!(output.is_none());
let input = url::Url::parse("file:///../../foo#bar").expect("url");
let output = path_from_file_url(&input);
assert_eq!(output, Some(Path::new("/foo").to_path_buf()));
let input = url::Url::parse("file://foo#bar").expect("url");
let output = path_from_file_url(&input);
assert!(output.is_none());
let input = url::Url::parse("file:///foo#bar").expect("url");
let output = path_from_file_url(&input);
assert_eq!(output, Some(Path::new("/foo").to_path_buf()));
let temp_dir = tempfile::TempDir::new().expect("temp dir");
let base_url = url::Url::from_directory_path(temp_dir.path().join("a/b/c/d")).expect("url");
let input =
url::Url::options().base_url(Some(&base_url)).parse("../../foo#bar").expect("url");
let output = path_from_file_url(&input);
assert_eq!(output, Some(temp_dir.path().join("a/b/foo").to_path_buf()));
}
#[fuchsia_async::run_singlethreaded(test)]
async fn test_url_sans_fragment() {
let input = url::Url::parse("fake://foo#bar").expect("url");
let output = url_sans_fragment(&input).expect("sans fragment");
assert_eq!(output, url::Url::parse("fake://foo").expect("check url"));
let input = url::Url::parse("fake://foo").expect("url");
let output = url_sans_fragment(&input).expect("sans fragment");
assert_eq!(output, url::Url::parse("fake://foo").expect("check url"));
}
// Disabling this test until a test config can be modified without altering
// the local user's config.
#[ignore]
#[fuchsia_async::run_singlethreaded(test)]
async fn test_local_path_helper() {
let sdk_prefix = PathBuf::from("/"); // this is only used for file paths
let url = url::Url::parse("fake://foo#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert!(path.to_string_lossy().ends_with("ffx/pbms/951333825719265977/foo"));
// Note that the hash will be the same even though the fragment is
// different.
let url = url::Url::parse("fake://foo#blah").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert!(path.to_string_lossy().ends_with("ffx/pbms/951333825719265977/foo"));
let url = url::Url::parse("gs://foo/blah/*.json#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert!(path.to_string_lossy().ends_with("ffx/pbms/16042545670964745983/foo"));
let url = url::Url::parse("file:///foo/blah/*.json#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert_eq!(path.to_string_lossy(), "/foo/blah");
let url = url::Url::parse("file:///foo/blah/*.json#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ false, &sdk_prefix).await.expect("dir helper");
assert_eq!(path.to_string_lossy(), "/foo/blah/*.json");
}
#[fuchsia_async::run_singlethreaded(test)]
#[should_panic(expected = "Unexpected image format")]
async fn test_fetch_by_format() {
let url = url::Url::parse("fake://foo").expect("url");
| {
Ok(get_storage_dir().await?.join(pb_dir_name(product_url)))
} | identifier_body |
pbms.rs | ")?;
Ok(())
}
/// Replace the {foo} placeholders in repo paths.
///
/// {version} is replaced with the Fuchsia SDK version string.
/// {sdk.root} is replaced with the SDK directory path.
fn expand_placeholders(uri: &str, version: &str, sdk_root: &str) -> Result<url::Url> {
let expanded = uri.replace("{version}", version).replace("{sdk.root}", sdk_root);
if uri.contains(":") {
Ok(url::Url::parse(&expanded).with_context(|| format!("url parse {:?}", expanded))?)
} else {
// If there's no colon, assume it's a local path.
let base_url = url::Url::parse("file:/").context("parsing minimal file URL")?;
Ok(url::Url::options()
.base_url(Some(&base_url))
.parse(&expanded)
.with_context(|| format!("url parse {:?}", expanded))?)
}
}
/// Get a list of the urls in the CONFIG_METADATA config with the placeholders
/// expanded.
///
/// I.e. run expand_placeholders() on each element in CONFIG_METADATA.
pub(crate) async fn pbm_repo_list(sdk: &ffx_config::Sdk) -> Result<Vec<url::Url>> {
let version = match sdk.get_version() {
SdkVersion::Version(version) => version,
SdkVersion::InTree => "",
SdkVersion::Unknown => bail!("Unable to determine SDK version vs. in-tree"),
};
let sdk_root = sdk.get_path_prefix();
let repos: Vec<String> = ffx_config::get::<Vec<String>, _>(CONFIG_METADATA)
.await
.context("get config CONFIG_METADATA")?;
let repos: Vec<url::Url> = repos
.iter()
.map(|s| {
expand_placeholders(s, &version, &sdk_root.to_string_lossy())
.expect(&format!("URL for repo {:?}", s))
})
.collect();
Ok(repos)
}
/// Retrieve the path portion of a "file:/" url. Non-file-paths return None.
///
/// If the url has no scheme, the whole string is returned.
/// E.g.
/// - "/foo/bar" -> Some("/foo/bar")
/// - "file://foo/bar" -> Some("/foo/bar")
/// - "http://foo/bar" -> None
pub(crate) fn path_from_file_url(product_url: &url::Url) -> Option<PathBuf> {
if product_url.scheme() == "file" {
product_url.to_file_path().ok()
} else {
None
}
}
/// Get a list of product bundle entry names from `path`.
///
/// These are not full product_urls, but just the name that is used in the
/// fragment portion of the URL.
pub(crate) fn pb_names_from_path(path: &Path) -> Result<Vec<String>> {
let mut entries = Entries::new();
entries.add_from_path(path).context("adding from path")?;
Ok(entries
.iter()
.filter_map(|entry| match entry {
Metadata::ProductBundleV1(_) => Some(entry.name().to_string()),
_ => None,
})
.collect::<Vec<String>>())
}
/// Helper function for determining local path.
///
/// If `dir` is true, return a directory path; otherwise this may return a glob (file) path.
pub(crate) async fn local_path_helper(
product_url: &url::Url,
add_dir: &str,
dir: bool,
sdk_root: &Path,
) -> Result<PathBuf> {
assert!(!product_url.fragment().is_none());
if let Some(path) = &path_from_file_url(product_url) {
if dir {
// TODO(fxbug.dev/98009): Unify the file layout between local and remote
// product bundles to avoid this hack.
if path.starts_with(sdk_root) {
Ok(sdk_root.to_path_buf())
} else {
Ok(path.parent().expect("parent of file path").to_path_buf())
}
} else {
Ok(path.to_path_buf())
}
} else {
let url = url_sans_fragment(&product_url)?;
Ok(get_product_dir(&url).await?.join(add_dir))
}
}
/// Retrieve the storage directory path from the config.
pub async fn get_storage_dir() -> Result<PathBuf> {
let storage_path: PathBuf =
ffx_config::get(CONFIG_STORAGE_PATH).await.context("getting CONFIG_STORAGE_PATH")?;
Ok(storage_path)
}
/// Retrieve the product directory path from the config.
///
/// This is the storage path plus a hash of the `product_url` provided.
pub async fn get_product_dir(product_url: &url::Url) -> Result<PathBuf> {
Ok(get_storage_dir().await?.join(pb_dir_name(product_url)))
}
/// Separate the URL on the last "#" character.
///
/// If no "#" is found, use the whole input as the url.
///
/// "file://foo#bar" -> "file://foo"
/// "file://foo" -> "file://foo"
pub(crate) fn url_sans_fragment(product_url: &url::Url) -> Result<url::Url> {
let mut product_url = product_url.to_owned();
product_url.set_fragment(None);
Ok(product_url)
}
/// Helper for `get_product_data()`, see docs there.
pub(crate) async fn get_product_data_from_gcs<I>(
product_url: &url::Url,
local_repo_dir: &std::path::Path,
auth_flow: &AuthFlowChoice,
ui: &I,
) -> Result<()>
where
I: structured_ui::Interface + Sync,
{
tracing::debug!("get_product_data_from_gcs {:?} to {:?}", product_url, local_repo_dir);
assert_eq!(product_url.scheme(), GS_SCHEME);
let product_name = product_url.fragment().expect("URL with trailing product_name fragment.");
let url = url_sans_fragment(product_url)?;
fetch_product_metadata(
&url,
local_repo_dir,
auth_flow,
&mut |_d, _f| Ok(ProgressResponse::Continue),
ui,
)
.await
.context("fetching metadata")?;
let file_path = local_repo_dir.join("product_bundles.json");
if !file_path.is_file() {
bail!("product_bundles.json not found {:?}.", file_path);
}
let mut entries = Entries::new();
entries.add_from_path(&file_path).context("adding entries from gcs")?;
let product_bundle = find_product_bundle(&entries, &Some(product_name.to_string()))
.context("finding product bundle")?;
fetch_data_for_product_bundle_v1(&product_bundle, &url, local_repo_dir, auth_flow, ui).await
}
/// Helper for `get_product_data()`, see docs there.
pub async fn fetch_data_for_product_bundle_v1<I>(
product_bundle: &sdk_metadata::ProductBundleV1,
product_url: &url::Url,
local_repo_dir: &std::path::Path,
auth_flow: &AuthFlowChoice,
ui: &I,
) -> Result<()>
where
I: structured_ui::Interface + Sync,
{
// Handy debugging switch to disable images download.
let temp_dir = TempDir::new_in(&local_repo_dir)?;
let temp_path = temp_dir.path();
if true {
let start = std::time::Instant::now();
tracing::info!(
"Getting product data for {:?} to {:?}",
product_bundle.name,
local_repo_dir
);
let local_dir = temp_path.join("images");
async_fs::create_dir_all(&local_dir).await.context("creating directory")?;
for image in &product_bundle.images {
tracing::debug!("image {:?}", image);
let base_url =
make_remote_url(product_url, &image.base_uri).context("image.base_uri")?;
if !exists_in_gcs(&base_url.as_str(), auth_flow, ui).await? {
tracing::warn!("The base_uri does not exist: {}", base_url);
}
fetch_by_format(
&image.format,
&base_url,
&local_dir,
auth_flow,
&|d, f| {
let mut progress = structured_ui::Progress::builder();
progress.title(&product_bundle.name);
progress.entry("Image data", /*at=*/ 1, /*of=*/ 2, "steps");
progress.entry(&d.name, d.at, d.of, "files");
progress.entry(&f.name, f.at, f.of, "bytes");
ui.present(&structured_ui::Presentation::Progress(progress))?;
Ok(ProgressResponse::Continue)
},
ui,
)
.await
.with_context(|| format!("fetching images for {}.", product_bundle.name))?;
}
tracing::debug!("Total fetch images runtime {} seconds.", start.elapsed().as_secs_f32());
}
// Handy debugging switch to disable packages download.
if true {
let start = std::time::Instant::now();
let local_dir = temp_path.join("packages");
async_fs::create_dir_all(&local_dir).await.context("creating directory")?;
tracing::info!(
"Getting package data for {:?}, local_dir {:?}",
product_bundle.name,
local_dir
);
fetch_package_repository_from_mirrors(
product_url,
&local_dir,
&product_bundle.packages,
auth_flow,
&|d, f| {
let mut progress = structured_ui::Progress::builder();
progress.title(&product_bundle.name);
progress.entry("Package data", /*at=*/ 2, /*at=*/ 2, "steps");
progress.entry(&d.name, d.at, d.of, "files");
progress.entry(&f.name, f.at, f.of, "bytes");
ui.present(&structured_ui::Presentation::Progress(progress))?;
Ok(ProgressResponse::Continue)
},
ui,
)
.await
.with_context(|| {
format!(
"fetch_package_repository_from_mirrors {:?}, local_dir {:?}",
product_url, local_dir
)
})?;
tracing::debug!("Total fetch packages runtime {} seconds.", start.elapsed().as_secs_f32());
}
let final_name = local_repo_dir.join(&product_bundle.name);
tracing::info!("Download of product data for {:?} is complete.", product_bundle.name);
tracing::info!("Renaming temporary directory to {}", final_name.display());
fs::rename(temp_path, final_name).expect("Renaming temp directory failed.");
tracing::info!("Data written to \"{}\".", local_repo_dir.display());
Ok(())
}
/// Generate a (likely) unique name for the URL.
///
/// URLs don't always make good file paths.
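///
/// A sketch of the intent (the fragment is stripped before hashing, so both
/// hypothetical URLs below map to the same directory name; compare the
/// `test_local_path_helper` test further down):
///
/// ```ignore
/// assert_eq!(
///     pb_dir_name(&url::Url::parse("fake://foo#bar")?),
///     pb_dir_name(&url::Url::parse("fake://foo#blah")?),
/// );
/// ```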
pub(crate) fn pb_dir_name(gcs_url: &url::Url) -> String {
let mut gcs_url = gcs_url.to_owned();
gcs_url.set_fragment(None);
use std::collections::hash_map::DefaultHasher;
use std::hash::Hash;
use std::hash::Hasher;
let mut s = DefaultHasher::new();
gcs_url.as_str().hash(&mut s);
let out = s.finish();
tracing::debug!("pb_dir_name {:?}, hash {:?}", gcs_url, out);
format!("{}", out)
}
/// Download and expand data.
///
/// For a directory, all files in the directory are downloaded.
/// For a .tgz file, the file is downloaded and expanded.
async fn fetch_by_format<F, I>(
format: &str,
uri: &url::Url,
local_dir: &Path,
auth_flow: &AuthFlowChoice,
progress: &F,
ui: &I,
) -> Result<()>
where
F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult,
I: structured_ui::Interface + Sync,
{
tracing::debug!("fetch_by_format");
match format {
"files" | "tgz" => fetch_bundle_uri(uri, &local_dir, auth_flow, progress, ui).await,
_ =>
// The schema currently defines only "files" or "tgz" (see RFC-100).
// This error could be a typo in the product bundle or a new image
// format has been added and this code needs an update.
{
bail!(
"Unexpected image format ({:?}) in product bundle. \
Supported formats are \"files\" and \"tgz\". \
Please report as a bug.",
format,
)
}
}
}
/// Download data from any of the supported schemes listed in RFC-100, Product
/// Bundle, "bundle_uri".
///
/// Currently: "pattern": "^(?:http|https|gs|file):\/\/"
pub(crate) async fn fetch_bundle_uri<F, I>(
product_url: &url::Url,
local_dir: &Path,
auth_flow: &AuthFlowChoice,
progress: &F,
ui: &I,
) -> Result<()>
where
F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult,
I: structured_ui::Interface + Sync,
{
tracing::debug!("fetch_bundle_uri");
if product_url.scheme() == GS_SCHEME {
fetch_from_gcs(product_url.as_str(), local_dir, auth_flow, progress, ui)
.await
.context("Downloading from GCS.")?;
} else if product_url.scheme() == "http" || product_url.scheme() == "https" {
fetch_from_web(product_url, local_dir, progress, ui)
.await
.context("fetching from http(s)")?;
} else if let Some(_) = &path_from_file_url(product_url) {
// Since the file is already local, no fetch is necessary.
tracing::debug!("Found local file path {:?}", product_url);
} else {
bail!("Unexpected URI scheme in ({:?})", product_url);
}
Ok(())
}
async fn fetch_from_web<F, I>(
product_uri: &url::Url,
local_dir: &Path,
progress: &F,
_ui: &I,
) -> Result<()>
where
F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult,
I: structured_ui::Interface + Sync,
{
tracing::debug!("fetch_from_web");
let name = if let Some((_, name)) = product_uri.path().rsplit_once('/') {
name
} else {
unimplemented!()
};
if name.is_empty() {
unimplemented!("downloading a directory from a web server is not implemented");
}
let res = fuchsia_hyper::new_client()
.get(hyper::Uri::from_maybe_shared(product_uri.to_string())?)
.await
.with_context(|| format!("Requesting {}", product_uri))?;
match res.status() {
StatusCode::OK => {}
StatusCode::NOT_FOUND => {
bail!("{} not found", product_uri);
}
status => {
bail!("Unexpected HTTP status downloading {}: {}", product_uri, status);
}
}
let mut at: u64 = 0;
let length = if res.headers().contains_key(CONTENT_LENGTH) {
res.headers()
.get(CONTENT_LENGTH)
.context("getting content length")?
.to_str()?
.parse::<u64>()
.context("parsing content length")?
} else {
0
};
std::fs::create_dir_all(local_dir)
.with_context(|| format!("Creating {}", local_dir.display()))?;
let path = local_dir.join(name);
let mut file =
File::create(&path).await.with_context(|| format!("Creating {}", path.display()))?;
let mut stream = res.into_body();
let mut of = length;
// Throttle the progress UI updates to avoid burning CPU on changes
// the user will have trouble seeing anyway. Without throttling,
// around 20% of the execution time can be spent updating the
// progress UI. The throttle makes the overhead negligible.
let mut throttle = Throttle::from_duration(std::time::Duration::from_millis(500));
let url = product_uri.to_string();
while let Some(chunk) =
stream.try_next().await.with_context(|| format!("Downloading {}", product_uri))?
{
file.write_all(&chunk).await.with_context(|| format!("Writing {}", path.display()))?;
at += chunk.len() as u64;
if at > of {
of = at;
}
if throttle.is_ready() {
match progress(
DirectoryProgress { name: &url, at: 0, of: 1, units: "files" },
FileProgress { name: &url, at, of, units: "bytes" },
)
.context("rendering progress")?
{
ProgressResponse::Cancel => break,
_ => (),
}
}
}
file.close().await.with_context(|| format!("Closing {}", path.display()))?;
Ok(())
}
/// If internal_url is a file scheme, join `product_url` and `internal_url`.
/// Otherwise, return `internal_url`.
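///
/// A hedged sketch of the intended behavior (the bucket and file names are
/// illustrative values only, not from the original source):
///
/// ```ignore
/// let base = url::Url::parse("gs://bucket/path/product_bundle.json")?;
/// // A "file:/" internal URL is resolved relative to the product URL.
/// assert_eq!(
///     make_remote_url(&base, "file:/images/fuchsia.zbi")?.as_str(),
///     "gs://bucket/path/images/fuchsia.zbi"
/// );
/// // Any other absolute URL is passed through unchanged.
/// assert_eq!(
///     make_remote_url(&base, "gs://other/image.zbi")?.as_str(),
///     "gs://other/image.zbi"
/// );
/// ```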
pub(crate) fn make_remote_url(product_url: &url::Url, internal_url: &str) -> Result<url::Url> {
let result = if let Some(remainder) = internal_url.strip_prefix("file:/") {
// Note: The product_url must either be a path to the product_bundle.json file or to the
// parent directory (with a trailing slash).
product_url.join(remainder)?
} else {
url::Url::parse(&internal_url).with_context(|| format!("parsing url {:?}", internal_url))?
};
tracing::debug!(
"make_remote_url product_url {:?}, internal_url {:?}, result {:?}",
product_url,
internal_url,
result
);
Ok(result)
}
#[cfg(test)]
mod tests {
use super::*;
#[fuchsia_async::run_singlethreaded(test)]
async fn test_path_from_file_url() {
let input = url::Url::parse("fake://foo#bar").expect("url");
let output = path_from_file_url(&input);
assert!(output.is_none());
let input = url::Url::parse("file:///../../foo#bar").expect("url");
let output = path_from_file_url(&input);
assert_eq!(output, Some(Path::new("/foo").to_path_buf()));
let input = url::Url::parse("file://foo#bar").expect("url");
let output = path_from_file_url(&input);
assert!(output.is_none());
let input = url::Url::parse("file:///foo#bar").expect("url");
let output = path_from_file_url(&input);
assert_eq!(output, Some(Path::new("/foo").to_path_buf()));
let temp_dir = tempfile::TempDir::new().expect("temp dir");
let base_url = url::Url::from_directory_path(temp_dir.path().join("a/b/c/d")).expect("url");
let input =
url::Url::options().base_url(Some(&base_url)).parse("../../foo#bar").expect("url");
let output = path_from_file_url(&input);
assert_eq!(output, Some(temp_dir.path().join("a/b/foo").to_path_buf()));
}
#[fuchsia_async::run_singlethreaded(test)]
async fn test_url_sans_fragment() {
let input = url::Url::parse("fake://foo#bar").expect("url");
let output = url_sans_fragment(&input).expect("sans fragment");
assert_eq!(output, url::Url::parse("fake://foo").expect("check url"));
let input = url::Url::parse("fake://foo").expect("url");
let output = url_sans_fragment(&input).expect("sans fragment");
assert_eq!(output, url::Url::parse("fake://foo").expect("check url"));
}
// Disabling this test until a test config can be modified without altering
// the local user's config.
#[ignore]
#[fuchsia_async::run_singlethreaded(test)]
async fn | () {
let sdk_prefix = PathBuf::from("/"); // this is only used for file paths
let url = url::Url::parse("fake://foo#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert!(path.to_string_lossy().ends_with("ffx/pbms/951333825719265977/foo"));
// Note that the hash will be the same even though the fragment is
// different.
let url = url::Url::parse("fake://foo#blah").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert!(path.to_string_lossy().ends_with("ffx/pbms/951333825719265977/foo"));
let url = url::Url::parse("gs://foo/blah/*.json#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert!(path.to_string_lossy().ends_with("ffx/pbms/16042545670964745983/foo"));
let url = url::Url::parse("file:///foo/blah/*.json#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ true, &sdk_prefix).await.expect("dir helper");
assert_eq!(path.to_string_lossy(), "/foo/blah");
let url = url::Url::parse("file:///foo/blah/*.json#bar").expect("url");
let path =
local_path_helper(&url, "foo", /*dir=*/ false, &sdk_prefix).await.expect("dir helper");
assert_eq!(path.to_string_lossy(), "/foo/blah/*.json");
}
#[fuchsia_async::run_singlethreaded(test)]
#[should_panic(expected = "Unexpected image format")]
async fn test_fetch_by_format() {
let url = url::Url::parse("fake://foo").expect("url");
| test_local_path_helper | identifier_name |
lib.rs | //! `litem` is a compile-time templating library.
//!
//! At compile time, the `#[template]` attribute will read a template file,
//! parse it, and generate the rendering functions.
//!
//! ## Template Syntax
//! ### Expressions
//! - `{EXPR}` will expand to the value of `EXPR`, which
//! can be any Rust expression that implements `fmt::Display`.
//! If escaping is enabled, the expression will be escaped.
//! - `{@EXPR}` will expand to the value of `EXPR`,
//! but will never perform escaping, even when it is enabled.
//!
//! Inside of a template, there are two built-in variables:
//! - `self` represents the template data.
//! - `writer` represents a `&mut impl io::Write`.
//!
//! ### Statements
//! - `{:STMT}`, where `STMT` can be any arbitrary Rust statement (including `let` and `use` statements),
//! will run `STMT` and will not expand to anything.
//!
//! The variable `writer` should not be reassigned, under any circumstances.
//!
//! All *blocks* create a new scope for variables.
//!
//! #### `if`
//! - `{:if EXPR}` begins an `if` block.
//! - `{:else if EXPR}` and `{:else}` can be placed inside of an `if` block.
//! - `{:end}` ends an `if` block.
//!
//! #### `match`
//! - `{:match EXPR}` begins an `match` block.
//! - `{:case PATTERN}` begins a `case` block and must be placed inside of a `match` block.
//! `PATTERN` can be any pattern that is accepted by rust inside of a `match` statement.
//! - `{:end}` ends a `match` block or a `case` block.
//!
//! #### `loop`, `while`, `for`
//! - `{:loop}`, `{:while EXPR}`, `{:for PATTERN in ITER}` function just like their Rust counterparts
//! and begin their corresponding blocks
//! - `{:end}` ends a loop block.
//!
//! #### `block`
//! You can use `{:block}` / `{:end}` to create a separate scope for variables, in case you don't want
//! symbols leaking into the surrounding scope.
//!
//! #### `include`
//! Use `{:include EXPR}`, where `EXPR` is a litem template, to include one template inside of another.
//!
//! ### Raw Text
//! `{# RAW TEXT }` will be substituted with `{ RAW TEXT }`. Braces in `RAW TEXT` will be ignored by the transpiler.
//! This is especially useful when surrounding blocks of CSS.
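//!
//! ## Example
//!
//! A minimal sketch of intended usage. The template file name, its contents,
//! and the struct below are hypothetical illustrations, not taken from this
//! crate's tests:
//!
//! ```ignore
//! // hello.html contains: <p>Hello, {self.name}!</p>
//! #[litem::template("hello.html", escape = "html")]
//! struct Hello<'a> {
//!     name: &'a str,
//! }
//!
//! let page = Hello { name: "Ferris & friends" }.render_string().unwrap();
//! assert_eq!(page, "<p>Hello, Ferris &amp; friends!</p>");
//! ```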
extern crate proc_macro;
use proc_macro2::TokenStream;
use quote::quote;
use syn;
use syn::parse::Parse;
use syn::parse_macro_input;
use std::borrow::Cow;
use std::str::FromStr;
/// Generates functions for rendering a template.
/// Should be applied to a `struct` declaration.
///
/// ## Meta Items
/// `#[template]`'s first meta item must be a string literal
/// representing the path to the template file.
///
/// Subsequent attributes must be in `key=value` format. Currently,
/// the following keys are supported:
///
/// | Key | Possible Values | Default Value | Description |
/// |---|---|---|---|
/// | `escape` | `"txt", "html"` | `"txt"` | What escaping mode to use. If `"html"` is selected, `<`, `>`, and `&` will be changed to '&lt;', '&gt;', and '&amp;' respectively. If `"txt"` is selected, no escaping is performed. |
///
/// ## Generated Methods
///
/// `#[template]` will generate two associated methods with the following signatures:
///
/// ```no_run
/// # use std::io;
/// pub fn render(&self, writer: &mut impl io::Write) -> io::Result<()>;
/// pub fn render_string(&self) -> io::Result<String>;
/// ```
#[proc_macro_attribute]
pub fn template(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream {
let item = parse_macro_input!(item as syn::ItemStruct);
let attr = parse_macro_input!(attr as syn::AttributeArgs);
let path = match &attr[0] {
syn::NestedMeta::Lit(lit) => {
match lit {
syn::Lit::Str(s) => {
s.value()
},
_ => panic!("#[template]: expected string literal for path")
}
},
_ => {
panic!("#[template]: expected string literal for path")
}
};
let mut escape = "txt".to_string();
for attr in &attr[1..] {
match attr {
syn::NestedMeta::Meta(syn::Meta::NameValue(val)) => {
let ident = val.path.get_ident().expect("#[template]: expected name = value; name must be identifier, not path");
match ident.to_string().as_str() {
"escape" => {
let type_ = match &val.lit {
syn::Lit::Str(s) => s.value(),
_ => panic!("#[template]: attribute 'escape' must have string value")
};
escape = type_;
},
_ => panic!("#[template]: unknown attribute key '{}'", ident)
}
},
_ => panic!("#[template]: expected name = value")
}
}
let escape_func = match escape.as_str() {
"txt" => |s: syn::Expr| -> TokenStream { (quote! { #s }) },
"html" => |s: syn::Expr| -> TokenStream {
let q = quote! {
::std::string::ToString::to_string(&(#s)).replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
};
q
},
_ => panic!("#[template]: unknown escape type: {}", escape)
};
eprintln!("{:?}", std::env::current_dir().unwrap());
let template = std::fs::read_to_string(path).unwrap();
let parts = PartIterator(TokenIterator::new(&template));
let mut vecs: Vec<Vec<TokenStream>> = vec![vec![]];
for part in parts {
match part {
Part::Text(t) => {
let last = vecs.len()-1;
vecs[last].push(quote! {
write!(writer, "{}", #t)?;
}.into());
},
Part::Expr(expr, raw) => {
let last = vecs.len()-1;
if raw {
vecs[last].push(quote! {
write!(writer, "{}", #expr)?;
}.into());
} else {
let tokens = escape_func(expr);
vecs[last].push(quote! {
write!(writer, "{}", #tokens)?;
}.into());
}
},
Part::Stmt(stmt) => {
let last = vecs.len()-1;
vecs[last].push(stmt.into_iter()
.chain(
std::array::IntoIter::new([
proc_macro2::Punct::new(';', proc_macro2::Spacing::Alone).into()
])
)
.collect::<TokenStream>());
},
Part::GroupStart(tokens) => {
let last = vecs.len()-1;
vecs[last].push(tokens);
vecs.push(vec![]);
},
Part::GroupEnd => {
let vec = vecs.pop().expect("unmatched {:end} node");
let last = vecs.len()-1;
vecs[last].push(
<std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([
proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into()
])
.collect()
);
},
Part::GroupStartEnd(tokens) => {
let vec = vecs.pop().expect("unmatched {:end} node");
let last = vecs.len()-1;
vecs[last].push(
<std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([
proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into()
])
.collect()
);
vecs[last].push(tokens);
vecs.push(vec![]);
}
}
}
let code = vecs.into_iter().next().expect("unmatched {:end} node").into_iter().collect::<TokenStream>();
let item_ = item.clone();
let name = item.ident;
let (impl_gen, type_gen, where_clause) = item.generics.split_for_impl();
let q = quote! {
#item_
impl #impl_gen #name #type_gen #where_clause {
pub fn render(&self, writer: &mut impl ::std::io::Write) -> ::std::io::Result<()> {
#code
Ok(())
}
pub fn render_string(&self) -> ::std::io::Result<String> {
let mut buf: Vec<u8> = Vec::new();
self.render(&mut buf)?;
Ok(String::from_utf8_lossy(&buf).into_owned())
}
}
};
q.into()
}
#[derive(Clone)]
enum Part {
Text(String),
Expr(syn::Expr, bool),
Stmt(TokenStream),
GroupStart(TokenStream),
GroupStartEnd(TokenStream),
GroupEnd,
}
struct PartIterator<'i>(pub TokenIterator<'i>);
impl<'i> Iterator for PartIterator<'i> {
type Item = Part;
fn next(&mut self) -> Option<Part> {
let tok = self.0.next()?;
Some(match tok {
Token::Text(t) => Part::Text(t.into_owned()),
Token::Expr(t, raw) => {
let expr = syn::parse_str(t).unwrap();
Part::Expr(expr, raw)
},
Token::Stmt(t) => {
let tokens = TokenStream::from_str(t).unwrap();
match tokens.clone().into_iter().next() {
Some(proc_macro2::TokenTree::Ident(ident)) => {
match ident.to_string().as_str() {
"for" | "if" | "match" | "while" | "loop" => {
Part::GroupStart(tokens)
},
"else" => {
Part::GroupStartEnd(tokens)
}
"case" => {
Part::GroupStart(
tokens.into_iter().skip(1)
.chain(
std::array::IntoIter::new([
proc_macro2::Punct::new('=', proc_macro2::Spacing::Joint).into(),
proc_macro2::Punct::new('>', proc_macro2::Spacing::Alone).into(),
])
)
.collect())
},
"block" => {
Part::GroupStart(TokenStream::new())
},
"end" => {
Part::GroupEnd
},
"include" => {
let tokens = tokens.into_iter().skip(1).collect::<TokenStream>();
Part::Stmt(quote! {
(#tokens).render(writer)?
})
},
_ => Part::Stmt(tokens)
}
},
_ => {
Part::Stmt(tokens)
}
}
}
})
}
}
#[derive(Clone, Debug)]
enum Token<'i> {
Text(Cow<'i, str>),
Expr(&'i str, bool),
Stmt(&'i str),
}
struct TokenIterator<'i> {
src: &'i str,
chars: std::iter::Peekable<std::str::CharIndices<'i>>,
}
impl<'i> TokenIterator<'i> {
pub fn new(src: &'i str) -> Self |
}
impl<'i> Iterator for TokenIterator<'i> {
type Item = Token<'i>;
fn next(&mut self) -> Option<Token<'i>> {
let (first_idx, first) = match self.chars.peek() {
None => return None,
Some(v) => *v,
};
let mut n_braces = 0;
let (final_idx, final_) = loop {
let (idx, chr) = self.chars.next().unwrap();
let (next_idx, next_chr) = match self.chars.peek() {
None => { break (idx, chr); },
Some(x) => *x,
};
if first != '{' && next_chr == '{' {
break (idx, chr);
}
if first == '{' {
if next_chr == '{' {
n_braces += 1;
}
if next_chr == '}' {
if n_braces == 0 {
self.chars.next();
break (next_idx, next_chr);
}
n_braces -= 1;
}
}
};
if first == '{' && final_ != '}' {
panic!("Unmatched braces");
}
let span = &self.src[first_idx..final_idx+1];
let second = span.chars().skip(1).next();
Some(match (first, second) {
('{', Some(':')) => Token::Stmt(&span[2..span.len()-1]),
('{', Some('@')) => Token::Expr(&span[2..span.len()-1], true),
('{', Some('#')) => Token::Text(format!("{}{}", "{", &span[2..span.len()]).into()),
('{', _) => Token::Expr(&span[1..span.len()-1], false),
_ => Token::Text(span.into())
})
}
} | {
Self {
src,
chars: src.char_indices().peekable(),
}
} | identifier_body |
lib.rs | //! `litem` is a compile-time templating library.
//!
//! At compile time, the `#[template]` attribute will read a template file,
//! parse it, and generate the rendering functions.
//!
//! ## Template Syntax
//! ### Expressions
//! - `{EXPR}` will expand to the value of `EXPR`, which
//! can be any Rust expression that implements `fmt::Display`.
//! If escaping is enabled, the expression will be escaped.
//! - `{@EXPR}` will expand to the value of `EXPR`,
//! but will never perform escaping, even when it is enabled.
//!
//! Inside of a template, there are two built-in variables:
//! - `self` represents the template data.
//! - `writer` represents a `&mut impl io::Write`.
//!
//! ### Statements
//! - `{:STMT}`, where `STMT` can be any arbitrary Rust statement (including `let` and `use` statements),
//! will run `STMT` and will not expand to anything.
//!
//! The variable `writer` should not be reassigned, under any circumstances.
//!
//! All *blocks* create a new scope for variables.
//!
//! #### `if`
//! - `{:if EXPR}` begins an `if` block.
//! - `{:else if EXPR}` and `{:else}` can be placed inside of an `if` block.
//! - `{:end}` ends an `if` block.
//!
//! #### `match`
//! - `{:match EXPR}` begins a `match` block.
//! - `{:case PATTERN}` begins a `case` block and must be placed inside of a `match` block.
//! `PATTERN` can be any pattern that is accepted by Rust inside of a `match` statement.
//! - `{:end}` ends a `match` block or a `case` block.
//!
//! #### `loop`, `while`, `for`
//! - `{:loop}`, `{:while EXPR}`, `{:for PATTERN in ITER}` function just like their Rust counterparts
//! and begin their corresponding blocks
//! - `{:end}` ends a loop block.
//!
//! #### `block`
//! You can use `{:block}` / `{:end}` to create a separate scope for variables, in case you don't want
//! symbols leaking into the surrounding scope.
//!
//! #### `include`
//! Use `{:include EXPR}`, where `EXPR` is a litem template, to include one template inside of another.
//!
//! ### Raw Text
//! `{# RAW TEXT }` will be substituted with `{ RAW TEXT }`. Braces in `RAW TEXT` will be ignored by the transpiler.
//! This is especially useful when surrounding blocks of CSS.
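//!
//! ## Example
//!
//! A minimal sketch of intended usage. The template file name, its contents,
//! and the struct below are hypothetical illustrations, not taken from this
//! crate's tests:
//!
//! ```ignore
//! // hello.html contains: <p>Hello, {self.name}!</p>
//! #[litem::template("hello.html", escape = "html")]
//! struct Hello<'a> {
//!     name: &'a str,
//! }
//!
//! let page = Hello { name: "Ferris & friends" }.render_string().unwrap();
//! assert_eq!(page, "<p>Hello, Ferris &amp; friends!</p>");
//! ```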
extern crate proc_macro;
use proc_macro2::TokenStream;
use quote::quote;
use syn;
use syn::parse::Parse;
use syn::parse_macro_input;
use std::borrow::Cow;
use std::str::FromStr;
/// Generates functions for rendering a template.
/// Should be applied to a `struct` declaration.
///
/// ## Meta Items
/// `#[template]`'s first meta item must be a string literal
/// representing the path to the template file.
///
/// Subsequent attributes must be in `key=value` format. Currently,
/// the following keys are supported:
///
/// | Key | Possible Values | Default Value | Description |
/// |---|---|---|---|
/// | `escape` | `"txt", "html"` | `"txt"` | What escaping mode to use. If `"html"` is selected, `<`, `>`, and `&` will be changed to '&lt;', '&gt;', and '&amp;' respectively. If `"txt"` is selected, no escaping is performed. |
///
/// ## Generated Methods
///
/// `#[template]` will generate two associated methods with the following signatures:
///
/// ```no_run
/// # use std::io;
/// pub fn render(&self, writer: &mut impl io::Write) -> io::Result<()>;
/// pub fn render_string(&self) -> io::Result<String>;
/// ```
#[proc_macro_attribute]
pub fn template(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream {
let item = parse_macro_input!(item as syn::ItemStruct);
let attr = parse_macro_input!(attr as syn::AttributeArgs);
let path = match &attr[0] {
syn::NestedMeta::Lit(lit) => {
match lit {
syn::Lit::Str(s) => {
s.value()
},
_ => panic!("#[template]: expected string literal for path")
}
},
_ => {
panic!("#[template]: expected string literal for path")
}
};
let mut escape = "txt".to_string();
for attr in &attr[1..] {
match attr {
syn::NestedMeta::Meta(syn::Meta::NameValue(val)) => {
let ident = val.path.get_ident().expect("#[template]: expected name = value; name must be identifier, not path");
match ident.to_string().as_str() {
"escape" => {
let type_ = match &val.lit {
syn::Lit::Str(s) => s.value(),
_ => panic!("#[template]: attribute 'escape' must have string value")
};
escape = type_;
},
_ => panic!("#[template]: unknown attribute key '{}'", ident)
}
},
_ => panic!("#[template]: expected name = value")
}
}
let escape_func = match escape.as_str() {
"txt" => |s: syn::Expr| -> TokenStream { (quote! { #s }) },
"html" => |s: syn::Expr| -> TokenStream {
let q = quote! {
::std::string::ToString::to_string(&(#s)).replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
};
q
},
_ => panic!("#[template]: unknown escape type: {}", escape)
};
eprintln!("{:?}", std::env::current_dir().unwrap());
let template = std::fs::read_to_string(path).unwrap();
let parts = PartIterator(TokenIterator::new(&template));
let mut vecs: Vec<Vec<TokenStream>> = vec![vec![]];
for part in parts {
match part {
Part::Text(t) => {
let last = vecs.len()-1;
vecs[last].push(quote! {
write!(writer, "{}", #t)?;
}.into());
},
Part::Expr(expr, raw) => {
let last = vecs.len()-1;
if raw {
vecs[last].push(quote! {
write!(writer, "{}", #expr)?;
}.into());
} else {
let tokens = escape_func(expr);
vecs[last].push(quote! {
write!(writer, "{}", #tokens)?;
}.into());
}
},
Part::Stmt(stmt) => {
let last = vecs.len()-1;
vecs[last].push(stmt.into_iter()
.chain(
std::array::IntoIter::new([
proc_macro2::Punct::new(';', proc_macro2::Spacing::Alone).into()
])
)
.collect::<TokenStream>());
},
Part::GroupStart(tokens) => {
let last = vecs.len()-1;
vecs[last].push(tokens);
vecs.push(vec![]);
},
Part::GroupEnd => {
let vec = vecs.pop().expect("unmatched {:end} node");
let last = vecs.len()-1;
vecs[last].push(
<std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([
proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into()
])
.collect()
);
},
Part::GroupStartEnd(tokens) => {
let vec = vecs.pop().expect("unmatched {:end} node");
let last = vecs.len()-1;
vecs[last].push(
<std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([
proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into()
])
.collect()
);
vecs[last].push(tokens);
vecs.push(vec![]);
}
}
}
let code = vecs.into_iter().next().expect("unmatched {:end} node").into_iter().collect::<TokenStream>();
let item_ = item.clone();
let name = item.ident;
let (impl_gen, type_gen, where_clause) = item.generics.split_for_impl();
let q = quote! {
#item_
impl #impl_gen #name #type_gen #where_clause {
pub fn render(&self, writer: &mut impl ::std::io::Write) -> ::std::io::Result<()> {
#code
Ok(())
}
pub fn render_string(&self) -> ::std::io::Result<String> {
let mut buf: Vec<u8> = Vec::new();
self.render(&mut buf)?;
Ok(String::from_utf8_lossy(&buf).into_owned())
}
}
};
q.into()
}
#[derive(Clone)]
enum Part {
Text(String),
Expr(syn::Expr, bool),
Stmt(TokenStream),
GroupStart(TokenStream),
GroupStartEnd(TokenStream),
GroupEnd,
}
struct PartIterator<'i>(pub TokenIterator<'i>);
impl<'i> Iterator for PartIterator<'i> {
type Item = Part;
fn next(&mut self) -> Option<Part> {
let tok = self.0.next()?;
Some(match tok {
Token::Text(t) => Part::Text(t.into_owned()),
Token::Expr(t, raw) => {
let expr = syn::parse_str(t).unwrap();
Part::Expr(expr, raw)
},
Token::Stmt(t) => {
let tokens = TokenStream::from_str(t).unwrap();
match tokens.clone().into_iter().next() {
Some(proc_macro2::TokenTree::Ident(ident)) => {
match ident.to_string().as_str() {
"for" | "if" | "match" | "while" | "loop" => {
Part::GroupStart(tokens)
},
"else" => {
Part::GroupStartEnd(tokens)
}
"case" => {
Part::GroupStart(
tokens.into_iter().skip(1)
.chain(
std::array::IntoIter::new([
proc_macro2::Punct::new('=', proc_macro2::Spacing::Joint).into(),
proc_macro2::Punct::new('>', proc_macro2::Spacing::Alone).into(),
])
)
.collect())
},
"block" => {
Part::GroupStart(TokenStream::new())
},
"end" => {
Part::GroupEnd
},
"include" => {
let tokens = tokens.into_iter().skip(1).collect::<TokenStream>();
Part::Stmt(quote! {
(#tokens).render(writer)?
})
},
_ => Part::Stmt(tokens)
}
},
_ => {
Part::Stmt(tokens)
}
}
}
})
}
}
#[derive(Clone, Debug)]
enum Token<'i> {
Text(Cow<'i, str>),
Expr(&'i str, bool),
Stmt(&'i str),
}
struct TokenIterator<'i> {
src: &'i str,
chars: std::iter::Peekable<std::str::CharIndices<'i>>,
}
impl<'i> TokenIterator<'i> {
pub fn new(src: &'i str) -> Self {
Self {
src,
chars: src.char_indices().peekable(),
}
}
}
impl<'i> Iterator for TokenIterator<'i> {
type Item = Token<'i>;
fn | (&mut self) -> Option<Token<'i>> {
let (first_idx, first) = match self.chars.peek() {
None => return None,
Some(v) => *v,
};
let mut n_braces = 0;
let (final_idx, final_) = loop {
let (idx, chr) = self.chars.next().unwrap();
let (next_idx, next_chr) = match self.chars.peek() {
None => { break (idx, chr); },
Some(x) => *x,
};
if first != '{' && next_chr == '{' {
break (idx, chr);
}
if first == '{' {
if next_chr == '{' {
n_braces += 1;
}
if next_chr == '}' {
if n_braces == 0 {
self.chars.next();
break (next_idx, next_chr);
}
n_braces -= 1;
}
}
};
if first == '{' && final_ != '}' {
panic!("Unmatched braces");
}
let span = &self.src[first_idx..final_idx+1];
let second = span.chars().skip(1).next();
Some(match (first, second) {
('{', Some(':')) => Token::Stmt(&span[2..span.len()-1]),
('{', Some('@')) => Token::Expr(&span[2..span.len()-1], true),
('{', Some('#')) => Token::Text(format!("{}{}", "{", &span[2..span.len()]).into()),
('{', _) => Token::Expr(&span[1..span.len()-1], false),
_ => Token::Text(span.into())
})
}
} | next | identifier_name |
lib.rs | //! `litem` is a compile-time templating library.
//!
//! At compile time, the `#[template]` attribute will read a template file,
//! parse it, and generate the rendering functions.
//!
//! ## Template Syntax
//! ### Expressions
//! - `{EXPR}` will expand to the value of `EXPR`, which
//! can be any Rust expression that implements `fmt::Display`.
//! If escaping is enabled, the expression will be escaped.
//! - `{@EXPR}` will expand to the value of `EXPR`,
//! but will never perform escaping, even when it is enabled.
//!
//! Inside of a template, there are two built-in variables:
//! - `self` represents the template data.
//! - `writer` represents a `&mut impl io::Write`.
//!
//! ### Statements
//! - `{:STMT}`, where `STMT` can be any arbitrary Rust statement (including `let` and `use` statements),
//! will run `STMT` and will not expand to anything.
//!
//! The variable `writer` should not be reassigned, under any circumstances.
//!
//! All *blocks* create a new scope for variables.
//!
//! #### `if`
//! - `{:if EXPR}` begins an `if` block.
//! - `{:else if EXPR}` and `{:else}` can be placed inside of an `if` block.
//! - `{:end}` ends an `if` block.
//!
//! #### `match`
//! - `{:match EXPR}` begins a `match` block.
//! - `{:case PATTERN}` begins a `case` block and must be placed inside of a `match` block.
//! `PATTERN` can be any pattern that is accepted by Rust inside of a `match` statement.
//! - `{:end}` ends a `match` block or a `case` block.
//!
//! #### `loop`, `while`, `for`
//! - `{:loop}`, `{:while EXPR}`, `{:for PATTERN in ITER}` function just like their Rust counterparts
//! and begin their corresponding blocks
//! - `{:end}` ends a loop block.
//!
//! #### `block`
//! You can use `{:block}` / `{:end}` to create a separate scope for variables, in case you don't want
//! symbols leaking into the surrounding scope.
//!
//! #### `include`
//! Use `{:include EXPR}`, where `EXPR` is a litem template, to include one template inside of another.
//!
//! ### Raw Text
//! `{# RAW TEXT }` will be substituted with `{ RAW TEXT }`. Braces in `RAW TEXT` will be ignored by the transpiler.
//! This is especially useful when surrounding blocks of CSS.
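//!
//! ## Example
//!
//! A minimal sketch of intended usage. The template file name, its contents,
//! and the struct below are hypothetical illustrations, not taken from this
//! crate's tests:
//!
//! ```ignore
//! // hello.html contains: <p>Hello, {self.name}!</p>
//! #[litem::template("hello.html", escape = "html")]
//! struct Hello<'a> {
//!     name: &'a str,
//! }
//!
//! let page = Hello { name: "Ferris & friends" }.render_string().unwrap();
//! assert_eq!(page, "<p>Hello, Ferris &amp; friends!</p>");
//! ```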
extern crate proc_macro;
use proc_macro2::TokenStream;
use quote::quote;
use syn;
use syn::parse::Parse;
use syn::parse_macro_input;
use std::borrow::Cow;
use std::str::FromStr;
/// Generates functions for rendering a template.
/// Should be applied to a `struct` declaration.
///
/// ## Meta Items
/// `#[template]`'s first meta item must be a string literal
/// representing the path to the template file.
///
/// Subsequent attributes must be in `key=value` format. Currently,
/// the following keys are supported:
///
/// | Key | Possible Values | Default Value | Description |
/// |---|---|---|---|
/// | `escape` | `"txt", "html"` | `"txt"` | What escaping mode to use. If `"html"` is selected, `<`, `>`, and `&` will be changed to '&lt;', '&gt;', and '&amp;' respectively. If `"txt"` is selected, no escaping is performed. |
///
/// ## Generated Methods
///
/// `#[template]` will generate two associated methods with the following signatures:
///
/// ```no_run
/// # use std::io;
/// pub fn render(&self, writer: &mut impl io::Write) -> io::Result<()>;
/// pub fn render_string(&self) -> io::Result<String>;
/// ```
#[proc_macro_attribute]
pub fn template(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream {
let item = parse_macro_input!(item as syn::ItemStruct);
let attr = parse_macro_input!(attr as syn::AttributeArgs);
let path = match &attr[0] {
syn::NestedMeta::Lit(lit) => {
match lit {
syn::Lit::Str(s) => {
s.value()
},
_ => panic!("#[template]: expected string literal for path")
}
},
_ => {
panic!("#[template]: expected string literal for path")
}
};
let mut escape = "txt".to_string();
for attr in &attr[1..] {
match attr {
syn::NestedMeta::Meta(syn::Meta::NameValue(val)) => | ,
_ => panic!("#[template]: expected name = value")
}
}
let escape_func = match escape.as_str() {
"txt" => |s: syn::Expr| -> TokenStream { (quote! { #s }) },
"html" => |s: syn::Expr| -> TokenStream {
let q = quote! {
::std::string::ToString::to_string(&(#s)).replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
};
q
},
_ => panic!("#[template]: unknown escape type: {}", escape)
};
eprintln!("{:?}", std::env::current_dir().unwrap());
let template = std::fs::read_to_string(path).unwrap();
let parts = PartIterator(TokenIterator::new(&template));
let mut vecs: Vec<Vec<TokenStream>> = vec![vec![]];
for part in parts {
match part {
Part::Text(t) => {
let last = vecs.len()-1;
vecs[last].push(quote! {
write!(writer, "{}", #t)?;
}.into());
},
Part::Expr(expr, raw) => {
let last = vecs.len()-1;
if raw {
vecs[last].push(quote! {
write!(writer, "{}", #expr)?;
}.into());
} else {
let tokens = escape_func(expr);
vecs[last].push(quote! {
write!(writer, "{}", #tokens)?;
}.into());
}
},
Part::Stmt(stmt) => {
let last = vecs.len()-1;
vecs[last].push(stmt.into_iter()
.chain(
std::array::IntoIter::new([
proc_macro2::Punct::new(';', proc_macro2::Spacing::Alone).into()
])
)
.collect::<TokenStream>());
},
Part::GroupStart(tokens) => {
let last = vecs.len()-1;
vecs[last].push(tokens);
vecs.push(vec![]);
},
Part::GroupEnd => {
let vec = vecs.pop().expect("unmatched {:end} node");
let last = vecs.len()-1;
vecs[last].push(
<std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([
proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into()
])
.collect()
);
},
Part::GroupStartEnd(tokens) => {
let vec = vecs.pop().expect("unmatched {:end} node");
let last = vecs.len()-1;
vecs[last].push(
<std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([
proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into()
])
.collect()
);
vecs[last].push(tokens);
vecs.push(vec![]);
}
}
}
let code = vecs.into_iter().next().expect("unmatched {:end} node").into_iter().collect::<TokenStream>();
let item_ = item.clone();
let name = item.ident;
let (impl_gen, type_gen, where_clause) = item.generics.split_for_impl();
let q = quote! {
#item_
impl #impl_gen #name #type_gen #where_clause {
pub fn render(&self, writer: &mut impl ::std::io::Write) -> ::std::io::Result<()> {
#code
Ok(())
}
pub fn render_string(&self) -> ::std::io::Result<String> {
let mut buf: Vec<u8> = Vec::new();
self.render(&mut buf)?;
Ok(String::from_utf8_lossy(&buf).into_owned())
}
}
};
q.into()
}
#[derive(Clone)]
enum Part {
Text(String),
Expr(syn::Expr, bool),
Stmt(TokenStream),
GroupStart(TokenStream),
GroupStartEnd(TokenStream),
GroupEnd,
}
struct PartIterator<'i>(pub TokenIterator<'i>);
impl<'i> Iterator for PartIterator<'i> {
type Item = Part;
fn next(&mut self) -> Option<Part> {
let tok = self.0.next()?;
Some(match tok {
Token::Text(t) => Part::Text(t.into_owned()),
Token::Expr(t, raw) => {
let expr = syn::parse_str(t).unwrap();
Part::Expr(expr, raw)
},
Token::Stmt(t) => {
let tokens = TokenStream::from_str(t).unwrap();
match tokens.clone().into_iter().next() {
Some(proc_macro2::TokenTree::Ident(ident)) => {
match ident.to_string().as_str() {
"for" | "if" | "match" | "while" | "loop" => {
Part::GroupStart(tokens)
},
"else" => {
Part::GroupStartEnd(tokens)
}
"case" => {
Part::GroupStart(
tokens.into_iter().skip(1)
.chain(
std::array::IntoIter::new([
proc_macro2::Punct::new('=', proc_macro2::Spacing::Joint).into(),
proc_macro2::Punct::new('>', proc_macro2::Spacing::Alone).into(),
])
)
.collect())
},
"block" => {
Part::GroupStart(TokenStream::new())
},
"end" => {
Part::GroupEnd
},
"include" => {
let tokens = tokens.into_iter().skip(1).collect::<TokenStream>();
Part::Stmt(quote! {
(#tokens).render(writer)?
})
},
_ => Part::Stmt(tokens)
}
},
_ => {
Part::Stmt(tokens)
}
}
}
})
}
}
#[derive(Clone, Debug)]
enum Token<'i> {
Text(Cow<'i, str>),
Expr(&'i str, bool),
Stmt(&'i str),
}
struct TokenIterator<'i> {
src: &'i str,
chars: std::iter::Peekable<std::str::CharIndices<'i>>,
}
impl<'i> TokenIterator<'i> {
pub fn new(src: &'i str) -> Self {
Self {
src,
chars: src.char_indices().peekable(),
}
}
}
impl<'i> Iterator for TokenIterator<'i> {
type Item = Token<'i>;
fn next(&mut self) -> Option<Token<'i>> {
let (first_idx, first) = match self.chars.peek() {
None => return None,
Some(v) => *v,
};
let mut n_braces = 0;
let (final_idx, final_) = loop {
let (idx, chr) = self.chars.next().unwrap();
let (next_idx, next_chr) = match self.chars.peek() {
None => { break (idx, chr); },
Some(x) => *x,
};
if first != '{' && next_chr == '{' {
break (idx, chr);
}
if first == '{' {
if next_chr == '{' {
n_braces += 1;
}
if next_chr == '}' {
if n_braces == 0 {
self.chars.next();
break (next_idx, next_chr);
}
n_braces -= 1;
}
}
};
if first == '{' && final_ != '}' {
panic!("Unmatched braces");
}
let span = &self.src[first_idx..final_idx+1];
let second = span.chars().skip(1).next();
Some(match (first, second) {
('{', Some(':')) => Token::Stmt(&span[2..span.len()-1]),
('{', Some('@')) => Token::Expr(&span[2..span.len()-1], true),
('{', Some('#')) => Token::Text(format!("{}{}", "{", &span[2..span.len()]).into()),
('{', _) => Token::Expr(&span[1..span.len()-1], false),
_ => Token::Text(span.into())
})
}
} | {
let ident = val.path.get_ident().expect("#[template]: expected name = value; name must be identifier, not path");
match ident.to_string().as_str() {
"escape" => {
let type_ = match &val.lit {
syn::Lit::Str(s) => s.value(),
_ => panic!("#[template]: attribute 'escape' must have string value")
};
escape = type_;
},
_ => panic!("#[template]: unknown attribute key '{}'", ident)
}
} | conditional_block |
lib.rs | //! `litem` is a compile-time templating library.
//!
//! At compile time, the `#[template]` attribute will read a template file,
//! parse it, and generate the rendering functions.
//!
//! ## Template Syntax
//! ### Expressions
//! - `{EXPR}` will expand to the value of `EXPR`, which
//! can be any Rust expression that implements `fmt::Display`.
//! If escaping is enabled, the expression will be escaped.
//! - `{@EXPR}` will expand to the value of `EXPR`,
//! but will never perform escaping, even when it is enabled.
//!
//! Inside of a template, there are two built-in variables:
//! - `self` represents the template data.
//! - `writer` represents a `&mut impl io::Write`.
//!
//! ### Statements
//! - `{:STMT}`, where `STMT` can be any arbitrary Rust statement (including `let` and `use` statements),
//! will run `STMT` and will not expand to anything.
//!
//! The variable `writer` should not be reassigned, under any circumstances.
//!
//! All *blocks* create a new scope for variables.
//!
//! #### `if`
//! - `{:if EXPR}` begins an `if` block.
//! - `{:else if EXPR}` and `{:else}` can be placed inside of an `if` block.
//! - `{:end}` ends an `if` block.
//!
//! #### `match`
//! - `{:match EXPR}` begins a `match` block.
//! - `{:case PATTERN}` begins a `case` block and must be placed inside of a `match` block.
//! `PATTERN` can be any pattern that is accepted by Rust inside of a `match` statement.
//! - `{:end}` ends a `match` block or a `case` block.
//!
//! #### `loop`, `while`, `for`
//! - `{:loop}`, `{:while EXPR}`, `{:for PATTERN in ITER}` function just like their Rust counterparts
//! and begin their corresponding blocks
//! - `{:end}` ends a loop block.
//!
//! #### `block`
//! You can use `{:block}` / `{:end}` to create a separate scope for variables, in case you don't want
//! symbols leaking into the surrounding scope.
//!
//! #### `include`
//! Use `{:include EXPR}`, where `EXPR` is a litem template, to include one template inside of another.
//!
//! ### Raw Text
//! `{# RAW TEXT }` will be substituted with `{ RAW TEXT }`. Braces in `RAW TEXT` will be ignored by the transpiler.
//! This is especially useful when surrounding blocks of CSS.
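//!
//! ## Example
//!
//! A minimal sketch of intended usage. The template file name, its contents,
//! and the struct below are hypothetical illustrations, not taken from this
//! crate's tests:
//!
//! ```ignore
//! // hello.html contains: <p>Hello, {self.name}!</p>
//! #[litem::template("hello.html", escape = "html")]
//! struct Hello<'a> {
//!     name: &'a str,
//! }
//!
//! let page = Hello { name: "Ferris & friends" }.render_string().unwrap();
//! assert_eq!(page, "<p>Hello, Ferris &amp; friends!</p>");
//! ```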
extern crate proc_macro;
use proc_macro2::TokenStream;
use quote::quote;
use syn;
use syn::parse::Parse;
use syn::parse_macro_input;
use std::borrow::Cow;
use std::str::FromStr;
/// Generates functions for rendering a template.
/// Should be applied to a `struct` declaration.
///
/// ## Meta Items
/// `#[template]`'s first meta item must be a string literal
/// representing the path to the template file.
///
/// Subsequent attributes must be in `key=value` format. Currently,
/// the following keys are supported:
///
/// | Key | Possible Values | Default Value | Description |
/// |---|---|---|---|
/// | `escape` | `"txt", "html"` | `"txt"` | What escaping mode to use. If `"html"` is selected, `<`, `>`, and `&` will be changed to '&lt;', '&gt;', and '&amp;' respectively. If `"txt"` is selected, no escaping is performed. |
///
/// ## Generated Methods
///
/// `#[template]` will generate two associated methods with the following signatures:
///
/// ```no_run
/// # use std::io;
/// pub fn render(&self, writer: &mut impl io::Write) -> io::Result<()>;
/// pub fn render_string(&self) -> io::Result<String>;
/// ```
#[proc_macro_attribute]
pub fn template(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream {
let item = parse_macro_input!(item as syn::ItemStruct);
let attr = parse_macro_input!(attr as syn::AttributeArgs);
let path = match &attr[0] {
syn::NestedMeta::Lit(lit) => {
match lit {
syn::Lit::Str(s) => {
s.value()
},
_ => panic!("#[template]: expected string literal for path")
}
},
_ => {
panic!("#[template]: expected string literal for path")
}
};
let mut escape = "txt".to_string();
for attr in &attr[1..] {
match attr {
syn::NestedMeta::Meta(syn::Meta::NameValue(val)) => {
let ident = val.path.get_ident().expect("#[template]: expected name = value; name must be identifier, not path");
match ident.to_string().as_str() {
"escape" => {
let type_ = match &val.lit {
syn::Lit::Str(s) => s.value(),
_ => panic!("#[template]: attribute 'escape' must have string value")
};
escape = type_;
},
_ => panic!("#[template]: unknown attribute key '{}'", ident)
}
},
_ => panic!("#[template]: expected name = value")
}
}
let escape_func = match escape.as_str() {
"txt" => |s: syn::Expr| -> TokenStream { (quote! { #s }) },
"html" => |s: syn::Expr| -> TokenStream {
let q = quote! {
::std::string::ToString::to_string(&(#s)).replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
};
q
},
_ => panic!("#[template]: unknown escape type: {}", escape)
};
eprintln!("{:?}", std::env::current_dir().unwrap());
let template = std::fs::read_to_string(path).unwrap();
let parts = PartIterator(TokenIterator::new(&template));
let mut vecs: Vec<Vec<TokenStream>> = vec![vec![]];
for part in parts {
match part {
Part::Text(t) => {
let last = vecs.len()-1;
vecs[last].push(quote! {
write!(writer, "{}", #t)?;
}.into());
},
Part::Expr(expr, raw) => {
let last = vecs.len()-1;
if raw {
vecs[last].push(quote! {
write!(writer, "{}", #expr)?;
}.into());
} else {
let tokens = escape_func(expr);
vecs[last].push(quote! {
write!(writer, "{}", #tokens)?;
}.into());
}
},
Part::Stmt(stmt) => {
let last = vecs.len()-1;
vecs[last].push(stmt.into_iter()
.chain(
std::array::IntoIter::new([
proc_macro2::Punct::new(';', proc_macro2::Spacing::Alone).into()
])
)
.collect::<TokenStream>());
},
Part::GroupStart(tokens) => {
let last = vecs.len()-1;
vecs[last].push(tokens);
vecs.push(vec![]);
},
Part::GroupEnd => {
let vec = vecs.pop().expect("unmatched {:end} node");
let last = vecs.len()-1;
vecs[last].push(
<std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([
proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into()
])
.collect()
);
},
Part::GroupStartEnd(tokens) => {
let vec = vecs.pop().expect("unmatched {:end} node");
let last = vecs.len()-1;
vecs[last].push(
<std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([
proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into()
])
.collect()
);
vecs[last].push(tokens);
vecs.push(vec![]);
}
}
}
let code = vecs.into_iter().next().expect("unmatched {:end} node").into_iter().collect::<TokenStream>();
let item_ = item.clone();
let name = item.ident;
let (impl_gen, type_gen, where_clause) = item.generics.split_for_impl();
let q = quote! {
#item_
impl #impl_gen #name #type_gen #where_clause {
pub fn render(&self, writer: &mut impl ::std::io::Write) -> ::std::io::Result<()> {
#code
Ok(())
}
pub fn render_string(&self) -> ::std::io::Result<String> {
let mut buf: Vec<u8> = Vec::new();
self.render(&mut buf)?;
Ok(String::from_utf8_lossy(&buf).into_owned())
}
}
};
q.into()
}
#[derive(Clone)]
enum Part {
Text(String),
Expr(syn::Expr, bool),
Stmt(TokenStream),
GroupStart(TokenStream),
GroupStartEnd(TokenStream),
GroupEnd,
}
struct PartIterator<'i>(pub TokenIterator<'i>);
impl<'i> Iterator for PartIterator<'i> {
type Item = Part;
fn next(&mut self) -> Option<Part> {
let tok = self.0.next()?;
Some(match tok {
Token::Text(t) => Part::Text(t.into_owned()),
Token::Expr(t, raw) => {
let expr = syn::parse_str(t).unwrap();
Part::Expr(expr, raw)
},
Token::Stmt(t) => {
let tokens = TokenStream::from_str(t).unwrap();
match tokens.clone().into_iter().next() {
Some(proc_macro2::TokenTree::Ident(ident)) => {
match ident.to_string().as_str() {
"for" | "if" | "match" | "while" | "loop" => {
Part::GroupStart(tokens)
},
"else" => {
Part::GroupStartEnd(tokens)
}
"case" => {
Part::GroupStart(
tokens.into_iter().skip(1)
.chain(
std::array::IntoIter::new([
proc_macro2::Punct::new('=', proc_macro2::Spacing::Joint).into(),
proc_macro2::Punct::new('>', proc_macro2::Spacing::Alone).into(),
])
)
.collect())
},
"block" => {
Part::GroupStart(TokenStream::new())
},
"end" => {
Part::GroupEnd
},
"include" => {
let tokens = tokens.into_iter().skip(1).collect::<TokenStream>();
Part::Stmt(quote! {
(#tokens).render(writer)?
})
},
_ => Part::Stmt(tokens)
}
},
_ => {
Part::Stmt(tokens)
}
}
}
})
}
}
#[derive(Clone, Debug)]
enum Token<'i> {
Text(Cow<'i, str>),
Expr(&'i str, bool),
Stmt(&'i str),
}
struct TokenIterator<'i> {
src: &'i str,
chars: std::iter::Peekable<std::str::CharIndices<'i>>,
}
impl<'i> TokenIterator<'i> {
pub fn new(src: &'i str) -> Self {
Self {
src,
chars: src.char_indices().peekable(),
}
}
}
impl<'i> Iterator for TokenIterator<'i> {
type Item = Token<'i>;
fn next(&mut self) -> Option<Token<'i>> {
let (first_idx, first) = match self.chars.peek() {
None => return None,
Some(v) => *v,
};
let mut n_braces = 0;
let (final_idx, final_) = loop {
let (idx, chr) = self.chars.next().unwrap();
let (next_idx, next_chr) = match self.chars.peek() { | if first!= '{' && next_chr == '{' {
break (idx, chr);
}
if first == '{' {
if next_chr == '{' {
n_braces += 1;
}
if next_chr == '}' {
if n_braces == 0 {
self.chars.next();
break (next_idx, next_chr);
}
n_braces -= 1;
}
}
};
if first == '{' && final_ != '}' {
panic!("Unmatched braces");
}
let span = &self.src[first_idx..final_idx+1];
let second = span.chars().skip(1).next();
Some(match (first, second) {
('{', Some(':')) => Token::Stmt(&span[2..span.len()-1]),
('{', Some('@')) => Token::Expr(&span[2..span.len()-1], true),
('{', Some('#')) => Token::Text(format!("{}{}", "{", &span[2..span.len()]).into()),
('{', _) => Token::Expr(&span[1..span.len()-1], false),
_ => Token::Text(span.into())
})
}
} | None => { break (idx, chr); },
Some(x) => *x,
};
| random_line_split |
driver.rs | use std::any::Any;
use std::ffi::CString;
use std::fs::File;
use std::os::raw::{c_char, c_int};
use rustc::middle::cstore::EncodedMetadata;
use rustc::mir::mono::{Linkage as RLinkage, Visibility};
use rustc::session::config::{DebugInfo, OutputType};
use rustc_codegen_ssa::back::linker::LinkerInfo;
use rustc_codegen_ssa::CrateInfo;
use cranelift_faerie::*;
use crate::prelude::*;
pub fn codegen_crate(
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
) -> Box<dyn Any> {
tcx.sess.abort_if_errors();
let mut log = if cfg!(debug_assertions) {
Some(File::create(concat!(env!("CARGO_MANIFEST_DIR"), "/target/out/log.txt")).unwrap())
} else {
None
};
if std::env::var("SHOULD_RUN").is_ok()
&& tcx.sess.crate_types.get().contains(&CrateType::Executable)
{
#[cfg(not(target_arch = "wasm32"))]
let _: ! = run_jit(tcx, &mut log);
#[cfg(target_arch = "wasm32")]
panic!("jit not supported on wasm");
}
run_aot(tcx, metadata, need_metadata_module, &mut log)
}
#[cfg(not(target_arch = "wasm32"))]
fn run_jit(tcx: TyCtxt<'_>, log: &mut Option<File>) -> ! {
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
let imported_symbols = load_imported_symbols_for_jit(tcx);
let mut jit_builder = SimpleJITBuilder::with_isa(
crate::build_isa(tcx.sess, false),
cranelift_module::default_libcall_names(),
);
jit_builder.symbols(imported_symbols);
let mut jit_module: Module<SimpleJITBackend> = Module::new(jit_builder);
assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type());
let sig = Signature {
params: vec![
AbiParam::new(jit_module.target_config().pointer_type()),
AbiParam::new(jit_module.target_config().pointer_type()),
],
returns: vec![AbiParam::new(
jit_module.target_config().pointer_type(), /*isize*/
)],
call_conv: CallConv::SystemV,
};
let main_func_id = jit_module
.declare_function("main", Linkage::Import, &sig)
.unwrap();
codegen_cgus(tcx, &mut jit_module, &mut None, log);
crate::allocator::codegen(tcx.sess, &mut jit_module);
jit_module.finalize_definitions();
tcx.sess.abort_if_errors();
let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id);
println!("Rustc codegen cranelift will JIT run the executable, because the SHOULD_RUN env var is set");
let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
unsafe { ::std::mem::transmute(finalized_main) };
let args = ::std::env::var("JIT_ARGS").unwrap_or_else(|_| String::new());
let args = args
.split(" ")
.chain(Some(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string()))
.map(|arg| CString::new(arg).unwrap())
.collect::<Vec<_>>();
let argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
// TODO: Rust doesn't care, but POSIX argv has a NULL sentinel at the end
let ret = f(args.len() as c_int, argv.as_ptr());
jit_module.finish();
std::process::exit(ret);
}
fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
use rustc::middle::dependency_format::Linkage;
let mut dylib_paths = Vec::new();
let crate_info = CrateInfo::new(tcx);
let formats = tcx.sess.dependency_formats.borrow();
let data = formats.get(&CrateType::Executable).unwrap();
for &(cnum, _) in &crate_info.used_crates_dynamic {
let src = &crate_info.used_crate_source[&cnum];
match data[cnum.as_usize() - 1] {
Linkage::NotLinked | Linkage::IncludedFromDylib => {}
Linkage::Static => {
let name = tcx.crate_name(cnum);
let mut err = tcx
.sess
.struct_err(&format!("Can't load static lib {}", name.as_str()));
err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
err.emit();
}
Linkage::Dynamic => {
dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
}
}
}
let mut imported_symbols = Vec::new();
for path in dylib_paths {
use object::Object;
let lib = libloading::Library::new(&path).unwrap();
let obj = std::fs::read(path).unwrap();
let obj = object::File::parse(&obj).unwrap();
imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| {
let name = symbol.name().unwrap().to_string();
if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
return None;
}
let symbol: libloading::Symbol<*const u8> =
unsafe { lib.get(name.as_bytes()) }.unwrap();
Some((name, *symbol))
}));
std::mem::forget(lib)
}
tcx.sess.abort_if_errors();
imported_symbols
}
fn run_aot(
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
log: &mut Option<File>,
) -> Box<CodegenResults> {
let new_module = |name: String| {
let module: Module<FaerieBackend> = Module::new(
FaerieBuilder::new(
crate::build_isa(tcx.sess, true),
name + ".o",
FaerieTrapCollection::Disabled,
cranelift_module::default_libcall_names(),
)
.unwrap(),
);
assert_eq!(pointer_ty(tcx), module.target_config().pointer_type());
module
};
let emit_module = |kind: ModuleKind,
mut module: Module<FaerieBackend>,
debug: Option<DebugContext>| {
module.finalize_definitions();
let mut artifact = module.finish().artifact;
if let Some(mut debug) = debug {
debug.emit(&mut artifact);
}
let tmp_file = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Object, Some(&artifact.name));
let obj = artifact.emit().unwrap();
std::fs::write(&tmp_file, obj).unwrap();
CompiledModule {
name: artifact.name,
kind,
object: Some(tmp_file),
bytecode: None,
bytecode_compressed: None,
}
};
let mut faerie_module = new_module("some_file".to_string());
let mut debug = if tcx.sess.opts.debuginfo != DebugInfo::None
// macOS debuginfo doesn't work yet (see #303)
&& !tcx.sess.target.target.options.is_like_osx
{
let debug = DebugContext::new(
tcx,
faerie_module.target_config().pointer_type().bytes() as u8,
);
Some(debug)
} else {
None
};
codegen_cgus(tcx, &mut faerie_module, &mut debug, log);
tcx.sess.abort_if_errors();
let mut allocator_module = new_module("allocator_shim".to_string());
let created_alloc_shim = crate::allocator::codegen(tcx.sess, &mut allocator_module);
rustc_incremental::assert_dep_graph(tcx);
rustc_incremental::save_dep_graph(tcx);
rustc_incremental::finalize_session_directory(tcx.sess, tcx.crate_hash(LOCAL_CRATE));
let metadata_module = if need_metadata_module {
use rustc::mir::mono::CodegenUnitNameBuilder;
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
let metadata_cgu_name = cgu_name_builder
.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
.as_str()
.to_string();
let mut metadata_artifact = faerie::Artifact::new(
crate::build_isa(tcx.sess, true).triple().clone(),
metadata_cgu_name.clone(),
);
crate::metadata::write_metadata(tcx, &mut metadata_artifact);
let tmp_file = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
let obj = metadata_artifact.emit().unwrap();
std::fs::write(&tmp_file, obj).unwrap();
Some(CompiledModule {
name: metadata_cgu_name,
kind: ModuleKind::Metadata,
object: Some(tmp_file),
bytecode: None,
bytecode_compressed: None,
})
} else {
None
};
Box::new(CodegenResults {
crate_name: tcx.crate_name(LOCAL_CRATE),
modules: vec![emit_module(
ModuleKind::Regular,
faerie_module,
debug,
)],
allocator_module: if created_alloc_shim {
Some(emit_module(
ModuleKind::Allocator,
allocator_module,
None,
))
} else {
None
},
metadata_module,
crate_hash: tcx.crate_hash(LOCAL_CRATE),
metadata,
windows_subsystem: None, // Windows is not yet supported
linker_info: LinkerInfo::new(tcx),
crate_info: CrateInfo::new(tcx),
})
}
fn codegen_cgus<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut Module<impl Backend + 'static>,
debug: &mut Option<DebugContext<'tcx>>,
log: &mut Option<File>,
) |
fn codegen_mono_items<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut Module<impl Backend + 'static>,
debug_context: Option<&mut DebugContext<'tcx>>,
log: &mut Option<File>,
mono_items: FxHashMap<MonoItem<'tcx>, (RLinkage, Visibility)>,
) {
let mut cx = CodegenCx::new(tcx, module, debug_context);
time("codegen mono items", move || {
for (mono_item, (linkage, visibility)) in mono_items {
crate::unimpl::try_unimpl(tcx, log, || {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
trans_mono_item(&mut cx, mono_item, linkage);
});
}
cx.finalize();
});
}
fn trans_mono_item<'clif, 'tcx, B: Backend + 'static>(
cx: &mut crate::CodegenCx<'clif, 'tcx, B>,
mono_item: MonoItem<'tcx>,
linkage: Linkage,
) {
let tcx = cx.tcx;
match mono_item {
MonoItem::Fn(inst) => {
let _inst_guard =
PrintOnPanic(|| format!("{:?} {}", inst, tcx.symbol_name(inst).name.as_str()));
debug_assert!(!inst.substs.needs_infer());
let _mir_guard = PrintOnPanic(|| {
match inst.def {
InstanceDef::Item(_)
| InstanceDef::DropGlue(_, _)
| InstanceDef::Virtual(_, _) => {
let mut mir = ::std::io::Cursor::new(Vec::new());
crate::rustc_mir::util::write_mir_pretty(
tcx,
Some(inst.def_id()),
&mut mir,
)
.unwrap();
String::from_utf8(mir.into_inner()).unwrap()
}
_ => {
// FIXME fix write_mir_pretty for these instances
format!("{:#?}", tcx.instance_mir(inst.def))
}
}
});
crate::base::trans_fn(cx, inst, linkage);
}
MonoItem::Static(def_id) => {
crate::constant::codegen_static(&mut cx.constants_cx, def_id);
}
MonoItem::GlobalAsm(node_id) => tcx
.sess
.fatal(&format!("Unimplemented global asm mono item {:?}", node_id)),
}
}
fn time<R>(name: &str, f: impl FnOnce() -> R) -> R {
println!("[{}] start", name);
let before = std::time::Instant::now();
let res = f();
let after = std::time::Instant::now();
println!("[{}] end time: {:?}", name, after - before);
res
}
| {
let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
let mono_items = cgus
.iter()
.map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
.flatten()
.collect::<FxHashMap<_, (_, _)>>();
codegen_mono_items(tcx, module, debug.as_mut(), log, mono_items);
crate::main_shim::maybe_create_entry_wrapper(tcx, module);
} | identifier_body |
driver.rs | use std::any::Any;
use std::ffi::CString;
use std::fs::File;
use std::os::raw::{c_char, c_int};
use rustc::middle::cstore::EncodedMetadata;
use rustc::mir::mono::{Linkage as RLinkage, Visibility};
use rustc::session::config::{DebugInfo, OutputType};
use rustc_codegen_ssa::back::linker::LinkerInfo;
use rustc_codegen_ssa::CrateInfo;
use cranelift_faerie::*;
use crate::prelude::*;
pub fn codegen_crate(
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
) -> Box<dyn Any> {
tcx.sess.abort_if_errors();
let mut log = if cfg!(debug_assertions) {
Some(File::create(concat!(env!("CARGO_MANIFEST_DIR"), "/target/out/log.txt")).unwrap())
} else {
None
};
if std::env::var("SHOULD_RUN").is_ok()
&& tcx.sess.crate_types.get().contains(&CrateType::Executable)
{
#[cfg(not(target_arch = "wasm32"))]
let _: ! = run_jit(tcx, &mut log);
#[cfg(target_arch = "wasm32")]
panic!("jit not supported on wasm");
}
run_aot(tcx, metadata, need_metadata_module, &mut log)
}
#[cfg(not(target_arch = "wasm32"))]
fn run_jit(tcx: TyCtxt<'_>, log: &mut Option<File>) -> ! {
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
let imported_symbols = load_imported_symbols_for_jit(tcx);
let mut jit_builder = SimpleJITBuilder::with_isa(
crate::build_isa(tcx.sess, false),
cranelift_module::default_libcall_names(),
);
jit_builder.symbols(imported_symbols);
let mut jit_module: Module<SimpleJITBackend> = Module::new(jit_builder);
assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type());
let sig = Signature {
params: vec![
AbiParam::new(jit_module.target_config().pointer_type()),
AbiParam::new(jit_module.target_config().pointer_type()),
],
returns: vec![AbiParam::new(
jit_module.target_config().pointer_type(), /*isize*/
)],
call_conv: CallConv::SystemV,
};
let main_func_id = jit_module
.declare_function("main", Linkage::Import, &sig)
.unwrap();
codegen_cgus(tcx, &mut jit_module, &mut None, log);
crate::allocator::codegen(tcx.sess, &mut jit_module);
jit_module.finalize_definitions();
tcx.sess.abort_if_errors();
let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id);
println!("Rustc codegen cranelift will JIT run the executable, because the SHOULD_RUN env var is set");
let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
unsafe { ::std::mem::transmute(finalized_main) };
let args = ::std::env::var("JIT_ARGS").unwrap_or_else(|_| String::new());
let args = args
.split(" ")
.chain(Some(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string()))
.map(|arg| CString::new(arg).unwrap())
.collect::<Vec<_>>();
let argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
// TODO: Rust doesn't care, but POSIX argv has a NULL sentinel at the end
let ret = f(args.len() as c_int, argv.as_ptr());
jit_module.finish();
std::process::exit(ret);
}
fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
use rustc::middle::dependency_format::Linkage;
let mut dylib_paths = Vec::new();
let crate_info = CrateInfo::new(tcx);
let formats = tcx.sess.dependency_formats.borrow();
let data = formats.get(&CrateType::Executable).unwrap();
for &(cnum, _) in &crate_info.used_crates_dynamic {
let src = &crate_info.used_crate_source[&cnum];
match data[cnum.as_usize() - 1] {
Linkage::NotLinked | Linkage::IncludedFromDylib => {}
Linkage::Static => {
let name = tcx.crate_name(cnum);
let mut err = tcx
.sess
.struct_err(&format!("Can't load static lib {}", name.as_str()));
err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
err.emit();
}
Linkage::Dynamic => {
dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
}
}
}
let mut imported_symbols = Vec::new();
for path in dylib_paths {
use object::Object;
let lib = libloading::Library::new(&path).unwrap();
let obj = std::fs::read(path).unwrap();
let obj = object::File::parse(&obj).unwrap();
imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| {
let name = symbol.name().unwrap().to_string();
if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
return None;
}
let symbol: libloading::Symbol<*const u8> =
unsafe { lib.get(name.as_bytes()) }.unwrap();
Some((name, *symbol))
}));
std::mem::forget(lib)
}
tcx.sess.abort_if_errors();
imported_symbols
}
fn run_aot(
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
log: &mut Option<File>,
) -> Box<CodegenResults> {
let new_module = |name: String| {
let module: Module<FaerieBackend> = Module::new(
FaerieBuilder::new(
crate::build_isa(tcx.sess, true),
name + ".o",
FaerieTrapCollection::Disabled,
cranelift_module::default_libcall_names(),
)
.unwrap(),
);
assert_eq!(pointer_ty(tcx), module.target_config().pointer_type());
module
};
let emit_module = |kind: ModuleKind,
mut module: Module<FaerieBackend>,
debug: Option<DebugContext>| {
module.finalize_definitions();
let mut artifact = module.finish().artifact;
if let Some(mut debug) = debug {
debug.emit(&mut artifact);
}
let tmp_file = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Object, Some(&artifact.name));
let obj = artifact.emit().unwrap();
std::fs::write(&tmp_file, obj).unwrap();
CompiledModule {
name: artifact.name,
kind,
object: Some(tmp_file),
bytecode: None,
bytecode_compressed: None,
}
};
let mut faerie_module = new_module("some_file".to_string());
let mut debug = if tcx.sess.opts.debuginfo != DebugInfo::None
// macOS debuginfo doesn't work yet (see #303)
&& !tcx.sess.target.target.options.is_like_osx
{
let debug = DebugContext::new(
tcx,
faerie_module.target_config().pointer_type().bytes() as u8,
);
Some(debug)
} else {
None
};
codegen_cgus(tcx, &mut faerie_module, &mut debug, log);
tcx.sess.abort_if_errors();
let mut allocator_module = new_module("allocator_shim".to_string());
let created_alloc_shim = crate::allocator::codegen(tcx.sess, &mut allocator_module);
rustc_incremental::assert_dep_graph(tcx);
rustc_incremental::save_dep_graph(tcx);
rustc_incremental::finalize_session_directory(tcx.sess, tcx.crate_hash(LOCAL_CRATE));
let metadata_module = if need_metadata_module {
use rustc::mir::mono::CodegenUnitNameBuilder;
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
let metadata_cgu_name = cgu_name_builder
.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
.as_str()
.to_string();
let mut metadata_artifact = faerie::Artifact::new(
crate::build_isa(tcx.sess, true).triple().clone(),
metadata_cgu_name.clone(),
);
crate::metadata::write_metadata(tcx, &mut metadata_artifact);
let tmp_file = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
let obj = metadata_artifact.emit().unwrap();
std::fs::write(&tmp_file, obj).unwrap();
Some(CompiledModule {
name: metadata_cgu_name,
kind: ModuleKind::Metadata,
object: Some(tmp_file),
bytecode: None,
bytecode_compressed: None,
})
} else {
None
};
Box::new(CodegenResults {
crate_name: tcx.crate_name(LOCAL_CRATE),
modules: vec![emit_module(
ModuleKind::Regular,
faerie_module,
debug,
)],
allocator_module: if created_alloc_shim {
Some(emit_module(
ModuleKind::Allocator,
allocator_module,
None,
))
} else {
None
},
metadata_module,
crate_hash: tcx.crate_hash(LOCAL_CRATE),
metadata,
windows_subsystem: None, // Windows is not yet supported
linker_info: LinkerInfo::new(tcx),
crate_info: CrateInfo::new(tcx),
})
}
fn codegen_cgus<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut Module<impl Backend + 'static>,
debug: &mut Option<DebugContext<'tcx>>,
log: &mut Option<File>,
) {
let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
let mono_items = cgus
.iter()
.map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
.flatten()
.collect::<FxHashMap<_, (_, _)>>();
codegen_mono_items(tcx, module, debug.as_mut(), log, mono_items);
crate::main_shim::maybe_create_entry_wrapper(tcx, module);
}
fn codegen_mono_items<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut Module<impl Backend + 'static>,
debug_context: Option<&mut DebugContext<'tcx>>,
log: &mut Option<File>,
mono_items: FxHashMap<MonoItem<'tcx>, (RLinkage, Visibility)>,
) {
let mut cx = CodegenCx::new(tcx, module, debug_context);
time("codegen mono items", move || {
for (mono_item, (linkage, visibility)) in mono_items {
crate::unimpl::try_unimpl(tcx, log, || {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
trans_mono_item(&mut cx, mono_item, linkage);
});
}
cx.finalize();
});
}
fn trans_mono_item<'clif, 'tcx, B: Backend + 'static>(
cx: &mut crate::CodegenCx<'clif, 'tcx, B>,
mono_item: MonoItem<'tcx>,
linkage: Linkage,
) {
let tcx = cx.tcx;
match mono_item {
MonoItem::Fn(inst) => {
let _inst_guard =
PrintOnPanic(|| format!("{:?} {}", inst, tcx.symbol_name(inst).name.as_str()));
debug_assert!(!inst.substs.needs_infer());
let _mir_guard = PrintOnPanic(|| {
match inst.def {
InstanceDef::Item(_)
| InstanceDef::DropGlue(_, _)
| InstanceDef::Virtual(_, _) => {
let mut mir = ::std::io::Cursor::new(Vec::new());
crate::rustc_mir::util::write_mir_pretty(
tcx,
Some(inst.def_id()),
&mut mir,
)
.unwrap();
String::from_utf8(mir.into_inner()).unwrap()
}
_ => {
// FIXME fix write_mir_pretty for these instances
format!("{:#?}", tcx.instance_mir(inst.def))
}
}
});
crate::base::trans_fn(cx, inst, linkage);
}
MonoItem::Static(def_id) => {
crate::constant::codegen_static(&mut cx.constants_cx, def_id);
}
MonoItem::GlobalAsm(node_id) => tcx
.sess
.fatal(&format!("Unimplemented global asm mono item {:?}", node_id)),
}
}
fn | <R>(name: &str, f: impl FnOnce() -> R) -> R {
println!("[{}] start", name);
let before = std::time::Instant::now();
let res = f();
let after = std::time::Instant::now();
println!("[{}] end time: {:?}", name, after - before);
res
}
| time | identifier_name |
driver.rs | use std::any::Any;
use std::ffi::CString;
use std::fs::File;
use std::os::raw::{c_char, c_int};
use rustc::middle::cstore::EncodedMetadata;
use rustc::mir::mono::{Linkage as RLinkage, Visibility};
use rustc::session::config::{DebugInfo, OutputType};
use rustc_codegen_ssa::back::linker::LinkerInfo;
use rustc_codegen_ssa::CrateInfo;
use cranelift_faerie::*;
use crate::prelude::*;
pub fn codegen_crate(
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
) -> Box<dyn Any> {
tcx.sess.abort_if_errors();
let mut log = if cfg!(debug_assertions) {
Some(File::create(concat!(env!("CARGO_MANIFEST_DIR"), "/target/out/log.txt")).unwrap())
} else {
None
};
if std::env::var("SHOULD_RUN").is_ok()
&& tcx.sess.crate_types.get().contains(&CrateType::Executable)
{
#[cfg(not(target_arch = "wasm32"))]
let _: ! = run_jit(tcx, &mut log);
#[cfg(target_arch = "wasm32")]
panic!("jit not supported on wasm");
}
run_aot(tcx, metadata, need_metadata_module, &mut log)
}
#[cfg(not(target_arch = "wasm32"))]
fn run_jit(tcx: TyCtxt<'_>, log: &mut Option<File>) -> ! {
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
let imported_symbols = load_imported_symbols_for_jit(tcx);
let mut jit_builder = SimpleJITBuilder::with_isa(
crate::build_isa(tcx.sess, false),
cranelift_module::default_libcall_names(),
);
jit_builder.symbols(imported_symbols);
let mut jit_module: Module<SimpleJITBackend> = Module::new(jit_builder);
assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type());
let sig = Signature {
params: vec![
AbiParam::new(jit_module.target_config().pointer_type()),
AbiParam::new(jit_module.target_config().pointer_type()),
],
returns: vec![AbiParam::new(
jit_module.target_config().pointer_type(), /*isize*/
)],
call_conv: CallConv::SystemV,
};
let main_func_id = jit_module
.declare_function("main", Linkage::Import, &sig) | jit_module.finalize_definitions();
tcx.sess.abort_if_errors();
let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id);
println!("Rustc codegen cranelift will JIT run the executable, because the SHOULD_RUN env var is set");
let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
unsafe { ::std::mem::transmute(finalized_main) };
let args = ::std::env::var("JIT_ARGS").unwrap_or_else(|_| String::new());
let args = args
.split(" ")
.chain(Some(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string()))
.map(|arg| CString::new(arg).unwrap())
.collect::<Vec<_>>();
let argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
// TODO: Rust doesn't care, but POSIX argv has a NULL sentinel at the end
let ret = f(args.len() as c_int, argv.as_ptr());
jit_module.finish();
std::process::exit(ret);
}
fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
use rustc::middle::dependency_format::Linkage;
let mut dylib_paths = Vec::new();
let crate_info = CrateInfo::new(tcx);
let formats = tcx.sess.dependency_formats.borrow();
let data = formats.get(&CrateType::Executable).unwrap();
for &(cnum, _) in &crate_info.used_crates_dynamic {
let src = &crate_info.used_crate_source[&cnum];
match data[cnum.as_usize() - 1] {
Linkage::NotLinked | Linkage::IncludedFromDylib => {}
Linkage::Static => {
let name = tcx.crate_name(cnum);
let mut err = tcx
.sess
.struct_err(&format!("Can't load static lib {}", name.as_str()));
err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
err.emit();
}
Linkage::Dynamic => {
dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
}
}
}
let mut imported_symbols = Vec::new();
for path in dylib_paths {
use object::Object;
let lib = libloading::Library::new(&path).unwrap();
let obj = std::fs::read(path).unwrap();
let obj = object::File::parse(&obj).unwrap();
imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| {
let name = symbol.name().unwrap().to_string();
if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
return None;
}
let symbol: libloading::Symbol<*const u8> =
unsafe { lib.get(name.as_bytes()) }.unwrap();
Some((name, *symbol))
}));
std::mem::forget(lib)
}
tcx.sess.abort_if_errors();
imported_symbols
}
fn run_aot(
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
log: &mut Option<File>,
) -> Box<CodegenResults> {
let new_module = |name: String| {
let module: Module<FaerieBackend> = Module::new(
FaerieBuilder::new(
crate::build_isa(tcx.sess, true),
name + ".o",
FaerieTrapCollection::Disabled,
cranelift_module::default_libcall_names(),
)
.unwrap(),
);
assert_eq!(pointer_ty(tcx), module.target_config().pointer_type());
module
};
let emit_module = |kind: ModuleKind,
mut module: Module<FaerieBackend>,
debug: Option<DebugContext>| {
module.finalize_definitions();
let mut artifact = module.finish().artifact;
if let Some(mut debug) = debug {
debug.emit(&mut artifact);
}
let tmp_file = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Object, Some(&artifact.name));
let obj = artifact.emit().unwrap();
std::fs::write(&tmp_file, obj).unwrap();
CompiledModule {
name: artifact.name,
kind,
object: Some(tmp_file),
bytecode: None,
bytecode_compressed: None,
}
};
let mut faerie_module = new_module("some_file".to_string());
let mut debug = if tcx.sess.opts.debuginfo != DebugInfo::None
// macOS debuginfo doesn't work yet (see #303)
&& !tcx.sess.target.target.options.is_like_osx
{
let debug = DebugContext::new(
tcx,
faerie_module.target_config().pointer_type().bytes() as u8,
);
Some(debug)
} else {
None
};
codegen_cgus(tcx, &mut faerie_module, &mut debug, log);
tcx.sess.abort_if_errors();
let mut allocator_module = new_module("allocator_shim".to_string());
let created_alloc_shim = crate::allocator::codegen(tcx.sess, &mut allocator_module);
rustc_incremental::assert_dep_graph(tcx);
rustc_incremental::save_dep_graph(tcx);
rustc_incremental::finalize_session_directory(tcx.sess, tcx.crate_hash(LOCAL_CRATE));
let metadata_module = if need_metadata_module {
use rustc::mir::mono::CodegenUnitNameBuilder;
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
let metadata_cgu_name = cgu_name_builder
.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
.as_str()
.to_string();
let mut metadata_artifact = faerie::Artifact::new(
crate::build_isa(tcx.sess, true).triple().clone(),
metadata_cgu_name.clone(),
);
crate::metadata::write_metadata(tcx, &mut metadata_artifact);
let tmp_file = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
let obj = metadata_artifact.emit().unwrap();
std::fs::write(&tmp_file, obj).unwrap();
Some(CompiledModule {
name: metadata_cgu_name,
kind: ModuleKind::Metadata,
object: Some(tmp_file),
bytecode: None,
bytecode_compressed: None,
})
} else {
None
};
Box::new(CodegenResults {
crate_name: tcx.crate_name(LOCAL_CRATE),
modules: vec![emit_module(
ModuleKind::Regular,
faerie_module,
debug,
)],
allocator_module: if created_alloc_shim {
Some(emit_module(
ModuleKind::Allocator,
allocator_module,
None,
))
} else {
None
},
metadata_module,
crate_hash: tcx.crate_hash(LOCAL_CRATE),
metadata,
windows_subsystem: None, // Windows is not yet supported
linker_info: LinkerInfo::new(tcx),
crate_info: CrateInfo::new(tcx),
})
}
fn codegen_cgus<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut Module<impl Backend + 'static>,
debug: &mut Option<DebugContext<'tcx>>,
log: &mut Option<File>,
) {
let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
let mono_items = cgus
.iter()
.map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
.flatten()
.collect::<FxHashMap<_, (_, _)>>();
codegen_mono_items(tcx, module, debug.as_mut(), log, mono_items);
crate::main_shim::maybe_create_entry_wrapper(tcx, module);
}
fn codegen_mono_items<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut Module<impl Backend + 'static>,
debug_context: Option<&mut DebugContext<'tcx>>,
log: &mut Option<File>,
mono_items: FxHashMap<MonoItem<'tcx>, (RLinkage, Visibility)>,
) {
let mut cx = CodegenCx::new(tcx, module, debug_context);
time("codegen mono items", move || {
for (mono_item, (linkage, visibility)) in mono_items {
crate::unimpl::try_unimpl(tcx, log, || {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
trans_mono_item(&mut cx, mono_item, linkage);
});
}
cx.finalize();
});
}
fn trans_mono_item<'clif, 'tcx, B: Backend + 'static>(
cx: &mut crate::CodegenCx<'clif, 'tcx, B>,
mono_item: MonoItem<'tcx>,
linkage: Linkage,
) {
let tcx = cx.tcx;
match mono_item {
MonoItem::Fn(inst) => {
let _inst_guard =
PrintOnPanic(|| format!("{:?} {}", inst, tcx.symbol_name(inst).name.as_str()));
debug_assert!(!inst.substs.needs_infer());
let _mir_guard = PrintOnPanic(|| {
match inst.def {
InstanceDef::Item(_)
| InstanceDef::DropGlue(_, _)
| InstanceDef::Virtual(_, _) => {
let mut mir = ::std::io::Cursor::new(Vec::new());
crate::rustc_mir::util::write_mir_pretty(
tcx,
Some(inst.def_id()),
&mut mir,
)
.unwrap();
String::from_utf8(mir.into_inner()).unwrap()
}
_ => {
// FIXME fix write_mir_pretty for these instances
format!("{:#?}", tcx.instance_mir(inst.def))
}
}
});
crate::base::trans_fn(cx, inst, linkage);
}
MonoItem::Static(def_id) => {
crate::constant::codegen_static(&mut cx.constants_cx, def_id);
}
MonoItem::GlobalAsm(node_id) => tcx
.sess
.fatal(&format!("Unimplemented global asm mono item {:?}", node_id)),
}
}
fn time<R>(name: &str, f: impl FnOnce() -> R) -> R {
println!("[{}] start", name);
let before = std::time::Instant::now();
let res = f();
let after = std::time::Instant::now();
println!("[{}] end time: {:?}", name, after - before);
res
} | .unwrap();
codegen_cgus(tcx, &mut jit_module, &mut None, log);
crate::allocator::codegen(tcx.sess, &mut jit_module); | random_line_split |
driver.rs | use std::any::Any;
use std::ffi::CString;
use std::fs::File;
use std::os::raw::{c_char, c_int};
use rustc::middle::cstore::EncodedMetadata;
use rustc::mir::mono::{Linkage as RLinkage, Visibility};
use rustc::session::config::{DebugInfo, OutputType};
use rustc_codegen_ssa::back::linker::LinkerInfo;
use rustc_codegen_ssa::CrateInfo;
use cranelift_faerie::*;
use crate::prelude::*;
pub fn codegen_crate(
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
) -> Box<dyn Any> {
tcx.sess.abort_if_errors();
let mut log = if cfg!(debug_assertions) {
Some(File::create(concat!(env!("CARGO_MANIFEST_DIR"), "/target/out/log.txt")).unwrap())
} else {
None
};
if std::env::var("SHOULD_RUN").is_ok()
&& tcx.sess.crate_types.get().contains(&CrateType::Executable)
|
run_aot(tcx, metadata, need_metadata_module, &mut log)
}
#[cfg(not(target_arch = "wasm32"))]
fn run_jit(tcx: TyCtxt<'_>, log: &mut Option<File>) -> ! {
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
let imported_symbols = load_imported_symbols_for_jit(tcx);
let mut jit_builder = SimpleJITBuilder::with_isa(
crate::build_isa(tcx.sess, false),
cranelift_module::default_libcall_names(),
);
jit_builder.symbols(imported_symbols);
let mut jit_module: Module<SimpleJITBackend> = Module::new(jit_builder);
assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type());
let sig = Signature {
params: vec![
AbiParam::new(jit_module.target_config().pointer_type()),
AbiParam::new(jit_module.target_config().pointer_type()),
],
returns: vec![AbiParam::new(
jit_module.target_config().pointer_type(), /*isize*/
)],
call_conv: CallConv::SystemV,
};
let main_func_id = jit_module
.declare_function("main", Linkage::Import, &sig)
.unwrap();
codegen_cgus(tcx, &mut jit_module, &mut None, log);
crate::allocator::codegen(tcx.sess, &mut jit_module);
jit_module.finalize_definitions();
tcx.sess.abort_if_errors();
let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id);
println!("Rustc codegen cranelift will JIT run the executable, because the SHOULD_RUN env var is set");
let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
unsafe { ::std::mem::transmute(finalized_main) };
let args = ::std::env::var("JIT_ARGS").unwrap_or_else(|_| String::new());
let args = args
.split(" ")
.chain(Some(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string()))
.map(|arg| CString::new(arg).unwrap())
.collect::<Vec<_>>();
let argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
// TODO: Rust doesn't care, but POSIX argv has a NULL sentinel at the end
let ret = f(args.len() as c_int, argv.as_ptr());
jit_module.finish();
std::process::exit(ret);
}
fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> {
use rustc::middle::dependency_format::Linkage;
let mut dylib_paths = Vec::new();
let crate_info = CrateInfo::new(tcx);
let formats = tcx.sess.dependency_formats.borrow();
let data = formats.get(&CrateType::Executable).unwrap();
for &(cnum, _) in &crate_info.used_crates_dynamic {
let src = &crate_info.used_crate_source[&cnum];
match data[cnum.as_usize() - 1] {
Linkage::NotLinked | Linkage::IncludedFromDylib => {}
Linkage::Static => {
let name = tcx.crate_name(cnum);
let mut err = tcx
.sess
.struct_err(&format!("Can't load static lib {}", name.as_str()));
err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
err.emit();
}
Linkage::Dynamic => {
dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
}
}
}
let mut imported_symbols = Vec::new();
for path in dylib_paths {
use object::Object;
let lib = libloading::Library::new(&path).unwrap();
let obj = std::fs::read(path).unwrap();
let obj = object::File::parse(&obj).unwrap();
imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| {
let name = symbol.name().unwrap().to_string();
if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
return None;
}
let symbol: libloading::Symbol<*const u8> =
unsafe { lib.get(name.as_bytes()) }.unwrap();
Some((name, *symbol))
}));
std::mem::forget(lib)
}
tcx.sess.abort_if_errors();
imported_symbols
}
fn run_aot(
tcx: TyCtxt<'_>,
metadata: EncodedMetadata,
need_metadata_module: bool,
log: &mut Option<File>,
) -> Box<CodegenResults> {
let new_module = |name: String| {
let module: Module<FaerieBackend> = Module::new(
FaerieBuilder::new(
crate::build_isa(tcx.sess, true),
name + ".o",
FaerieTrapCollection::Disabled,
cranelift_module::default_libcall_names(),
)
.unwrap(),
);
assert_eq!(pointer_ty(tcx), module.target_config().pointer_type());
module
};
let emit_module = |kind: ModuleKind,
mut module: Module<FaerieBackend>,
debug: Option<DebugContext>| {
module.finalize_definitions();
let mut artifact = module.finish().artifact;
if let Some(mut debug) = debug {
debug.emit(&mut artifact);
}
let tmp_file = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Object, Some(&artifact.name));
let obj = artifact.emit().unwrap();
std::fs::write(&tmp_file, obj).unwrap();
CompiledModule {
name: artifact.name,
kind,
object: Some(tmp_file),
bytecode: None,
bytecode_compressed: None,
}
};
let mut faerie_module = new_module("some_file".to_string());
let mut debug = if tcx.sess.opts.debuginfo != DebugInfo::None
// macOS debuginfo doesn't work yet (see #303)
&& !tcx.sess.target.target.options.is_like_osx
{
let debug = DebugContext::new(
tcx,
faerie_module.target_config().pointer_type().bytes() as u8,
);
Some(debug)
} else {
None
};
codegen_cgus(tcx, &mut faerie_module, &mut debug, log);
tcx.sess.abort_if_errors();
let mut allocator_module = new_module("allocator_shim".to_string());
let created_alloc_shim = crate::allocator::codegen(tcx.sess, &mut allocator_module);
rustc_incremental::assert_dep_graph(tcx);
rustc_incremental::save_dep_graph(tcx);
rustc_incremental::finalize_session_directory(tcx.sess, tcx.crate_hash(LOCAL_CRATE));
let metadata_module = if need_metadata_module {
use rustc::mir::mono::CodegenUnitNameBuilder;
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
let metadata_cgu_name = cgu_name_builder
.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
.as_str()
.to_string();
let mut metadata_artifact = faerie::Artifact::new(
crate::build_isa(tcx.sess, true).triple().clone(),
metadata_cgu_name.clone(),
);
crate::metadata::write_metadata(tcx, &mut metadata_artifact);
let tmp_file = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
let obj = metadata_artifact.emit().unwrap();
std::fs::write(&tmp_file, obj).unwrap();
Some(CompiledModule {
name: metadata_cgu_name,
kind: ModuleKind::Metadata,
object: Some(tmp_file),
bytecode: None,
bytecode_compressed: None,
})
} else {
None
};
Box::new(CodegenResults {
crate_name: tcx.crate_name(LOCAL_CRATE),
modules: vec![emit_module(
ModuleKind::Regular,
faerie_module,
debug,
)],
allocator_module: if created_alloc_shim {
Some(emit_module(
ModuleKind::Allocator,
allocator_module,
None,
))
} else {
None
},
metadata_module,
crate_hash: tcx.crate_hash(LOCAL_CRATE),
metadata,
windows_subsystem: None, // Windows is not yet supported
linker_info: LinkerInfo::new(tcx),
crate_info: CrateInfo::new(tcx),
})
}
fn codegen_cgus<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut Module<impl Backend + 'static>,
debug: &mut Option<DebugContext<'tcx>>,
log: &mut Option<File>,
) {
let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
let mono_items = cgus
.iter()
.map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
.flatten()
.collect::<FxHashMap<_, (_, _)>>();
codegen_mono_items(tcx, module, debug.as_mut(), log, mono_items);
crate::main_shim::maybe_create_entry_wrapper(tcx, module);
}
fn codegen_mono_items<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut Module<impl Backend + 'static>,
debug_context: Option<&mut DebugContext<'tcx>>,
log: &mut Option<File>,
mono_items: FxHashMap<MonoItem<'tcx>, (RLinkage, Visibility)>,
) {
let mut cx = CodegenCx::new(tcx, module, debug_context);
time("codegen mono items", move || {
for (mono_item, (linkage, visibility)) in mono_items {
crate::unimpl::try_unimpl(tcx, log, || {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
trans_mono_item(&mut cx, mono_item, linkage);
});
}
cx.finalize();
});
}
fn trans_mono_item<'clif, 'tcx, B: Backend + 'static>(
cx: &mut crate::CodegenCx<'clif, 'tcx, B>,
mono_item: MonoItem<'tcx>,
linkage: Linkage,
) {
let tcx = cx.tcx;
match mono_item {
MonoItem::Fn(inst) => {
let _inst_guard =
PrintOnPanic(|| format!("{:?} {}", inst, tcx.symbol_name(inst).name.as_str()));
debug_assert!(!inst.substs.needs_infer());
let _mir_guard = PrintOnPanic(|| {
match inst.def {
InstanceDef::Item(_)
| InstanceDef::DropGlue(_, _)
| InstanceDef::Virtual(_, _) => {
let mut mir = ::std::io::Cursor::new(Vec::new());
crate::rustc_mir::util::write_mir_pretty(
tcx,
Some(inst.def_id()),
&mut mir,
)
.unwrap();
String::from_utf8(mir.into_inner()).unwrap()
}
_ => {
// FIXME fix write_mir_pretty for these instances
format!("{:#?}", tcx.instance_mir(inst.def))
}
}
});
crate::base::trans_fn(cx, inst, linkage);
}
MonoItem::Static(def_id) => {
crate::constant::codegen_static(&mut cx.constants_cx, def_id);
}
MonoItem::GlobalAsm(node_id) => tcx
.sess
.fatal(&format!("Unimplemented global asm mono item {:?}", node_id)),
}
}
fn time<R>(name: &str, f: impl FnOnce() -> R) -> R {
println!("[{}] start", name);
let before = std::time::Instant::now();
let res = f();
let after = std::time::Instant::now();
println!("[{}] end time: {:?}", name, after - before);
res
}
| {
#[cfg(not(target_arch = "wasm32"))]
let _: ! = run_jit(tcx, &mut log);
#[cfg(target_arch = "wasm32")]
panic!("jit not supported on wasm");
} | conditional_block |
lib.rs | //! This crate implements various functions that help speed up dynamic
//! programming, most importantly the SMAWK algorithm for finding row
//! or column minima in a totally monotone matrix with *m* rows and
//! *n* columns in time O(*m* + *n*). This is much better than the
//! brute force solution which would take O(*mn*). When *m* and *n*
//! are of the same order, this turns a quadratic function into a
//! linear function.
//!
//! # Examples
//!
//! Computing the column minima of an *m* ✕ *n* Monge matrix can be
//! done efficiently with `smawk_column_minima`:
//!
//! ```
//! use smawk::{Matrix, smawk_column_minima};
//!
//! let matrix = vec![
//! vec![3, 2, 4, 5, 6],
//! vec![2, 1, 3, 3, 4],
//! vec![2, 1, 3, 3, 4],
//! vec![3, 2, 4, 3, 4],
//! vec![4, 3, 2, 1, 1],
//! ];
//! let minima = vec![1, 1, 4, 4, 4];
//! assert_eq!(smawk_column_minima(&matrix), minima);
//! ```
//!
//! The `minima` vector gives the index of the minimum value per
//! column, so `minima[0] == 1` since the minimum value in the first
//! column is 2 (row 1). Note that the smallest row index is returned.
//!
//! # Definitions
//!
//! Some of the functions in this crate only work on matrices that are
//! *totally monotone*, which we will define below.
//!
//! ## Monotone Matrices
//!
//! We start with a helper definition. Given an *m* ✕ *n* matrix `M`,
//! we say that `M` is *monotone* when the minimum value of row `i` is
//! found to the left of the minimum value in row `i'` where `i < i'`.
//!
//! More formally, if we let `rm(i)` denote the column index of the
//! left-most minimum value in row `i`, then we have
//!
//! ```text
//! rm(0) ≤ rm(1) ≤ ... ≤ rm(m - 1)
//! ```
//!
//! This means that as you go down the rows from top to bottom, the
//! row-minima proceed from left to right.
//!
//! The algorithms in this crate deal with finding such row- and
//! column-minima.
//!
//! ## Totally Monotone Matrices
//!
//! We say that a matrix `M` is *totally monotone* when every
//! sub-matrix is monotone. A sub-matrix is formed by the intersection
//! of any two rows `i < i'` and any two columns `j < j'`.
//!
//! This is often expressed via the following equivalent condition:
//!
//! ```text
//! M[i, j] > M[i, j'] => M[i', j] > M[i', j']
//! ```
//!
//! for all `i < i'` and `j < j'`.
//!
//! ## Monge Property for Matrices
//!
//! A matrix `M` is said to fulfill the *Monge property* if
//!
//! ```text
//! M[i, j] + M[i', j'] ≤ M[i, j'] + M[i', j]
//! ```
//!
//! for all `i < i'` and `j < j'`. This says that given any rectangle
//! in the matrix, the sum of the top-left and bottom-right corners is
//! less than or equal to the sum of the bottom-left and upper-right
//! corners.
//!
//! All Monge matrices are totally monotone, so it is enough to
//! establish that the Monge property holds in order to use a matrix
//! with the functions in this crate. If your program is dealing with
//! unknown inputs, it can use [`monge::is_monge`] to verify that a
//! matrix is a Monge matrix.
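//!
//! As an illustrative sketch (assuming `monge::is_monge` takes any `Matrix`
//! implementation by reference and returns `bool`), such a check could look
//! like this:
//!
//! ```
//! use smawk::monge::is_monge;
//!
//! let matrix = vec![
//!     vec![3, 2, 4, 5, 6],
//!     vec![2, 1, 3, 3, 4],
//!     vec![2, 1, 3, 3, 4],
//!     vec![3, 2, 4, 3, 4],
//!     vec![4, 3, 2, 1, 1],
//! ];
//! assert!(is_monge(&matrix));
//! ```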
#![doc(html_root_url = "https://docs.rs/smawk/0.3.1")]
#[cfg(feature = "ndarray")]
pub mod brute_force;
pub mod monge;
#[cfg(feature = "ndarray")]
pub mod recursive;
/// Minimal matrix trait for two-dimensional arrays.
///
/// This provides the functionality needed to represent a read-only
/// numeric matrix. You can query the size of the matrix and access
/// elements. Modeled after [`ndarray::Array2`] from the [ndarray
/// crate](https://crates.io/crates/ndarray).
///
/// Enable the `ndarray` Cargo feature if you want to use it with
/// `ndarray::Array2`.
pub trait Matrix<T: Copy> {
/// Return the number of rows.
fn nrows(&self) -> usize;
/// Return the number of columns.
fn ncols(&self) -> usize;
/// Return a matrix element.
fn index(&self, row: usize, column: usize) -> T;
}
/// Simple and inefficient matrix representation used for doctest
/// examples and simple unit tests.
///
/// You should prefer implementing it yourself, or you can enable the
/// `ndarray` Cargo feature and use the provided implementation for
/// [`ndarray::Array2`].
impl<T: Copy> Matrix<T> for Vec<Vec<T>> {
fn nrows(&self) -> usize {
self.len()
}
fn ncols(&self) -> usize {
self[0].len()
}
fn index(&self, row: usize, column: usize) -> T {
self[row][column]
}
}
/// Adapting [`ndarray::Array2`] to the `Matrix` trait.
///
/// **Note: this implementation is only available if you enable the
/// `ndarray` Cargo feature.**
#[cfg(feature = "ndarray")]
impl<T: Copy> Matrix<T> for ndarray::Array2<T> {
#[inline]
fn nrows(&self) -> usize {
self.nrows()
}
#[inline]
fn ncols(&self) -> usize {
self.ncols()
}
#[inline]
fn index(&self, row: usize, column: usize) -> T {
self[[row, column]]
}
}
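// Illustrative sketch (not part of the original crate): the `Matrix` trait can
// also be implemented for a lightweight, function-backed matrix that computes
// entries on demand instead of storing all m * n values. `FnMatrix` is a
// hypothetical name introduced only for this example.
#[allow(dead_code)]
struct FnMatrix<F> {
    nrows: usize,
    ncols: usize,
    f: F,
}

impl<T: Copy, F: Fn(usize, usize) -> T> Matrix<T> for FnMatrix<F> {
    fn nrows(&self) -> usize {
        self.nrows
    }
    fn ncols(&self) -> usize {
        self.ncols
    }
    fn index(&self, row: usize, column: usize) -> T {
        (self.f)(row, column)
    }
}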
/// Compute row minima in O(*m* + *n*) time.
///
/// This implements the SMAWK algorithm for finding row minima in a
/// totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Panics
///
/// It is an error to call this on a matrix with zero columns.
pub fn smawk_row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
// Benchmarking shows that SMAWK performs roughly the same on row-
// and column-major matrices.
let mut minima = vec![0; matrix.nrows()];
smawk_inner(
&|j, i| matrix.index(i, j),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
/// Compute column minima in O(*m* + *n*) time.
///
/// This implements the SMAWK algorithm for finding column minima in a
/// totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Panics
///
/// It is an error to call this on a matrix with zero rows.
pub fn smawk_column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
let mut minima = vec![0; matrix.ncols()];
smawk_inner(
&|i, j| matrix.index(i, j),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
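// Small usage sketch (the matrix and the expected indices mirror the 5x5
// example from the unit tests further down): `smawk_row_minima` returns, for
// each row, the column of its leftmost minimum, and `smawk_column_minima`
// returns, for each column, the row of its topmost minimum.
#[cfg(test)]
mod smawk_usage_sketch {
    use super::*;

    #[test]
    fn row_and_column_minima_on_a_monge_matrix() {
        let matrix = vec![
            vec![3, 2, 4, 5, 6],
            vec![2, 1, 3, 3, 4],
            vec![2, 1, 3, 3, 4],
            vec![3, 2, 4, 3, 4],
            vec![4, 3, 2, 1, 1],
        ];
        assert_eq!(smawk_row_minima(&matrix), vec![1, 1, 1, 1, 3]);
        assert_eq!(smawk_column_minima(&matrix), vec![1, 1, 4, 4, 4]);
    }
}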
/// Compute column minima in the given area of the matrix. The
/// `minima` slice is updated inplace.
fn smawk_inner<T: PartialOrd + Copy, M: Fn(usize, usize) -> T>(
matrix: &M,
rows: &[usize],
cols: &[usize],
mut minima: &mut [usize],
) {
if cols.is_empty() {
return;
}
let mut stack = Vec::with_capacity(cols.len());
for r in rows {
// TODO: use stack.last() instead of stack.is_empty() etc
while !stack.is_empty()
&& matrix(stack[stack.len() - 1], cols[stack.len() - 1])
> matrix(*r, cols[stack.len() - 1])
{
stack.pop();
}
if stack.len() != cols.len() {
stack.push(*r);
}
}
let rows = &stack;
let mut odd_cols = Vec::with_capacity(1 + cols.len() / 2);
for (idx, c) in cols.iter().enumerate() {
if idx % 2 == 1 {
odd_cols.push(*c);
}
}
smawk_inner(matrix, rows, &odd_cols, &mut minima);
let mut r = 0;
for (c, &col) in cols.iter().enumerate().filter(|(c, _)| c % 2 == 0) {
let mut row = rows[r];
let last_row = if c == cols.len() - 1 {
rows[rows.len() - 1]
} else {
minima[cols[c + 1]]
};
let mut pair = (matrix(row, col), row);
while row!= last_row {
r += 1;
row = rows[r];
if (matrix(row, col), row) < pair {
pair = (matrix(row, col), row);
}
}
minima[col] = pair.1;
}
}
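// Sketch of what "the given area" means for `smawk_inner`: the row and column
// index sets restrict the search to a submatrix, and only the entries of
// `minima` indexed by those columns are written. The expected indices below
// follow from the 5x5 matrix used elsewhere in this file.
#[cfg(test)]
mod smawk_inner_area_sketch {
    use super::*;

    #[test]
    fn minima_restricted_to_a_submatrix() {
        let matrix = vec![
            vec![3, 2, 4, 5, 6],
            vec![2, 1, 3, 3, 4],
            vec![2, 1, 3, 3, 4],
            vec![3, 2, 4, 3, 4],
            vec![4, 3, 2, 1, 1],
        ];
        // Only rows 0..=3 and columns 0..=1 are considered.
        let mut minima = vec![0; 2];
        smawk_inner(&|i, j| matrix[i][j], &[0, 1, 2, 3], &[0, 1], &mut minima);
        // Column 0 holds 3, 2, 2, 3 in those rows; column 1 holds 2, 1, 1, 2.
        assert_eq!(minima, vec![1, 1]);
    }
}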
/// Compute upper-right column minima in O(*m* + *n*) time.
///
/// The input matrix must be totally monotone.
///
/// The function returns a vector of `(usize, T)`. The `usize` in the
/// tuple at index `j` tells you the row of the minimum value in
/// column `j` and the `T` value is the minimum value itself.
///
/// The algorithm only considers values above the main diagonal, which
/// means that it computes values `v(j)` where:
///
/// ```text
/// v(0) = initial
/// v(j) = min { M[i, j] | i < j } for j > 0
/// ```
///
/// If we let `r(j)` denote the row index of the minimum value in
/// column `j`, the tuples in the result vector become `(r(j), M[r(j),
/// j])`.
///
/// The algorithm is an *online* algorithm, in the sense that the `matrix`
/// function can refer back to previously computed column minima when
/// determining an entry in the matrix. The guarantee is that we only
/// call `matrix(i, j)` after having computed `v(i)`. This is
/// reflected in the `&[(usize, T)]` argument to `matrix`, which grows
/// as more and more values are computed.
pub fn online_column_minima<T: Copy + PartialOrd, M: Fn(&[(usize, T)], usize, usize) -> T>(
initial: T,
size: usize,
matrix: M,
) -> Vec<(usize, T)> {
let mut result = vec![(0, initial)];
// State used by the algorithm.
let mut finished = 0;
let mut base = 0;
let mut tentative = 0;
// Shorthand for evaluating the matrix. We need a macro here since
// we don't want to borrow the result vector.
macro_rules! m {
($i:expr, $j:expr) => {{
assert!($i < $j, "(i, j) not above diagonal: ({}, {})", $i, $j);
assert!(
$i < size && $j < size,
"(i, j) out of bounds: ({}, {}), size: {}",
$i,
$j,
size
);
matrix(&result[..finished + 1], $i, $j)
}};
}
// Keep going until we have finished all size columns. Since the
// columns are zero-indexed, we're done when finished == size - 1.
while finished < size - 1 {
// First case: we have already advanced past the previous
// tentative value. We make a new tentative value by applying
// smawk_inner to the largest square submatrix that fits under
// the base.
let i = finished + 1;
if i > tentative {
let rows = (base..finished + 1).collect::<Vec<_>>();
tentative = std::cmp::min(finished + rows.len(), size - 1);
let cols = (finished + 1..tentative + 1).collect::<Vec<_>>();
let mut minima = vec![0; tentative + 1];
smawk_inner(&|i, j| m![i, j], &rows, &cols, &mut minima);
for col in cols {
let row = minima[col];
let v = m![row, col];
if col >= result.len() {
result.push((row, v));
} else if v < result[col].1 {
result[col] = (row, v);
}
}
finished = i;
continue;
}
// Second case: the new column minimum is on the diagonal. All
// subsequent ones will be at least as low, so we can clear
// out all our work from higher rows. As in the fourth case,
// the loss of tentative is amortized against the increase in
// base.
let diag = m![i - 1, i];
if diag < result[i].1 {
re | rd case: row i-1 does not supply a column minimum in any
// column up to tentative. We simply advance finished while
// maintaining the invariant.
if m![i - 1, tentative] >= result[tentative].1 {
finished = i;
continue;
}
// Fourth and final case: a new column minimum at tentative.
// This allows us to make progress by incorporating rows prior
// to finished into the base. The base invariant holds because
// these rows cannot supply any later column minima. The work
// done when we last advanced tentative (and undone by this
// step) can be amortized against the increase in base.
base = i - 1;
tentative = i;
finished = i;
}
result
}
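// Usage sketch for the online variant: the callback receives the prefix of
// already-finished column minima, so it may define an entry in terms of v(i).
// Here the matrix is a fixed upper-triangular table (taken from the
// `online_5x5` test below), so the prefix argument is simply ignored.
#[cfg(test)]
mod online_usage_sketch {
    use super::*;

    #[test]
    fn online_minima_above_the_diagonal() {
        let matrix = vec![
            vec![0, 2, 4, 6, 7],
            vec![0, 0, 3, 4, 5],
            vec![0, 0, 0, 3, 4],
            vec![0, 0, 0, 0, 4],
            vec![0, 0, 0, 0, 0],
        ];
        let minima = online_column_minima(0, 5, |_prefix, i, j| matrix[i][j]);
        assert_eq!(minima, vec![(0, 0), (0, 2), (1, 3), (2, 3), (2, 4)]);
    }
}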
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smawk_1x1() {
let matrix = vec![vec![2]];
assert_eq!(smawk_row_minima(&matrix), vec![0]);
assert_eq!(smawk_column_minima(&matrix), vec![0]);
}
#[test]
fn smawk_2x1() {
let matrix = vec![
vec![3], //
vec![2],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![1]);
}
#[test]
fn smawk_1x2() {
let matrix = vec![vec![2, 1]];
assert_eq!(smawk_row_minima(&matrix), vec![1]);
assert_eq!(smawk_column_minima(&matrix), vec![0, 0]);
}
#[test]
fn smawk_2x2() {
let matrix = vec![
vec![3, 2], //
vec![2, 1],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1]);
}
#[test]
fn smawk_3x3() {
let matrix = vec![
vec![3, 4, 4], //
vec![3, 4, 4],
vec![2, 3, 3],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![2, 2, 2]);
}
#[test]
fn smawk_4x4() {
let matrix = vec![
vec![4, 5, 5, 5], //
vec![2, 3, 3, 3],
vec![2, 3, 3, 3],
vec![2, 2, 2, 2],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0, 0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 3, 3, 3]);
}
#[test]
fn smawk_5x5() {
let matrix = vec![
vec![3, 2, 4, 5, 6],
vec![2, 1, 3, 3, 4],
vec![2, 1, 3, 3, 4],
vec![3, 2, 4, 3, 4],
vec![4, 3, 2, 1, 1],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1, 1, 1, 3]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1, 4, 4, 4]);
}
#[test]
fn online_1x1() {
let matrix = vec![vec![0]];
let minima = vec![(0, 0)];
assert_eq!(online_column_minima(0, 1, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_2x2() {
let matrix = vec![
vec![0, 2], //
vec![0, 0],
];
let minima = vec![(0, 0), (0, 2)];
assert_eq!(online_column_minima(0, 2, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_3x3() {
let matrix = vec![
vec![0, 4, 4], //
vec![0, 0, 4],
vec![0, 0, 0],
];
let minima = vec![(0, 0), (0, 4), (0, 4)];
assert_eq!(online_column_minima(0, 3, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_4x4() {
let matrix = vec![
vec![0, 5, 5, 5], //
vec![0, 0, 3, 3],
vec![0, 0, 0, 3],
vec![0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 5), (1, 3), (1, 3)];
assert_eq!(online_column_minima(0, 4, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_5x5() {
let matrix = vec![
vec![0, 2, 4, 6, 7],
vec![0, 0, 3, 4, 5],
vec![0, 0, 0, 3, 4],
vec![0, 0, 0, 0, 4],
vec![0, 0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 2), (1, 3), (2, 3), (2, 4)];
assert_eq!(online_column_minima(0, 5, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn smawk_works_with_partial_ord() {
let matrix = vec![
vec![3.0, 2.0], //
vec![2.0, 1.0],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1]);
}
#[test]
fn online_works_with_partial_ord() {
let matrix = vec![
vec![0.0, 2.0], //
vec![0.0, 0.0],
];
let minima = vec![(0, 0.0), (0, 2.0)];
assert_eq!(online_column_minima(0.0, 2, |_, i: usize, j: usize| matrix[i][j]), minima);
}
}
| sult[i] = (i - 1, diag);
base = i - 1;
tentative = i;
finished = i;
continue;
}
// Thi | conditional_block |
lib.rs | //! This crate implements various functions that help speed up dynamic
//! programming, most importantly the SMAWK algorithm for finding row
//! or column minima in a totally monotone matrix with *m* rows and
//! *n* columns in time O(*m* + *n*). This is much better than the
//! brute force solution which would take O(*mn*). When *m* and *n*
//! are of the same order, this turns a quadratic function into a
//! linear function.
//!
//! # Examples
//!
//! Computing the column minima of an *m* ✕ *n* Monge matrix can be
//! done efficiently with `smawk_column_minima`:
//!
//! ```
//! use smawk::{Matrix, smawk_column_minima};
//!
//! let matrix = vec![
//! vec![3, 2, 4, 5, 6],
//! vec![2, 1, 3, 3, 4],
//! vec![2, 1, 3, 3, 4],
//! vec![3, 2, 4, 3, 4],
//! vec![4, 3, 2, 1, 1],
//! ];
//! let minima = vec![1, 1, 4, 4, 4];
//! assert_eq!(smawk_column_minima(&matrix), minima);
//! ```
//!
//! The `minima` vector gives the index of the minimum value per
//! column, so `minima[0] == 1` since the minimum value in the first
//! column is 2 (row 1). Note that the smallest row index is returned.
//!
//! # Definitions
//!
//! Some of the functions in this crate only work on matrices that are
//! *totally monotone*, which we will define below.
//!
//! ## Monotone Matrices
//!
//! We start with a helper definition. Given an *m* ✕ *n* matrix `M`,
//! we say that `M` is *monotone* when the minimum value of row `i` is
//! found to the left of the minimum value in row `i'` where `i < i'`.
//!
//! More formally, if we let `rm(i)` denote the column index of the
//! left-most minimum value in row `i`, then we have
//!
//! ```text
//! rm(0) ≤ rm(1) ≤ ... ≤ rm(m - 1)
//! ```
//!
//! This means that as you go down the rows from top to bottom, the
//! row-minima proceed from left to right.
//!
//! The algorithms in this crate deal with finding such row- and
//! column-minima.
//!
//! ## Totally Monotone Matrices
//!
//! We say that a matrix `M` is *totally monotone* when every
//! sub-matrix is monotone. A sub-matrix is formed by the intersection
//! of any two rows `i < i'` and any two columns `j < j'`.
//!
//! This is often expressed via the following equivalent condition:
//!
//! ```text
//! M[i, j] > M[i, j'] => M[i', j] > M[i', j']
//! ```
//!
//! for all `i < i'` and `j < j'`.
//!
//! ## Monge Property for Matrices
//!
//! A matrix `M` is said to fulfill the *Monge property* if
//!
//! ```text
//! M[i, j] + M[i', j'] ≤ M[i, j'] + M[i', j]
//! ```
//!
//! for all `i < i'` and `j < j'`. This says that given any rectangle
//! in the matrix, the sum of the top-left and bottom-right corners is
//! less than or equal to the sum of the bottom-left and upper-right
//! corners.
//!
//! All Monge matrices are totally monotone, so it is enough to
//! establish that the Monge property holds in order to use a matrix
//! with the functions in this crate. If your program is dealing with
//! unknown inputs, it can use [`monge::is_monge`] to verify that a
//! matrix is a Monge matrix.
#![doc(html_root_url = "https://docs.rs/smawk/0.3.1")]
#[cfg(feature = "ndarray")]
pub mod brute_force;
pub mod monge;
#[cfg(feature = "ndarray")]
pub mod recursive;
/// Minimal matrix trait for two-dimensional arrays.
///
/// This provides the functionality needed to represent a read-only
/// numeric matrix. You can query the size of the matrix and access
/// elements. Modeled after [`ndarray::Array2`] from the [ndarray
/// crate](https://crates.io/crates/ndarray).
///
/// Enable the `ndarray` Cargo feature if you want to use it with
/// `ndarray::Array2`.
pub trait Matrix<T: Copy> {
/// Return the number of rows.
fn nrows(&self) -> usize;
/// Return the number of columns.
fn ncols(&self) -> usize;
/// Return a matrix element.
fn index(&self, row: usize, column: usize) -> T;
}
/// Simple and inefficient matrix representation used for doctest
/// examples and simple unit tests.
///
/// You should prefer implementing it yourself, or you can enable the
/// `ndarray` Cargo feature and use the provided implementation for
/// [`ndarray::Array2`].
impl<T: Copy> Matrix<T> for Vec<Vec<T>> {
fn nrows(&self) -> usize {
self.len()
}
fn ncols(&self) -> usize {
self[0].len()
}
fn index(&self, row: usize, column: usize) -> T {
self[row][column]
}
}
/// Adapting [`ndarray::Array2`] to the `Matrix` trait.
///
/// **Note: this implementation is only available if you enable the
/// `ndarray` Cargo feature.**
#[cfg(feature = "ndarray")]
impl<T: Copy> Matrix<T> for ndarray::Array2<T> {
#[inline]
fn nrows(&self) -> usize {
self.nrows()
}
#[inline]
fn ncols(&self) -> usize {
self.ncols()
}
#[inline]
fn index(&self, row: usize, column: usize) -> T {
self[[row, column]]
}
}
/// Compute row minima in O(*m* + *n*) time.
///
/// This implements the SMAWK algorithm for finding row minima in a
/// totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Panics
///
/// It is an error to call this on a matrix with zero columns.
pub fn smawk_row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
// Benchmarking shows that SMAWK performs roughly the same on row-
// and column-major matrices.
let mut minima = vec![0; matrix.nrows()];
smawk_inner(
&|j, i| matrix.index(i, j),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
/// Compute column minima in O(*m* + *n*) time.
///
/// This implements the SMAWK algorithm for finding column minima in a
/// totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Panics
///
/// It is an error to call this on a matrix with zero rows.
pub fn smawk_column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
let mut minima = vec![0; matrix.ncols()];
smawk_inner(
&|i, j| matrix.index(i, j),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
/// Compute column minima in the given area of the matrix. The
/// `minima` slice is updated inplace.
fn smawk_inner<T: PartialOrd + Copy, M: Fn(usize, usize) -> T>(
matrix: &M,
rows: &[usize],
cols: &[usize],
mut minima: &mut [usize],
) {
if cols.is_empty() {
return;
}
let mut stack = Vec::with_capacity(cols.len());
for r in rows {
// TODO: use stack.last() instead of stack.is_empty() etc
while !stack.is_empty()
&& matrix(stack[stack.len() - 1], cols[stack.len() - 1])
> matrix(*r, cols[stack.len() - 1])
{
stack.pop();
}
if stack.len() != cols.len() {
stack.push(*r);
}
}
let rows = &stack;
let mut odd_cols = Vec::with_capacity(1 + cols.len() / 2);
for (idx, c) in cols.iter().enumerate() {
if idx % 2 == 1 {
odd_cols.push(*c);
}
}
smawk_inner(matrix, rows, &odd_cols, &mut minima);
let mut r = 0;
for (c, &col) in cols.iter().enumerate().filter(|(c, _)| c % 2 == 0) {
let mut row = rows[r];
let last_row = if c == cols.len() - 1 {
rows[rows.len() - 1]
} else {
minima[cols[c + 1]]
};
let mut pair = (matrix(row, col), row);
while row!= last_row {
r += 1;
row = rows[r];
if (matrix(row, col), row) < pair {
pair = (matrix(row, col), row);
}
}
minima[col] = pair.1;
}
}
/// Compute upper-right column minima in O(*m* + *n*) time.
///
/// The input matrix must be totally monotone.
///
/// The function returns a vector of `(usize, T)`. The `usize` in the
/// tuple at index `j` tells you the row of the minimum value in
/// column `j` and the `T` value is the minimum value itself.
///
/// The algorithm only considers values above the main diagonal, which
/// means that it computes values `v(j)` where:
///
/// ```text
/// v(0) = initial
/// v(j) = min { M[i, j] | i < j } for j > 0
/// ```
///
/// If we let `r(j)` denote the row index of the minimum value in
/// column `j`, the tuples in the result vector become `(r(j), M[r(j),
/// j])`.
///
/// The algorithm is an *online* algorithm, in the sense that the `matrix`
/// function can refer back to previously computed column minima when
/// determining an entry in the matrix. The guarantee is that we only
/// call `matrix(i, j)` after having computed `v(i)`. This is
/// reflected in the `&[(usize, T)]` argument to `matrix`, which grows
/// as more and more values are computed.
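///
/// # Examples
///
/// A small sketch (illustrative only) of a dynamic-programming style
/// recurrence: the cost of reaching column `j` from row `i` is taken to be
/// the already-computed value `v(i)` plus an assumed convex step cost
/// `(j - i)^2`, which keeps the implicit matrix totally monotone.
///
/// ```
/// use smawk::online_column_minima;
///
/// let minima = online_column_minima(0, 4, |prev: &[(usize, i32)], i, j| {
///     // `prev[i].1` is v(i); it is guaranteed to have been computed
///     // before this entry is queried.
///     prev[i].1 + ((j - i) * (j - i)) as i32
/// });
/// assert_eq!(minima, vec![(0, 0), (0, 1), (1, 2), (2, 3)]);
/// ```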
pub fn online_column_minima<T: Copy + PartialOrd, M: Fn(&[(usize, T)], usize, usize) -> T>(
initial: T,
size: usize,
matrix: M,
) -> Vec<(usize, T)> {
let mut result = vec![(0, initial)];
// State used by the algorithm.
let mut finished = 0;
let mut base = 0;
let mut tentative = 0;
// Shorthand for evaluating the matrix. We need a macro here since
// we don't want to borrow the result vector.
macro_rules! m {
($i:expr, $j:expr) => {{
assert!($i < $j, "(i, j) not above diagonal: ({}, {})", $i, $j);
assert!(
$i < size && $j < size,
"(i, j) out of bounds: ({}, {}), size: {}",
$i,
$j,
size
);
matrix(&result[..finished + 1], $i, $j)
}};
}
// Keep going until we have finished all size columns. Since the
// columns are zero-indexed, we're done when finished == size - 1.
while finished < size - 1 {
// First case: we have already advanced past the previous
// tentative value. We make a new tentative value by applying
// smawk_inner to the largest square submatrix that fits under
// the base.
let i = finished + 1;
if i > tentative {
let rows = (base..finished + 1).collect::<Vec<_>>();
tentative = std::cmp::min(finished + rows.len(), size - 1);
let cols = (finished + 1..tentative + 1).collect::<Vec<_>>();
let mut minima = vec![0; tentative + 1];
smawk_inner(&|i, j| m![i, j], &rows, &cols, &mut minima);
for col in cols {
let row = minima[col];
let v = m![row, col];
if col >= result.len() {
result.push((row, v));
} else if v < result[col].1 {
result[col] = (row, v);
}
}
finished = i;
continue;
}
// Second case: the new column minimum is on the diagonal. All
// subsequent ones will be at least as low, so we can clear
// out all our work from higher rows. As in the fourth case,
// the loss of tentative is amortized against the increase in
// base.
let diag = m![i - 1, i];
if diag < result[i].1 {
result[i] = (i - 1, diag);
base = i - 1;
tentative = i;
finished = i;
continue;
}
// Third case: row i-1 does not supply a column minimum in any
// column up to tentative. We simply advance finished while
// maintaining the invariant.
if m![i - 1, tentative] >= result[tentative].1 {
finished = i;
continue;
}
// Fourth and final case: a new column minimum at tentative.
// This allows us to make progress by incorporating rows prior
// to finished into the base. The base invariant holds because
// these rows cannot supply any later column minima. The work
// done when we last advanced tentative (and undone by this
// step) can be amortized against the increase in base.
base = i - 1;
tentative = i;
finished = i;
}
result
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smawk_1x1() {
let matrix = vec![vec![2]];
assert_eq!(smawk_row_minima(&matrix), vec![0]);
assert_eq!(smawk_column_minima(&matrix), vec![0]);
}
#[test]
fn smawk_2x1() {
| matrix = vec![
vec![3], //
vec![2],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![1]);
}
#[test]
fn smawk_1x2() {
let matrix = vec![vec![2, 1]];
assert_eq!(smawk_row_minima(&matrix), vec![1]);
assert_eq!(smawk_column_minima(&matrix), vec![0, 0]);
}
#[test]
fn smawk_2x2() {
let matrix = vec![
vec![3, 2], //
vec![2, 1],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1]);
}
#[test]
fn smawk_3x3() {
let matrix = vec![
vec![3, 4, 4], //
vec![3, 4, 4],
vec![2, 3, 3],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![2, 2, 2]);
}
#[test]
fn smawk_4x4() {
let matrix = vec![
vec![4, 5, 5, 5], //
vec![2, 3, 3, 3],
vec![2, 3, 3, 3],
vec![2, 2, 2, 2],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0, 0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 3, 3, 3]);
}
#[test]
fn smawk_5x5() {
let matrix = vec![
vec![3, 2, 4, 5, 6],
vec![2, 1, 3, 3, 4],
vec![2, 1, 3, 3, 4],
vec![3, 2, 4, 3, 4],
vec![4, 3, 2, 1, 1],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1, 1, 1, 3]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1, 4, 4, 4]);
}
#[test]
fn online_1x1() {
let matrix = vec![vec![0]];
let minima = vec![(0, 0)];
assert_eq!(online_column_minima(0, 1, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_2x2() {
let matrix = vec![
vec![0, 2], //
vec![0, 0],
];
let minima = vec![(0, 0), (0, 2)];
assert_eq!(online_column_minima(0, 2, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_3x3() {
let matrix = vec![
vec![0, 4, 4], //
vec![0, 0, 4],
vec![0, 0, 0],
];
let minima = vec![(0, 0), (0, 4), (0, 4)];
assert_eq!(online_column_minima(0, 3, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_4x4() {
let matrix = vec![
vec![0, 5, 5, 5], //
vec![0, 0, 3, 3],
vec![0, 0, 0, 3],
vec![0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 5), (1, 3), (1, 3)];
assert_eq!(online_column_minima(0, 4, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_5x5() {
let matrix = vec![
vec![0, 2, 4, 6, 7],
vec![0, 0, 3, 4, 5],
vec![0, 0, 0, 3, 4],
vec![0, 0, 0, 0, 4],
vec![0, 0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 2), (1, 3), (2, 3), (2, 4)];
assert_eq!(online_column_minima(0, 5, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn smawk_works_with_partial_ord() {
let matrix = vec![
vec![3.0, 2.0], //
vec![2.0, 1.0],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1]);
}
#[test]
fn online_works_with_partial_ord() {
let matrix = vec![
vec![0.0, 2.0], //
vec![0.0, 0.0],
];
let minima = vec![(0, 0.0), (0, 2.0)];
assert_eq!(online_column_minima(0.0, 2, |_, i: usize, j: usize| matrix[i][j]), minima);
}
}
| let | identifier_name |
lib.rs | //! This crate implements various functions that help speed up dynamic
//! programming, most importantly the SMAWK algorithm for finding row
//! or column minima in a totally monotone matrix with *m* rows and
//! *n* columns in time O(*m* + *n*). This is much better than the
//! brute force solution which would take O(*mn*). When *m* and *n*
//! are of the same order, this turns a quadratic function into a
//! linear function.
//!
//! # Examples
//!
//! Computing the column minima of an *m* ✕ *n* Monge matrix can be
//! done efficiently with `smawk_column_minima`:
//! | //! vec![2, 1, 3, 3, 4],
//! vec![2, 1, 3, 3, 4],
//! vec![3, 2, 4, 3, 4],
//! vec![4, 3, 2, 1, 1],
//! ];
//! let minima = vec![1, 1, 4, 4, 4];
//! assert_eq!(smawk_column_minima(&matrix), minima);
//! ```
//!
//! The `minima` vector gives the index of the minimum value per
//! column, so `minima[0] == 1` since the minimum value in the first
//! column is 2 (row 1). Note that the smallest row index is returned.
//!
//! # Definitions
//!
//! Some of the functions in this crate only work on matrices that are
//! *totally monotone*, which we will define below.
//!
//! ## Monotone Matrices
//!
//! We start with a helper definition. Given an *m* ✕ *n* matrix `M`,
//! we say that `M` is *monotone* when the minimum value of row `i` is
//! found to the left of the minimum value in row `i'` where `i < i'`.
//!
//! More formally, if we let `rm(i)` denote the column index of the
//! left-most minimum value in row `i`, then we have
//!
//! ```text
//! rm(0) ≤ rm(1) ≤... ≤ rm(m - 1)
//! ```
//!
//! This means that as you go down the rows from top to bottom, the
//! row-minima proceed from left to right.
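//!
//! For the example matrix shown in the Examples section, the leftmost row
//! minima sit in columns 1, 1, 1, 1, and 3 (compare the `smawk_5x5` test in
//! this file), a non-decreasing sequence, exactly as required here.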
//!
//! The algorithms in this crate deal with finding such row- and
//! column-minima.
//!
//! ## Totally Monotone Matrices
//!
//! We say that a matrix `M` is *totally monotone* when every
//! sub-matrix is monotone. A sub-matrix is formed by the intersection
//! of any two rows `i < i'` and any two columns `j < j'`.
//!
//! This is often expressed via the following equivalent condition:
//!
//! ```text
//! M[i, j] > M[i, j'] => M[i', j] > M[i', j']
//! ```
//!
//! for all `i < i'` and `j < j'`.
//!
//! ## Monge Property for Matrices
//!
//! A matrix `M` is said to fulfill the *Monge property* if
//!
//! ```text
//! M[i, j] + M[i', j'] ≤ M[i, j'] + M[i', j]
//! ```
//!
//! for all `i < i'` and `j < j'`. This says that given any rectangle
//! in the matrix, the sum of the top-left and bottom-right corners is
//! less than or equal to the sum of the bottom-left and upper-right
//! corners.
//!
//! All Monge matrices are totally monotone, so it is enough to
//! establish that the Monge property holds in order to use a matrix
//! with the functions in this crate. If your program is dealing with
//! unknown inputs, it can use [`monge::is_monge`] to verify that a
//! matrix is a Monge matrix.
#![doc(html_root_url = "https://docs.rs/smawk/0.3.1")]
#[cfg(feature = "ndarray")]
pub mod brute_force;
pub mod monge;
#[cfg(feature = "ndarray")]
pub mod recursive;
/// Minimal matrix trait for two-dimensional arrays.
///
/// This provides the functionality needed to represent a read-only
/// numeric matrix. You can query the size of the matrix and access
/// elements. Modeled after [`ndarray::Array2`] from the [ndarray
/// crate](https://crates.io/crates/ndarray).
///
/// Enable the `ndarray` Cargo feature if you want to use it with
/// `ndarray::Array2`.
pub trait Matrix<T: Copy> {
/// Return the number of rows.
fn nrows(&self) -> usize;
/// Return the number of columns.
fn ncols(&self) -> usize;
/// Return a matrix element.
fn index(&self, row: usize, column: usize) -> T;
}
/// Simple and inefficient matrix representation used for doctest
/// examples and simple unit tests.
///
/// You should prefer implementing it yourself, or you can enable the
/// `ndarray` Cargo feature and use the provided implementation for
/// [`ndarray::Array2`].
impl<T: Copy> Matrix<T> for Vec<Vec<T>> {
fn nrows(&self) -> usize {
self.len()
}
fn ncols(&self) -> usize {
self[0].len()
}
fn index(&self, row: usize, column: usize) -> T {
self[row][column]
}
}
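// Illustrative sketch only (`FnMatrix` is not part of the crate's API): the
// `Matrix` trait also admits matrices whose entries are computed on demand
// rather than stored, which is how totally monotone cost matrices often
// arise in dynamic-programming applications.
#[allow(dead_code)]
struct FnMatrix<F> {
    nrows: usize,
    ncols: usize,
    entry: F,
}

impl<T: Copy, F: Fn(usize, usize) -> T> Matrix<T> for FnMatrix<F> {
    fn nrows(&self) -> usize {
        self.nrows
    }
    fn ncols(&self) -> usize {
        self.ncols
    }
    fn index(&self, row: usize, column: usize) -> T {
        (self.entry)(row, column)
    }
}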
/// Adapting [`ndarray::Array2`] to the `Matrix` trait.
///
/// **Note: this implementation is only available if you enable the
/// `ndarray` Cargo feature.**
#[cfg(feature = "ndarray")]
impl<T: Copy> Matrix<T> for ndarray::Array2<T> {
#[inline]
fn nrows(&self) -> usize {
self.nrows()
}
#[inline]
fn ncols(&self) -> usize {
self.ncols()
}
#[inline]
fn index(&self, row: usize, column: usize) -> T {
self[[row, column]]
}
}
/// Compute row minima in O(*m* + *n*) time.
///
/// This implements the SMAWK algorithm for finding row minima in a
/// totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Panics
///
/// It is an error to call this on a matrix with zero columns.
pub fn smawk_row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
// Benchmarking shows that SMAWK performs roughly the same on row-
// and column-major matrices.
let mut minima = vec![0; matrix.nrows()];
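// Row minima are obtained by running the same inner routine on the
// transposed view: the closure below swaps the index order, and the row
// and column index sets trade places.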
smawk_inner(
&|j, i| matrix.index(i, j),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
/// Compute column minima in O(*m* + *n*) time.
///
/// This implements the SMAWK algorithm for finding column minima in a
/// totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Panics
///
/// It is an error to call this on a matrix with zero rows.
pub fn smawk_column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
let mut minima = vec![0; matrix.ncols()];
smawk_inner(
&|i, j| matrix.index(i, j),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
/// Compute column minima in the given area of the matrix. The
/// `minima` slice is updated in place.
fn smawk_inner<T: PartialOrd + Copy, M: Fn(usize, usize) -> T>(
matrix: &M,
rows: &[usize],
cols: &[usize],
mut minima: &mut [usize],
) {
if cols.is_empty() {
return;
}
let mut stack = Vec::with_capacity(cols.len());
for r in rows {
// TODO: use stack.last() instead of stack.is_empty() etc
while !stack.is_empty()
&& matrix(stack[stack.len() - 1], cols[stack.len() - 1])
> matrix(*r, cols[stack.len() - 1])
{
stack.pop();
}
if stack.len() != cols.len() {
stack.push(*r);
}
}
let rows = &stack;
let mut odd_cols = Vec::with_capacity(1 + cols.len() / 2);
for (idx, c) in cols.iter().enumerate() {
if idx % 2 == 1 {
odd_cols.push(*c);
}
}
smawk_inner(matrix, rows, &odd_cols, &mut minima);
let mut r = 0;
for (c, &col) in cols.iter().enumerate().filter(|(c, _)| c % 2 == 0) {
let mut row = rows[r];
let last_row = if c == cols.len() - 1 {
rows[rows.len() - 1]
} else {
minima[cols[c + 1]]
};
let mut pair = (matrix(row, col), row);
while row != last_row {
r += 1;
row = rows[r];
if (matrix(row, col), row) < pair {
pair = (matrix(row, col), row);
}
}
minima[col] = pair.1;
}
}
/// Compute upper-right column minima in O(*m* + *n*) time.
///
/// The input matrix must be totally monotone.
///
/// The function returns a vector of `(usize, T)`. The `usize` in the
/// tuple at index `j` tells you the row of the minimum value in
/// column `j` and the `T` value is the minimum value itself.
///
/// The algorithm only considers values above the main diagonal, which
/// means that it computes values `v(j)` where:
///
/// ```text
/// v(0) = initial
/// v(j) = min { M[i, j] | i < j } for j > 0
/// ```
///
/// If we let `r(j)` denote the row index of the minimum value in
/// column `j`, the tuples in the result vector become `(r(j), M[r(j),
/// j])`.
///
/// The algorithm is an *online* algorithm, in the sense that the `matrix`
/// function can refer back to previously computed column minima when
/// determining an entry in the matrix. The guarantee is that we only
/// call `matrix(i, j)` after having computed `v(i)`. This is
/// reflected in the `&[(usize, T)]` argument to `matrix`, which grows
/// as more and more values are computed.
pub fn online_column_minima<T: Copy + PartialOrd, M: Fn(&[(usize, T)], usize, usize) -> T>(
initial: T,
size: usize,
matrix: M,
) -> Vec<(usize, T)> {
let mut result = vec![(0, initial)];
// State used by the algorithm.
let mut finished = 0;
let mut base = 0;
let mut tentative = 0;
// Shorthand for evaluating the matrix. We need a macro here since
// we don't want to borrow the result vector.
macro_rules! m {
($i:expr, $j:expr) => {{
assert!($i < $j, "(i, j) not above diagonal: ({}, {})", $i, $j);
assert!(
$i < size && $j < size,
"(i, j) out of bounds: ({}, {}), size: {}",
$i,
$j,
size
);
matrix(&result[..finished + 1], $i, $j)
}};
}
// Keep going until we have finished all size columns. Since the
// columns are zero-indexed, we're done when finished == size - 1.
while finished < size - 1 {
// First case: we have already advanced past the previous
// tentative value. We make a new tentative value by applying
// smawk_inner to the largest square submatrix that fits under
// the base.
let i = finished + 1;
if i > tentative {
let rows = (base..finished + 1).collect::<Vec<_>>();
tentative = std::cmp::min(finished + rows.len(), size - 1);
let cols = (finished + 1..tentative + 1).collect::<Vec<_>>();
let mut minima = vec![0; tentative + 1];
smawk_inner(&|i, j| m![i, j], &rows, &cols, &mut minima);
for col in cols {
let row = minima[col];
let v = m![row, col];
if col >= result.len() {
result.push((row, v));
} else if v < result[col].1 {
result[col] = (row, v);
}
}
finished = i;
continue;
}
// Second case: the new column minimum is on the diagonal. All
// subsequent ones will be at least as low, so we can clear
// out all our work from higher rows. As in the fourth case,
// the loss of tentative is amortized against the increase in
// base.
let diag = m![i - 1, i];
if diag < result[i].1 {
result[i] = (i - 1, diag);
base = i - 1;
tentative = i;
finished = i;
continue;
}
// Third case: row i-1 does not supply a column minimum in any
// column up to tentative. We simply advance finished while
// maintaining the invariant.
if m![i - 1, tentative] >= result[tentative].1 {
finished = i;
continue;
}
// Fourth and final case: a new column minimum at tentative.
// This allows us to make progress by incorporating rows prior
// to finished into the base. The base invariant holds because
// these rows cannot supply any later column minima. The work
// done when we last advanced tentative (and undone by this
// step) can be amortized against the increase in base.
base = i - 1;
tentative = i;
finished = i;
}
result
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smawk_1x1() {
let matrix = vec![vec![2]];
assert_eq!(smawk_row_minima(&matrix), vec![0]);
assert_eq!(smawk_column_minima(&matrix), vec![0]);
}
#[test]
fn smawk_2x1() {
let matrix = vec![
vec![3], //
vec![2],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![1]);
}
#[test]
fn smawk_1x2() {
let matrix = vec![vec![2, 1]];
assert_eq!(smawk_row_minima(&matrix), vec![1]);
assert_eq!(smawk_column_minima(&matrix), vec![0, 0]);
}
#[test]
fn smawk_2x2() {
let matrix = vec![
vec![3, 2], //
vec![2, 1],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1]);
}
#[test]
fn smawk_3x3() {
let matrix = vec![
vec![3, 4, 4], //
vec![3, 4, 4],
vec![2, 3, 3],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![2, 2, 2]);
}
#[test]
fn smawk_4x4() {
let matrix = vec![
vec![4, 5, 5, 5], //
vec![2, 3, 3, 3],
vec![2, 3, 3, 3],
vec![2, 2, 2, 2],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0, 0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 3, 3, 3]);
}
#[test]
fn smawk_5x5() {
let matrix = vec![
vec![3, 2, 4, 5, 6],
vec![2, 1, 3, 3, 4],
vec![2, 1, 3, 3, 4],
vec![3, 2, 4, 3, 4],
vec![4, 3, 2, 1, 1],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1, 1, 1, 3]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1, 4, 4, 4]);
}
#[test]
fn online_1x1() {
let matrix = vec![vec![0]];
let minima = vec![(0, 0)];
assert_eq!(online_column_minima(0, 1, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_2x2() {
let matrix = vec![
vec![0, 2], //
vec![0, 0],
];
let minima = vec![(0, 0), (0, 2)];
assert_eq!(online_column_minima(0, 2, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_3x3() {
let matrix = vec![
vec![0, 4, 4], //
vec![0, 0, 4],
vec![0, 0, 0],
];
let minima = vec![(0, 0), (0, 4), (0, 4)];
assert_eq!(online_column_minima(0, 3, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_4x4() {
let matrix = vec![
vec![0, 5, 5, 5], //
vec![0, 0, 3, 3],
vec![0, 0, 0, 3],
vec![0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 5), (1, 3), (1, 3)];
assert_eq!(online_column_minima(0, 4, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_5x5() {
let matrix = vec![
vec![0, 2, 4, 6, 7],
vec![0, 0, 3, 4, 5],
vec![0, 0, 0, 3, 4],
vec![0, 0, 0, 0, 4],
vec![0, 0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 2), (1, 3), (2, 3), (2, 4)];
assert_eq!(online_column_minima(0, 5, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn smawk_works_with_partial_ord() {
let matrix = vec![
vec![3.0, 2.0], //
vec![2.0, 1.0],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1]);
}
#[test]
fn online_works_with_partial_ord() {
let matrix = vec![
vec![0.0, 2.0], //
vec![0.0, 0.0],
];
let minima = vec![(0, 0.0), (0, 2.0)];
assert_eq!(online_column_minima(0.0, 2, |_, i: usize, j: usize| matrix[i][j]), minima);
}
} | //! ```
//! use smawk::{Matrix, smawk_column_minima};
//!
//! let matrix = vec![
//! vec![3, 2, 4, 5, 6], | random_line_split |
lib.rs | //! This crate implements various functions that help speed up dynamic
//! programming, most importantly the SMAWK algorithm for finding row
//! or column minima in a totally monotone matrix with *m* rows and
//! *n* columns in time O(*m* + *n*). This is much better than the
//! brute force solution which would take O(*mn*). When *m* and *n*
//! are of the same order, this turns a quadratic function into a
//! linear function.
//!
//! # Examples
//!
//! Computing the column minima of an *m* ✕ *n* Monge matrix can be
//! done efficiently with `smawk_column_minima`:
//!
//! ```
//! use smawk::{Matrix, smawk_column_minima};
//!
//! let matrix = vec![
//! vec![3, 2, 4, 5, 6],
//! vec![2, 1, 3, 3, 4],
//! vec![2, 1, 3, 3, 4],
//! vec![3, 2, 4, 3, 4],
//! vec![4, 3, 2, 1, 1],
//! ];
//! let minima = vec![1, 1, 4, 4, 4];
//! assert_eq!(smawk_column_minima(&matrix), minima);
//! ```
//!
//! The `minima` vector gives the index of the minimum value per
//! column, so `minima[0] == 1` since the minimum value in the first
//! column is 2 (row 1). Note that the smallest row index is returned.
//!
//! # Definitions
//!
//! Some of the functions in this crate only work on matrices that are
//! *totally monotone*, which we will define below.
//!
//! ## Monotone Matrices
//!
//! We start with a helper definition. Given an *m* ✕ *n* matrix `M`,
//! we say that `M` is *monotone* when the minimum value of row `i` is
//! found to the left of the minimum value in row `i'` where `i < i'`.
//!
//! More formally, if we let `rm(i)` denote the column index of the
//! left-most minimum value in row `i`, then we have
//!
//! ```text
//! rm(0) ≤ rm(1) ≤... ≤ rm(m - 1)
//! ```
//!
//! This means that as you go down the rows from top to bottom, the
//! row-minima proceed from left to right.
//!
//! The algorithms in this crate deal with finding such row- and
//! column-minima.
//!
//! ## Totally Monotone Matrices
//!
//! We say that a matrix `M` is *totally monotone* when every
//! sub-matrix is monotone. A sub-matrix is formed by the intersection
//! of any two rows `i < i'` and any two columns `j < j'`.
//!
//! This is often expressed via the following equivalent condition:
//!
//! ```text
//! M[i, j] > M[i, j'] => M[i', j] > M[i', j']
//! ```
//!
//! for all `i < i'` and `j < j'`.
//!
//! ## Monge Property for Matrices
//!
//! A matrix `M` is said to fulfill the *Monge property* if
//!
//! ```text
//! M[i, j] + M[i', j'] ≤ M[i, j'] + M[i', j]
//! ```
//!
//! for all `i < i'` and `j < j'`. This says that given any rectangle
//! in the matrix, the sum of the top-left and bottom-right corners is
//! less than or equal to the sum of the bottom-left and upper-right
//! corners.
//!
//! All Monge matrices are totally monotone, so it is enough to
//! establish that the Monge property holds in order to use a matrix
//! with the functions in this crate. If your program is dealing with
//! unknown inputs, it can use [`monge::is_monge`] to verify that a
//! matrix is a Monge matrix.
#![doc(html_root_url = "https://docs.rs/smawk/0.3.1")]
#[cfg(feature = "ndarray")]
pub mod brute_force;
pub mod monge;
#[cfg(feature = "ndarray")]
pub mod recursive;
/// Minimal matrix trait for two-dimensional arrays.
///
/// This provides the functionality needed to represent a read-only
/// numeric matrix. You can query the size of the matrix and access
/// elements. Modeled after [`ndarray::Array2`] from the [ndarray
/// crate](https://crates.io/crates/ndarray).
///
/// Enable the `ndarray` Cargo feature if you want to use it with
/// `ndarray::Array2`.
pub trait Matrix<T: Copy> {
/// Return the number of rows.
fn nrows(&self) -> usize;
/// Return the number of columns.
fn ncols(&self) -> usize;
/// Return a matrix element.
fn index(&self, row: usize, column: usize) -> T;
}
/// Simple and inefficient matrix representation used for doctest
/// examples and simple unit tests.
///
/// You should prefer implementing it yourself, or you can enable the
/// `ndarray` Cargo feature and use the provided implementation for
/// [`ndarray::Array2`].
impl<T: Copy> Matrix<T> for Vec<Vec<T>> {
fn nrows(&self) -> usize {
self.len()
}
fn ncols(&self) -> usize {
self[0].len()
}
fn index(&self, row: usize, column: usize) -> T {
self[row][column]
}
}
/// Adapting [`ndarray::Array2`] to the `Matrix` trait.
///
/// **Note: this implementation is only available if you enable the
/// `ndarray` Cargo feature.**
#[cfg(feature = "ndarray")]
impl<T: Copy> Matrix<T> for ndarray::Array2<T> {
#[inline]
fn nrows(&self) -> usize {
self.nrows()
}
#[inline]
fn ncols(&self) -> usize {
self.ncols()
}
#[inline]
fn index(&self, row: usize, column: usize) -> T {
self[[row, column]]
}
}
/// Compute row minima in O(*m* + *n*) time.
///
/// This implements the SMAWK algorithm for finding row minima in a
/// totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Panics
///
/// It is an error to call this on a matrix with zero columns.
pub fn smawk_row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
// Benchmarking shows that SMAWK performs roughly the same on row-
// and column-major matrices.
let mut minima = vec![0; matrix.nrows()];
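// Row minima are obtained by running the same inner routine on the
// transposed view: the closure below swaps the index order, and the row
// and column index sets trade places.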
smawk_inner(
&|j, i| matrix.index(i, j),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
/// Compute column minima in O(*m* + *n*) time.
///
/// This implements the SMAWK algorithm for finding column minima in a
/// totally monotone matrix.
///
/// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and
/// Wilbur, *Geometric applications of a matrix searching algorithm*,
/// Algorithmica 2, pp. 195-208 (1987) and the code here is a
/// translation of [David Eppstein's Python code][pads].
///
/// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py
///
/// Running time on an *m* ✕ *n* matrix: O(*m* + *n*).
///
/// # Panics
///
/// It is an error to call this on a matrix with zero rows.
pub fn smawk_column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> {
let mut mi | lumn minima in the given area of the matrix. The
/// `minima` slice is updated in place.
fn smawk_inner<T: PartialOrd + Copy, M: Fn(usize, usize) -> T>(
matrix: &M,
rows: &[usize],
cols: &[usize],
mut minima: &mut [usize],
) {
if cols.is_empty() {
return;
}
let mut stack = Vec::with_capacity(cols.len());
for r in rows {
// TODO: use stack.last() instead of stack.is_empty() etc
while !stack.is_empty()
&& matrix(stack[stack.len() - 1], cols[stack.len() - 1])
> matrix(*r, cols[stack.len() - 1])
{
stack.pop();
}
if stack.len() != cols.len() {
stack.push(*r);
}
}
let rows = &stack;
let mut odd_cols = Vec::with_capacity(1 + cols.len() / 2);
for (idx, c) in cols.iter().enumerate() {
if idx % 2 == 1 {
odd_cols.push(*c);
}
}
smawk_inner(matrix, rows, &odd_cols, &mut minima);
let mut r = 0;
for (c, &col) in cols.iter().enumerate().filter(|(c, _)| c % 2 == 0) {
let mut row = rows[r];
let last_row = if c == cols.len() - 1 {
rows[rows.len() - 1]
} else {
minima[cols[c + 1]]
};
let mut pair = (matrix(row, col), row);
while row != last_row {
r += 1;
row = rows[r];
if (matrix(row, col), row) < pair {
pair = (matrix(row, col), row);
}
}
minima[col] = pair.1;
}
}
/// Compute upper-right column minima in O(*m* + *n*) time.
///
/// The input matrix must be totally monotone.
///
/// The function returns a vector of `(usize, T)`. The `usize` in the
/// tuple at index `j` tells you the row of the minimum value in
/// column `j` and the `T` value is the minimum value itself.
///
/// The algorithm only considers values above the main diagonal, which
/// means that it computes values `v(j)` where:
///
/// ```text
/// v(0) = initial
/// v(j) = min { M[i, j] | i < j } for j > 0
/// ```
///
/// If we let `r(j)` denote the row index of the minimum value in
/// column `j`, the tuples in the result vector become `(r(j), M[r(j),
/// j])`.
///
/// The algorithm is an *online* algorithm, in the sense that the `matrix`
/// function can refer back to previously computed column minima when
/// determining an entry in the matrix. The guarantee is that we only
/// call `matrix(i, j)` after having computed `v(i)`. This is
/// reflected in the `&[(usize, T)]` argument to `matrix`, which grows
/// as more and more values are computed.
pub fn online_column_minima<T: Copy + PartialOrd, M: Fn(&[(usize, T)], usize, usize) -> T>(
initial: T,
size: usize,
matrix: M,
) -> Vec<(usize, T)> {
let mut result = vec![(0, initial)];
// State used by the algorithm.
let mut finished = 0;
let mut base = 0;
let mut tentative = 0;
// Shorthand for evaluating the matrix. We need a macro here since
// we don't want to borrow the result vector.
macro_rules! m {
($i:expr, $j:expr) => {{
assert!($i < $j, "(i, j) not above diagonal: ({}, {})", $i, $j);
assert!(
$i < size && $j < size,
"(i, j) out of bounds: ({}, {}), size: {}",
$i,
$j,
size
);
matrix(&result[..finished + 1], $i, $j)
}};
}
// Keep going until we have finished all size columns. Since the
// columns are zero-indexed, we're done when finished == size - 1.
while finished < size - 1 {
// First case: we have already advanced past the previous
// tentative value. We make a new tentative value by applying
// smawk_inner to the largest square submatrix that fits under
// the base.
let i = finished + 1;
if i > tentative {
let rows = (base..finished + 1).collect::<Vec<_>>();
tentative = std::cmp::min(finished + rows.len(), size - 1);
let cols = (finished + 1..tentative + 1).collect::<Vec<_>>();
let mut minima = vec![0; tentative + 1];
smawk_inner(&|i, j| m![i, j], &rows, &cols, &mut minima);
for col in cols {
let row = minima[col];
let v = m![row, col];
if col >= result.len() {
result.push((row, v));
} else if v < result[col].1 {
result[col] = (row, v);
}
}
finished = i;
continue;
}
// Second case: the new column minimum is on the diagonal. All
// subsequent ones will be at least as low, so we can clear
// out all our work from higher rows. As in the fourth case,
// the loss of tentative is amortized against the increase in
// base.
let diag = m![i - 1, i];
if diag < result[i].1 {
result[i] = (i - 1, diag);
base = i - 1;
tentative = i;
finished = i;
continue;
}
// Third case: row i-1 does not supply a column minimum in any
// column up to tentative. We simply advance finished while
// maintaining the invariant.
if m![i - 1, tentative] >= result[tentative].1 {
finished = i;
continue;
}
// Fourth and final case: a new column minimum at tentative.
// This allows us to make progress by incorporating rows prior
// to finished into the base. The base invariant holds because
// these rows cannot supply any later column minima. The work
// done when we last advanced tentative (and undone by this
// step) can be amortized against the increase in base.
base = i - 1;
tentative = i;
finished = i;
}
result
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smawk_1x1() {
let matrix = vec![vec![2]];
assert_eq!(smawk_row_minima(&matrix), vec![0]);
assert_eq!(smawk_column_minima(&matrix), vec![0]);
}
#[test]
fn smawk_2x1() {
let matrix = vec![
vec![3], //
vec![2],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![1]);
}
#[test]
fn smawk_1x2() {
let matrix = vec![vec![2, 1]];
assert_eq!(smawk_row_minima(&matrix), vec![1]);
assert_eq!(smawk_column_minima(&matrix), vec![0, 0]);
}
#[test]
fn smawk_2x2() {
let matrix = vec![
vec![3, 2], //
vec![2, 1],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1]);
}
#[test]
fn smawk_3x3() {
let matrix = vec![
vec![3, 4, 4], //
vec![3, 4, 4],
vec![2, 3, 3],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![2, 2, 2]);
}
#[test]
fn smawk_4x4() {
let matrix = vec![
vec![4, 5, 5, 5], //
vec![2, 3, 3, 3],
vec![2, 3, 3, 3],
vec![2, 2, 2, 2],
];
assert_eq!(smawk_row_minima(&matrix), vec![0, 0, 0, 0]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 3, 3, 3]);
}
#[test]
fn smawk_5x5() {
let matrix = vec![
vec![3, 2, 4, 5, 6],
vec![2, 1, 3, 3, 4],
vec![2, 1, 3, 3, 4],
vec![3, 2, 4, 3, 4],
vec![4, 3, 2, 1, 1],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1, 1, 1, 3]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1, 4, 4, 4]);
}
#[test]
fn online_1x1() {
let matrix = vec![vec![0]];
let minima = vec![(0, 0)];
assert_eq!(online_column_minima(0, 1, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_2x2() {
let matrix = vec![
vec![0, 2], //
vec![0, 0],
];
let minima = vec![(0, 0), (0, 2)];
assert_eq!(online_column_minima(0, 2, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_3x3() {
let matrix = vec![
vec![0, 4, 4], //
vec![0, 0, 4],
vec![0, 0, 0],
];
let minima = vec![(0, 0), (0, 4), (0, 4)];
assert_eq!(online_column_minima(0, 3, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_4x4() {
let matrix = vec![
vec![0, 5, 5, 5], //
vec![0, 0, 3, 3],
vec![0, 0, 0, 3],
vec![0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 5), (1, 3), (1, 3)];
assert_eq!(online_column_minima(0, 4, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn online_5x5() {
let matrix = vec![
vec![0, 2, 4, 6, 7],
vec![0, 0, 3, 4, 5],
vec![0, 0, 0, 3, 4],
vec![0, 0, 0, 0, 4],
vec![0, 0, 0, 0, 0],
];
let minima = vec![(0, 0), (0, 2), (1, 3), (2, 3), (2, 4)];
assert_eq!(online_column_minima(0, 5, |_, i, j| matrix[i][j]), minima);
}
#[test]
fn smawk_works_with_partial_ord() {
let matrix = vec![
vec![3.0, 2.0], //
vec![2.0, 1.0],
];
assert_eq!(smawk_row_minima(&matrix), vec![1, 1]);
assert_eq!(smawk_column_minima(&matrix), vec![1, 1]);
}
#[test]
fn online_works_with_partial_ord() {
let matrix = vec![
vec![0.0, 2.0], //
vec![0.0, 0.0],
];
let minima = vec![(0, 0.0), (0, 2.0)];
assert_eq!(online_column_minima(0.0, 2, |_, i: usize, j: usize| matrix[i][j]), minima);
}
}
| nima = vec![0; matrix.ncols()];
smawk_inner(
&|i, j| matrix.index(i, j),
&(0..matrix.nrows()).collect::<Vec<_>>(),
&(0..matrix.ncols()).collect::<Vec<_>>(),
&mut minima,
);
minima
}
/// Compute co | identifier_body |
mod.rs | let mut latch_nodes = NodeSet::new();
for edge in self.graph.edges_directed(cur_node, Incoming) {
// backedges are always from original graph nodes
if visited.contains(edge.source()) {
backedges.insert(edge.id());
latch_nodes.insert(edge.source());
}
}
// remove backedges
for e in &backedges {
self.graph.remove_edge(e);
}
// regionify loop
let mut loop_nodes = graph_utils::slice(&self.graph, cur_node, &latch_nodes).nodes;
let loop_header = self.funnel_abnormal_entries(cur_node, &loop_nodes);
let mut succ_nodes =
graph_utils::strict_successors_of_set(&self.graph, &loop_nodes);
self.refine_loop(&mut loop_nodes, &mut succ_nodes);
let loop_succ_opt = {
// pick the successor with the smallest post-order
let final_succ_opt = DfsPostOrder::new(&self.graph, self.entry)
.iter(&self.graph)
.find(|&n| succ_nodes.contains(n));
if let Some(final_succ) = final_succ_opt {
succ_nodes.remove(final_succ);
let loop_succ =
self.funnel_abnormal_exits(&mut loop_nodes, final_succ, &succ_nodes);
Some(loop_succ)
} else {
None
}
};
debug_assert!(
graph_utils::strict_successors_of_set(&self.graph, &loop_nodes).len() <= 1
);
let loop_body = self.structure_acyclic_sese_region(loop_header, &loop_nodes);
let repl_ast = refinement::refine_loop::<A>(self.cctx, loop_body);
self.graph[loop_header] = CfgNode::Code(repl_ast);
if let Some(loop_succ) = loop_succ_opt {
self.graph.add_edge(loop_header, loop_succ, CfgEdge::True);
}
} else {
// acyclic
let region = graph_utils::dominated_by(&self.graph, self.entry, cur_node);
// single-block regions aren't interesting
if region.len() > 1 {
let succs = graph_utils::strict_successors_of_set(&self.graph, ®ion);
// `region` must have one or zero successors
if succs.len() <= 1 {
let opt_succ = succs.iter().next();
let repl_ast = self.structure_acyclic_sese_region(cur_node, ®ion);
self.graph[cur_node] = CfgNode::Code(repl_ast);
if let Some(succ) = opt_succ {
self.graph.add_edge(cur_node, succ, CfgEdge::True);
}
}
}
}
}
let ret = self.graph.remove_node(self.entry).unwrap();
debug_assert!(self.graph.node_count() == 0);
if let CfgNode::Code(ret) = ret {
(ret, self.actx)
} else {
panic!("last node wasn't a Code node")
}
}
/// Converts the given acyclic region headed by `header` into an `AstNode`.
fn structure_acyclic_sese_region(
&mut self,
header: NodeIndex,
region: &NodeSet,
) -> AstNode<'cd, A> {
let slice = graph_utils::slice(&self.graph, header, region);
let reaching_conds = self.reaching_conditions(&slice);
let mut region_graph =
StableDiGraph::with_capacity(slice.topo_order.len(), slice.edges.len());
let mut old_new_map = HashMap::with_capacity(slice.topo_order.len());
let mut region_conditions = Vec::new();
// move all region nodes into `region_graph`.
for &old_n in &slice.topo_order {
let cfg_node = mem::replace(&mut self.graph[old_n], CfgNode::Dummy("sasr replaced"));
if let CfgNode::Condition(c) = cfg_node {
// record all conditions in the region
region_conditions.push(c);
}
let new_node = match cfg_node {
// refinement needs to be able to see `Break`s
CfgNode::Code(AstNodeC::Break) => Some(AstNodeC::Break),
// other nodes should be opaque
CfgNode::Code(ast) => Some(AstNodeC::BasicBlock(ast)),
_ => None,
};
let new_n = region_graph.add_node((reaching_conds[&old_n], new_node));
old_new_map.insert(old_n, new_n);
}
let old_new_map = old_new_map;
// copy over edges
for e in &slice.edges {
let (src, dst) = self.graph.edge_endpoints(e).unwrap();
region_graph.add_edge(old_new_map[&src], old_new_map[&dst], ());
}
// remove region nodes from the cfg
for &n in &slice.topo_order {
// we don't want to remove `header` since that will also remove
// incoming edges, which we need to keep
if n != header {
let _removed = self.graph.remove_node(n);
debug_assert!(_removed.is_some());
}
}
let ast = refinement::refine::<RegionAstContext<A>>(
self.cctx,
region_graph,
old_new_map[&header],
);
let ast = dedup_conds::run(&mut self.actx, self.cctx, ®ion_conditions, ast);
let ast = RegionAstContext::<A>::export(ast);
refinement::simplify_ast_node::<A>(self.cctx, ast).unwrap_or_default()
}
/// Computes the reaching condition for every node in the given graph slice.
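///
/// Informally, the start node of the slice gets the condition `true`; every
/// other node's condition is the disjunction, over its incoming slice
/// edges, of the source node's reaching condition conjoined with the branch
/// condition carried by that edge (negated for the `False` branch of a
/// `Condition` node).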
fn reaching_conditions(
&self,
slice: &graph_utils::GraphSlice<NodeIndex, EdgeIndex>,
) -> HashMap<NodeIndex, Condition<'cd, A>> {
// {Node, Edge}Filtered don't implement IntoNeighborsDirected :(
// https://github.com/bluss/petgraph/pull/219
// Also EdgeFiltered<Reversed<_>, _> isn't Into{Neighbors, Edges}
// because Reversed<_> isn't IntoEdges
let mut ret = HashMap::with_capacity(slice.topo_order.len());
let mut iter = slice.topo_order.iter();
if let Some(&start) = iter.next() {
ret.insert(start, self.cctx.mk_true());
for &n in iter {
let reach_cond = self.cctx.mk_or_from_iter(
// manually restrict to slice
self.graph
.edges_directed(n, Incoming)
.filter(|e| slice.edges.contains(e.id()))
.map(|e| {
let src_cond = ret[&e.source()];
match (&self.graph[e.source()], e.weight()) {
(&CfgNode::Condition(c), CfgEdge::True) => {
self.cctx.mk_and(src_cond, self.cctx.mk_var(c))
}
(&CfgNode::Condition(c), CfgEdge::False) => self
.cctx
.mk_and(src_cond, self.cctx.mk_not(self.cctx.mk_var(c))),
(_, CfgEdge::True) => src_cond,
(_, CfgEdge::False) => self.cctx.mk_false(),
}
}),
);
let _old = ret.insert(n, reach_cond);
debug_assert!(_old.is_none());
}
}
ret
}
/// Transforms the loop into a single-entry loop.
/// Returns the new loop header.
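///
/// Informally: every edge entering the loop from outside is redirected
/// through a node that stores an entry number in a fresh structuring
/// variable and then jumps to a new header; the new header dispatches back
/// to the original entry targets through a cascade of `struct_var == k`
/// conditions, with entry number 0 reserved for the original header.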
fn funnel_abnormal_entries(&mut self, header: NodeIndex, loop_nodes: &NodeSet) -> NodeIndex {
let mut entry_map = HashMap::new();
for n in loop_nodes {
for e in self.graph.edges_directed(n, Incoming) {
if !loop_nodes.contains(e.source()) {
entry_map.entry(n).or_insert(Vec::new()).push(e.id());
}
}
}
// loop must be reachable, so the header must have entries
let header_entries = entry_map.remove(&header).unwrap();
debug_assert!(!header_entries.is_empty());
let abnormal_entry_map = entry_map;
if abnormal_entry_map.is_empty() {
// no abnormal entries
return header;
}
let abnormal_entry_iter = (1..).zip(&abnormal_entry_map);
let struct_var = self.actx.mk_fresh_var();
// make condition cascade
let new_header = {
let abnormal_entry_iter = abnormal_entry_iter.clone().map(|(n, (&t, _))| (n, t));
let dummy_preheader = self.graph.add_node(CfgNode::Dummy("loop \"preheader\""));
let mut prev_cascade_node = dummy_preheader;
let mut prev_entry_target = header;
let mut prev_entry_num = 0;
// we make the condition node for the *previous* entry target b/c
// the current one might be the last one, which shouldn't get a
// condition node because it's the only possible target
for (entry_num, entry_target) in abnormal_entry_iter {
let prev_cond_eq = self
.cctx
.new_var(self.actx.mk_cond_equals(&struct_var, prev_entry_num));
let cascade_node = self.graph.add_node(CfgNode::Condition(prev_cond_eq));
self.graph
.add_edge(prev_cascade_node, cascade_node, CfgEdge::False);
self.graph
.add_edge(cascade_node, prev_entry_target, CfgEdge::True);
let struct_reset = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock(
self.actx.mk_var_assign(&struct_var, 0),
)));
self.graph
.add_edge(struct_reset, entry_target, CfgEdge::True);
prev_cascade_node = cascade_node;
prev_entry_target = struct_reset;
prev_entry_num = entry_num;
}
self.graph
.add_edge(prev_cascade_node, prev_entry_target, CfgEdge::False);
// we always add an edge from dummy_preheader
let new_header = self.graph.neighbors(dummy_preheader).next().unwrap();
self.graph.remove_node(dummy_preheader);
new_header
};
// redirect entries
for (entry_num, entry_edges) in
iter::once((0, &header_entries)).chain(abnormal_entry_iter.map(|(n, (_, e))| (n, e)))
{
let struct_assign = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock(
self.actx.mk_var_assign(&struct_var, entry_num),
)));
self.graph
.add_edge(struct_assign, new_header, CfgEdge::True);
for &entry_edge in entry_edges {
graph_utils::retarget_edge(&mut self.graph, entry_edge, struct_assign);
}
}
new_header
}
/// Incrementally adds nodes dominated by the loop to the loop until
/// there's only one successor or there are no more nodes to add.
fn refine_loop(&self, loop_nodes: &mut NodeSet, succ_nodes: &mut NodeSet) {
// reuse this `NodeSet` so we avoid allocating
let mut new_nodes = NodeSet::new();
while succ_nodes.len() > 1 {
for n in &*succ_nodes {
if self
.graph
.neighbors_directed(n, Incoming)
.all(|pred| loop_nodes.contains(pred))
{
// post-pone removal from `succ_nodes` b/c rust ownership
loop_nodes.insert(n);
new_nodes.extend(self.graph.neighbors(n).filter(|&u| !loop_nodes.contains(u)));
}
}
// do the removal
succ_nodes.difference_with(&loop_nodes);
if new_nodes.is_empty() {
break;
}
succ_nodes.union_with(&new_nodes);
new_nodes.clear();
}
}
/// Transforms the loop so that all loop exits are `break`.
/// Returns the new loop successor.
fn funnel_abnormal_exits(
&mut self,
loop_nodes: &mut NodeSet,
final_succ: NodeIndex,
abn_succ_nodes: &NodeSet,
) -> NodeIndex {
// replace "normal" exit edges with "break"
{
let exit_edges: Vec<_> =
graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, final_succ)
.collect();
for exit_edge in exit_edges {
let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Break));
graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node);
loop_nodes.insert(break_node);
}
}
if abn_succ_nodes.is_empty() {
// no abnormal exits
return final_succ;
}
let abn_succ_iter = (1..).zip(abn_succ_nodes);
let struct_var = self.actx.mk_fresh_var_zeroed();
// replace abnormal exit edges with "break"
for (exit_num, exit_target) in abn_succ_iter.clone() {
let exit_edges: Vec<_> =
graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, exit_target)
.collect();
for exit_edge in exit_edges {
let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Seq(vec![
AstNodeC::BasicBlock(self.actx.mk_var_assign(&struct_var, exit_num)),
AstNodeC::Break,
])));
graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node);
loop_nodes.insert(break_node);
}
}
let mut cur_succ = final_succ;
// make condition cascade
for (exit_num, exit_target) in abn_succ_iter.clone() {
let cond = self
.cctx
.new_var(self.actx.mk_cond_equals(&struct_var, exit_num));
let cascade_node = self.graph.add_node(CfgNode::Condition(cond));
self.graph
.add_edge(cascade_node, exit_target, CfgEdge::True);
self.graph.add_edge(cascade_node, cur_succ, CfgEdge::False);
cur_succ = cascade_node;
}
cur_succ
}
}
struct RegionAstContext<'cd, A>(PhantomData<(&'cd (), A)>);
impl<'cd, A: AstContext> AstContext for RegionAstContext<'cd, A> {
type Block = AstNode<'cd, A>;
type Condition = A::Condition;
type BoolVariable = A::BoolVariable;
type Variable = A::Variable;
}
impl<'cd, A: AstContext> RegionAstContext<'cd, A> {
fn export(ast: AstNode<'cd, Self>) -> AstNode<'cd, A> {
use self::AstNodeC::*;
match ast {
BasicBlock(b) => b,
Seq(seq) => Seq(seq.into_iter().map(Self::export).collect()),
Cond(c, t, oe) => Cond(
c,
Box::new(Self::export(*t)),
oe.map(|e| Box::new(Self::export(*e))),
),
Loop(t, b) => Loop(t, Box::new(Self::export(*b))),
Break => Break,
Switch(v, cases, default) => Switch(
v,
cases
.into_iter()
.map(|(vs, a)| (vs, Self::export(a)))
.collect(),
Box::new(Self::export(*default)),
),
}
}
}
pub fn mk_code_node<A: AstContext>(block: A::Block) -> CfgNode<'static, A> { | CfgNode::Code(AstNodeC::BasicBlock(block)) | random_line_split |
|
mod.rs | `", s),
}
}
}
pub fn structure_whole(mut self) -> (AstNode<'cd, A>, A) {
let mut loop_headers = NodeSet::new();
let mut podfs_trace = Vec::new();
graph_utils::depth_first_search(&self.graph, self.entry, |ev| {
use self::graph_utils::DfsEvent::*;
match ev {
BackEdge(e) => {
loop_headers.insert(e.target());
}
Finish(n) => podfs_trace.push(n),
_ => (),
}
});
let (podfs_trace, loop_headers) = (podfs_trace, loop_headers);
let mut visited = NodeSet::with_capacity(self.graph.node_bound());
for &cur_node in &podfs_trace {
visited.insert(cur_node);
if loop_headers.contains(cur_node) {
// loop
// find latch nodes
let mut backedges = EdgeSet::new();
let mut latch_nodes = NodeSet::new();
for edge in self.graph.edges_directed(cur_node, Incoming) {
// backedges are always from original graph nodes
if visited.contains(edge.source()) {
backedges.insert(edge.id());
latch_nodes.insert(edge.source());
}
}
// remove backedges
for e in &backedges {
self.graph.remove_edge(e);
}
// regionify loop
let mut loop_nodes = graph_utils::slice(&self.graph, cur_node, &latch_nodes).nodes;
let loop_header = self.funnel_abnormal_entries(cur_node, &loop_nodes);
let mut succ_nodes =
graph_utils::strict_successors_of_set(&self.graph, &loop_nodes);
self.refine_loop(&mut loop_nodes, &mut succ_nodes);
let loop_succ_opt = {
// pick the successor with the smallest post-order
let final_succ_opt = DfsPostOrder::new(&self.graph, self.entry)
.iter(&self.graph)
.find(|&n| succ_nodes.contains(n));
if let Some(final_succ) = final_succ_opt {
succ_nodes.remove(final_succ);
let loop_succ =
self.funnel_abnormal_exits(&mut loop_nodes, final_succ, &succ_nodes);
Some(loop_succ)
} else {
None
}
};
debug_assert!(
graph_utils::strict_successors_of_set(&self.graph, &loop_nodes).len() <= 1
);
let loop_body = self.structure_acyclic_sese_region(loop_header, &loop_nodes);
let repl_ast = refinement::refine_loop::<A>(self.cctx, loop_body);
self.graph[loop_header] = CfgNode::Code(repl_ast);
if let Some(loop_succ) = loop_succ_opt {
self.graph.add_edge(loop_header, loop_succ, CfgEdge::True);
}
} else {
// acyclic
let region = graph_utils::dominated_by(&self.graph, self.entry, cur_node);
// single-block regions aren't interesting
if region.len() > 1 {
let succs = graph_utils::strict_successors_of_set(&self.graph, ®ion);
// `region` must have one or zero successors
if succs.len() <= 1 {
let opt_succ = succs.iter().next();
let repl_ast = self.structure_acyclic_sese_region(cur_node, ®ion);
self.graph[cur_node] = CfgNode::Code(repl_ast);
if let Some(succ) = opt_succ {
self.graph.add_edge(cur_node, succ, CfgEdge::True);
}
}
}
}
}
let ret = self.graph.remove_node(self.entry).unwrap();
debug_assert!(self.graph.node_count() == 0);
if let CfgNode::Code(ret) = ret {
(ret, self.actx)
} else {
panic!("last node wasn't a Code node")
}
}
/// Converts the given acyclic region headed by `header` into an `AstNode`.
fn structure_acyclic_sese_region(
&mut self,
header: NodeIndex,
region: &NodeSet,
) -> AstNode<'cd, A> {
let slice = graph_utils::slice(&self.graph, header, region);
let reaching_conds = self.reaching_conditions(&slice);
let mut region_graph =
StableDiGraph::with_capacity(slice.topo_order.len(), slice.edges.len());
let mut old_new_map = HashMap::with_capacity(slice.topo_order.len());
let mut region_conditions = Vec::new();
// move all region nodes into `region_graph`.
for &old_n in &slice.topo_order {
let cfg_node = mem::replace(&mut self.graph[old_n], CfgNode::Dummy("sasr replaced"));
if let CfgNode::Condition(c) = cfg_node {
// record all conditions in the region
region_conditions.push(c);
}
let new_node = match cfg_node {
// refinement needs to be able to see `Break`s
CfgNode::Code(AstNodeC::Break) => Some(AstNodeC::Break),
// other nodes should be opaque
CfgNode::Code(ast) => Some(AstNodeC::BasicBlock(ast)),
_ => None,
};
let new_n = region_graph.add_node((reaching_conds[&old_n], new_node));
old_new_map.insert(old_n, new_n);
}
let old_new_map = old_new_map;
// copy over edges
for e in &slice.edges {
let (src, dst) = self.graph.edge_endpoints(e).unwrap();
region_graph.add_edge(old_new_map[&src], old_new_map[&dst], ());
}
// remove region nodes from the cfg
for &n in &slice.topo_order {
// we don't want to remove `header` since that will also remove
// incoming edges, which we need to keep
if n != header {
let _removed = self.graph.remove_node(n);
debug_assert!(_removed.is_some());
}
}
let ast = refinement::refine::<RegionAstContext<A>>(
self.cctx,
region_graph,
old_new_map[&header],
);
let ast = dedup_conds::run(&mut self.actx, self.cctx, ®ion_conditions, ast);
let ast = RegionAstContext::<A>::export(ast);
refinement::simplify_ast_node::<A>(self.cctx, ast).unwrap_or_default()
}
/// Computes the reaching condition for every node in the given graph slice.
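///
/// Informally, the start node of the slice gets the condition `true`; every
/// other node's condition is the disjunction, over its incoming slice
/// edges, of the source node's reaching condition conjoined with the branch
/// condition carried by that edge (negated for the `False` branch of a
/// `Condition` node).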
fn reaching_conditions(
&self,
slice: &graph_utils::GraphSlice<NodeIndex, EdgeIndex>,
) -> HashMap<NodeIndex, Condition<'cd, A>> {
// {Node, Edge}Filtered don't implement IntoNeighborsDirected :(
// https://github.com/bluss/petgraph/pull/219
// Also EdgeFiltered<Reversed<_>, _> isn't Into{Neighbors, Edges}
// because Reversed<_> isn't IntoEdges
let mut ret = HashMap::with_capacity(slice.topo_order.len());
let mut iter = slice.topo_order.iter();
if let Some(&start) = iter.next() {
ret.insert(start, self.cctx.mk_true());
for &n in iter {
let reach_cond = self.cctx.mk_or_from_iter(
// manually restrict to slice
self.graph
.edges_directed(n, Incoming)
.filter(|e| slice.edges.contains(e.id()))
.map(|e| {
let src_cond = ret[&e.source()];
match (&self.graph[e.source()], e.weight()) {
(&CfgNode::Condition(c), CfgEdge::True) => {
self.cctx.mk_and(src_cond, self.cctx.mk_var(c))
}
(&CfgNode::Condition(c), CfgEdge::False) => self
.cctx
.mk_and(src_cond, self.cctx.mk_not(self.cctx.mk_var(c))),
(_, CfgEdge::True) => src_cond,
(_, CfgEdge::False) => self.cctx.mk_false(),
}
}),
);
let _old = ret.insert(n, reach_cond);
debug_assert!(_old.is_none());
}
}
ret
}
/// Transforms the loop into a single-entry loop.
/// Returns the new loop header.
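///
/// Informally: every edge entering the loop from outside is redirected
/// through a node that stores an entry number in a fresh structuring
/// variable and then jumps to a new header; the new header dispatches back
/// to the original entry targets through a cascade of `struct_var == k`
/// conditions, with entry number 0 reserved for the original header.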
fn funnel_abnormal_entries(&mut self, header: NodeIndex, loop_nodes: &NodeSet) -> NodeIndex {
let mut entry_map = HashMap::new();
for n in loop_nodes {
for e in self.graph.edges_directed(n, Incoming) {
if !loop_nodes.contains(e.source()) {
entry_map.entry(n).or_insert(Vec::new()).push(e.id());
}
}
}
// loop must be reachable, so the header must have entries
let header_entries = entry_map.remove(&header).unwrap();
debug_assert!(!header_entries.is_empty());
let abnormal_entry_map = entry_map;
if abnormal_entry_map.is_empty() {
// no abnormal entries
return header;
}
let abnormal_entry_iter = (1..).zip(&abnormal_entry_map);
let struct_var = self.actx.mk_fresh_var();
// make condition cascade
let new_header = {
let abnormal_entry_iter = abnormal_entry_iter.clone().map(|(n, (&t, _))| (n, t));
let dummy_preheader = self.graph.add_node(CfgNode::Dummy("loop \"preheader\""));
let mut prev_cascade_node = dummy_preheader;
let mut prev_entry_target = header;
let mut prev_entry_num = 0;
// we make the condition node for the *previous* entry target b/c
// the current one might be the last one, which shouldn't get a
// condition node because it's the only possible target
for (entry_num, entry_target) in abnormal_entry_iter {
let prev_cond_eq = self
.cctx
.new_var(self.actx.mk_cond_equals(&struct_var, prev_entry_num));
let cascade_node = self.graph.add_node(CfgNode::Condition(prev_cond_eq));
self.graph
.add_edge(prev_cascade_node, cascade_node, CfgEdge::False);
self.graph
.add_edge(cascade_node, prev_entry_target, CfgEdge::True);
let struct_reset = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock(
self.actx.mk_var_assign(&struct_var, 0),
)));
self.graph
.add_edge(struct_reset, entry_target, CfgEdge::True);
prev_cascade_node = cascade_node;
prev_entry_target = struct_reset;
prev_entry_num = entry_num;
}
self.graph
.add_edge(prev_cascade_node, prev_entry_target, CfgEdge::False);
// we always add an edge from dummy_preheader
let new_header = self.graph.neighbors(dummy_preheader).next().unwrap();
self.graph.remove_node(dummy_preheader);
new_header
};
// redirect entries
for (entry_num, entry_edges) in
iter::once((0, &header_entries)).chain(abnormal_entry_iter.map(|(n, (_, e))| (n, e)))
{
let struct_assign = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock(
self.actx.mk_var_assign(&struct_var, entry_num),
)));
self.graph
.add_edge(struct_assign, new_header, CfgEdge::True);
for &entry_edge in entry_edges {
graph_utils::retarget_edge(&mut self.graph, entry_edge, struct_assign);
}
}
new_header
}
/// Incrementally adds nodes dominated by the loop to the loop until
/// there's only one successor or there are no more nodes to add.
fn refine_loop(&self, loop_nodes: &mut NodeSet, succ_nodes: &mut NodeSet) {
// reuse this `NodeSet` so we avoid allocating
let mut new_nodes = NodeSet::new();
while succ_nodes.len() > 1 {
for n in &*succ_nodes {
if self
.graph
.neighbors_directed(n, Incoming)
.all(|pred| loop_nodes.contains(pred))
{
// post-pone removal from `succ_nodes` b/c rust ownership
loop_nodes.insert(n);
new_nodes.extend(self.graph.neighbors(n).filter(|&u| !loop_nodes.contains(u)));
}
}
// do the removal
succ_nodes.difference_with(&loop_nodes);
if new_nodes.is_empty() {
break;
}
succ_nodes.union_with(&new_nodes);
new_nodes.clear();
}
}
/// Transforms the loop so that all loop exits are `break`.
/// Returns the new loop successor.
fn funnel_abnormal_exits(
&mut self,
loop_nodes: &mut NodeSet,
final_succ: NodeIndex,
abn_succ_nodes: &NodeSet,
) -> NodeIndex {
// replace "normal" exit edges with "break"
{
let exit_edges: Vec<_> =
graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, final_succ)
.collect();
for exit_edge in exit_edges {
let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Break));
graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node);
loop_nodes.insert(break_node);
}
}
if abn_succ_nodes.is_empty() {
// no abnormal exits
return final_succ;
}
let abn_succ_iter = (1..).zip(abn_succ_nodes);
let struct_var = self.actx.mk_fresh_var_zeroed();
// replace abnormal exit edges with "break"
for (exit_num, exit_target) in abn_succ_iter.clone() {
let exit_edges: Vec<_> =
graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, exit_target)
.collect();
for exit_edge in exit_edges {
let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Seq(vec![
AstNodeC::BasicBlock(self.actx.mk_var_assign(&struct_var, exit_num)),
AstNodeC::Break,
])));
graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node);
loop_nodes.insert(break_node);
}
}
let mut cur_succ = final_succ;
// make condition cascade
for (exit_num, exit_target) in abn_succ_iter.clone() {
let cond = self
.cctx
.new_var(self.actx.mk_cond_equals(&struct_var, exit_num));
let cascade_node = self.graph.add_node(CfgNode::Condition(cond));
self.graph
.add_edge(cascade_node, exit_target, CfgEdge::True);
self.graph.add_edge(cascade_node, cur_succ, CfgEdge::False);
cur_succ = cascade_node;
}
cur_succ
}
}
struct RegionAstContext<'cd, A>(PhantomData<(&'cd (), A)>);
impl<'cd, A: AstContext> AstContext for RegionAstContext<'cd, A> {
type Block = AstNode<'cd, A>;
type Condition = A::Condition;
type BoolVariable = A::BoolVariable;
type Variable = A::Variable;
}
impl<'cd, A: AstContext> RegionAstContext<'cd, A> {
fn | export | identifier_name |
|
mod.rs | nodes must be reachable from `entry`
pub fn new(
graph: StableDiGraph<CfgNode<'cd, A>, CfgEdge>,
entry: NodeIndex,
cctx: CondContext<'cd, A>,
actx: A,
) -> Self {
let ret = Self {
graph,
entry,
cctx,
actx,
};
ret.check();
ret
}
#[cfg(not(debug_assertions))]
fn check(&self) {}
#[cfg(debug_assertions)]
fn check(&self) {
for n in self.graph.node_indices() {
assert!(graph_utils::is_source(&self.graph, n) == (n == self.entry));
match &self.graph[n] {
CfgNode::Code(_) => assert!(self.graph.neighbors(n).count() <= 1),
CfgNode::Condition(_) => assert!(self.graph.neighbors(n).count() == 2),
CfgNode::Dummy(s) => panic!("found `CfgNode::Dummy({:?})`", s),
}
}
}
pub fn structure_whole(mut self) -> (AstNode<'cd, A>, A) {
let mut loop_headers = NodeSet::new();
let mut podfs_trace = Vec::new();
graph_utils::depth_first_search(&self.graph, self.entry, |ev| {
use self::graph_utils::DfsEvent::*;
match ev {
BackEdge(e) => {
loop_headers.insert(e.target());
}
Finish(n) => podfs_trace.push(n),
_ => (),
}
});
let (podfs_trace, loop_headers) = (podfs_trace, loop_headers);
let mut visited = NodeSet::with_capacity(self.graph.node_bound());
for &cur_node in &podfs_trace {
visited.insert(cur_node);
if loop_headers.contains(cur_node) {
// loop
// find latch nodes
let mut backedges = EdgeSet::new();
let mut latch_nodes = NodeSet::new();
for edge in self.graph.edges_directed(cur_node, Incoming) {
// backedges are always from original graph nodes
if visited.contains(edge.source()) {
backedges.insert(edge.id());
latch_nodes.insert(edge.source());
}
}
// remove backedges
for e in &backedges {
self.graph.remove_edge(e);
}
// regionify loop
let mut loop_nodes = graph_utils::slice(&self.graph, cur_node, &latch_nodes).nodes;
let loop_header = self.funnel_abnormal_entries(cur_node, &loop_nodes);
let mut succ_nodes =
graph_utils::strict_successors_of_set(&self.graph, &loop_nodes);
self.refine_loop(&mut loop_nodes, &mut succ_nodes);
let loop_succ_opt = {
// pick the successor with the smallest post-order
let final_succ_opt = DfsPostOrder::new(&self.graph, self.entry)
.iter(&self.graph)
.find(|&n| succ_nodes.contains(n));
if let Some(final_succ) = final_succ_opt {
succ_nodes.remove(final_succ);
let loop_succ =
self.funnel_abnormal_exits(&mut loop_nodes, final_succ, &succ_nodes);
Some(loop_succ)
} else {
None
}
};
debug_assert!(
graph_utils::strict_successors_of_set(&self.graph, &loop_nodes).len() <= 1
);
let loop_body = self.structure_acyclic_sese_region(loop_header, &loop_nodes);
let repl_ast = refinement::refine_loop::<A>(self.cctx, loop_body);
self.graph[loop_header] = CfgNode::Code(repl_ast);
if let Some(loop_succ) = loop_succ_opt {
self.graph.add_edge(loop_header, loop_succ, CfgEdge::True);
}
} else {
// acyclic
let region = graph_utils::dominated_by(&self.graph, self.entry, cur_node);
// single-block regions aren't interesting
if region.len() > 1 {
let succs = graph_utils::strict_successors_of_set(&self.graph, ®ion);
// `region` must have one or zero successors
if succs.len() <= 1 {
let opt_succ = succs.iter().next();
let repl_ast = self.structure_acyclic_sese_region(cur_node, ®ion);
self.graph[cur_node] = CfgNode::Code(repl_ast);
if let Some(succ) = opt_succ {
self.graph.add_edge(cur_node, succ, CfgEdge::True);
}
}
}
}
}
let ret = self.graph.remove_node(self.entry).unwrap();
debug_assert!(self.graph.node_count() == 0);
if let CfgNode::Code(ret) = ret {
(ret, self.actx)
} else {
panic!("last node wasn't a Code node")
}
}
/// Converts the given acyclic region headed by `header` into an `AstNode`.
fn structure_acyclic_sese_region(
&mut self,
header: NodeIndex,
region: &NodeSet,
) -> AstNode<'cd, A> {
let slice = graph_utils::slice(&self.graph, header, region);
let reaching_conds = self.reaching_conditions(&slice);
let mut region_graph =
StableDiGraph::with_capacity(slice.topo_order.len(), slice.edges.len());
let mut old_new_map = HashMap::with_capacity(slice.topo_order.len());
let mut region_conditions = Vec::new();
// move all region nodes into `region_graph`.
for &old_n in &slice.topo_order {
let cfg_node = mem::replace(&mut self.graph[old_n], CfgNode::Dummy("sasr replaced"));
if let CfgNode::Condition(c) = cfg_node {
// record all conditions in the region
region_conditions.push(c);
}
let new_node = match cfg_node {
// refinement needs to be able to see `Break`s
CfgNode::Code(AstNodeC::Break) => Some(AstNodeC::Break),
// other nodes should be opaque
CfgNode::Code(ast) => Some(AstNodeC::BasicBlock(ast)),
_ => None,
};
let new_n = region_graph.add_node((reaching_conds[&old_n], new_node));
old_new_map.insert(old_n, new_n);
}
let old_new_map = old_new_map;
// copy over edges
for e in &slice.edges {
let (src, dst) = self.graph.edge_endpoints(e).unwrap();
region_graph.add_edge(old_new_map[&src], old_new_map[&dst], ());
}
// remove region nodes from the cfg
for &n in &slice.topo_order {
// we don't want to remove `header` since that will also remove
// incoming edges, which we need to keep
if n != header {
let _removed = self.graph.remove_node(n);
debug_assert!(_removed.is_some());
}
}
let ast = refinement::refine::<RegionAstContext<A>>(
self.cctx,
region_graph,
old_new_map[&header],
);
let ast = dedup_conds::run(&mut self.actx, self.cctx, ®ion_conditions, ast);
let ast = RegionAstContext::<A>::export(ast);
refinement::simplify_ast_node::<A>(self.cctx, ast).unwrap_or_default()
}
/// Computes the reaching condition for every node in the given graph slice.
fn reaching_conditions(
&self,
slice: &graph_utils::GraphSlice<NodeIndex, EdgeIndex>,
) -> HashMap<NodeIndex, Condition<'cd, A>> {
// {Node, Edge}Filtered don't implement IntoNeighborsDirected :(
// https://github.com/bluss/petgraph/pull/219
// Also EdgeFiltered<Reversed<_>, _> isn't Into{Neighbors, Edges}
// because Reversed<_> isn't IntoEdges
let mut ret = HashMap::with_capacity(slice.topo_order.len());
let mut iter = slice.topo_order.iter();
if let Some(&start) = iter.next() {
ret.insert(start, self.cctx.mk_true());
for &n in iter {
let reach_cond = self.cctx.mk_or_from_iter(
// manually restrict to slice
self.graph
.edges_directed(n, Incoming)
.filter(|e| slice.edges.contains(e.id()))
.map(|e| {
let src_cond = ret[&e.source()];
match (&self.graph[e.source()], e.weight()) {
(&CfgNode::Condition(c), CfgEdge::True) => {
self.cctx.mk_and(src_cond, self.cctx.mk_var(c))
}
(&CfgNode::Condition(c), CfgEdge::False) => self
.cctx
.mk_and(src_cond, self.cctx.mk_not(self.cctx.mk_var(c))),
(_, CfgEdge::True) => src_cond,
(_, CfgEdge::False) => self.cctx.mk_false(),
}
}),
);
let _old = ret.insert(n, reach_cond);
debug_assert!(_old.is_none());
}
}
ret
}
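// Worked example (illustrative): for a slice shaped like
//
//        header (Condition c)
//        /  True         \  False
//       A                 B
//        \                /
//         '----> join <--'
//
// the map built above is
//   header -> true
//   A      -> c
//   B      -> not c
//   join   -> c or (not c)
// i.e. each node's reaching condition is the OR, over its in-edges inside the
// slice, of the source's condition AND'ed with that edge's condition.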
/// Transforms the loop into a single-entry loop.
/// Returns the new loop header.
fn funnel_abnormal_entries(&mut self, header: NodeIndex, loop_nodes: &NodeSet) -> NodeIndex {
let mut entry_map = HashMap::new();
for n in loop_nodes {
for e in self.graph.edges_directed(n, Incoming) {
if !loop_nodes.contains(e.source()) {
entry_map.entry(n).or_insert(Vec::new()).push(e.id());
}
}
}
// loop must be reachable, so the header must have entries
let header_entries = entry_map.remove(&header).unwrap();
debug_assert!(!header_entries.is_empty());
let abnormal_entry_map = entry_map;
if abnormal_entry_map.is_empty() {
// no abnormal entries
return header;
}
let abnormal_entry_iter = (1..).zip(&abnormal_entry_map);
let struct_var = self.actx.mk_fresh_var();
// make condition cascade
let new_header = {
let abnormal_entry_iter = abnormal_entry_iter.clone().map(|(n, (&t, _))| (n, t));
let dummy_preheader = self.graph.add_node(CfgNode::Dummy("loop \"preheader\""));
let mut prev_cascade_node = dummy_preheader;
let mut prev_entry_target = header;
let mut prev_entry_num = 0;
// we make the condition node for the *previous* entry target b/c
// the current one might be the last one, which shouldn't get a
// condition node because it's the only possible target
for (entry_num, entry_target) in abnormal_entry_iter {
let prev_cond_eq = self
.cctx
.new_var(self.actx.mk_cond_equals(&struct_var, prev_entry_num));
let cascade_node = self.graph.add_node(CfgNode::Condition(prev_cond_eq));
self.graph
.add_edge(prev_cascade_node, cascade_node, CfgEdge::False);
self.graph
.add_edge(cascade_node, prev_entry_target, CfgEdge::True);
let struct_reset = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock(
self.actx.mk_var_assign(&struct_var, 0),
)));
self.graph
.add_edge(struct_reset, entry_target, CfgEdge::True);
prev_cascade_node = cascade_node;
prev_entry_target = struct_reset;
prev_entry_num = entry_num;
}
self.graph
.add_edge(prev_cascade_node, prev_entry_target, CfgEdge::False);
// we always add an edge from dummy_preheader
let new_header = self.graph.neighbors(dummy_preheader).next().unwrap();
self.graph.remove_node(dummy_preheader);
new_header
};
// redirect entries
for (entry_num, entry_edges) in
iter::once((0, &header_entries)).chain(abnormal_entry_iter.map(|(n, (_, e))| (n, e)))
{
let struct_assign = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock(
self.actx.mk_var_assign(&struct_var, entry_num),
)));
self.graph
.add_edge(struct_assign, new_header, CfgEdge::True);
for &entry_edge in entry_edges {
graph_utils::retarget_edge(&mut self.graph, entry_edge, struct_assign);
}
}
new_header
}
/// Incrementally adds nodes dominated by the loop to the loop until
/// there's only one successor or there are no more nodes to add.
fn refine_loop(&self, loop_nodes: &mut NodeSet, succ_nodes: &mut NodeSet) {
// reuse this `NodeSet` so we avoid allocating
let mut new_nodes = NodeSet::new();
while succ_nodes.len() > 1 {
for n in &*succ_nodes {
if self
.graph
.neighbors_directed(n, Incoming)
.all(|pred| loop_nodes.contains(pred))
{
// postpone removal from `succ_nodes`: it is still borrowed by this loop
loop_nodes.insert(n);
new_nodes.extend(self.graph.neighbors(n).filter(|&u| !loop_nodes.contains(u)));
}
}
// do the removal
succ_nodes.difference_with(&loop_nodes);
if new_nodes.is_empty() {
break;
}
succ_nodes.union_with(&new_nodes);
new_nodes.clear();
}
}
/// Transforms the loop so that all loop exits are `break`.
/// Returns the new loop successor.
fn funnel_abnormal_exits(
&mut self,
loop_nodes: &mut NodeSet,
final_succ: NodeIndex,
abn_succ_nodes: &NodeSet,
) -> NodeIndex |
// replace abnormal exit edges with "break"
for (exit_num, exit_target) in abn_succ_iter.clone() {
let exit_edges: Vec<_> =
graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, exit_target)
.collect();
for exit_edge in exit_edges {
let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Seq(vec![
AstNodeC::BasicBlock(self.actx.mk_var_assign(&struct_var, exit_num)),
AstNodeC::Break,
])));
graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node);
loop_nodes.insert(break_node);
| {
// replace "normal" exit edges with "break"
{
let exit_edges: Vec<_> =
graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, final_succ)
.collect();
for exit_edge in exit_edges {
let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Break));
graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node);
loop_nodes.insert(break_node);
}
}
if abn_succ_nodes.is_empty() {
// no abnormal exits
return final_succ;
}
let abn_succ_iter = (1..).zip(abn_succ_nodes);
let struct_var = self.actx.mk_fresh_var_zeroed(); | identifier_body |
mod.rs | //
// Copyright 2020 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Functionality for different Node types.
use crate::{
permissions::PermissionsConfiguration, NodePrivilege, RuntimeProxy, SecureServerConfiguration,
SignatureTable,
};
use log::warn;
use oak_abi::proto::oak::application::{
node_configuration::ConfigType, ApplicationConfiguration, CryptoConfiguration,
LogConfiguration, NodeConfiguration,
};
use std::net::AddrParseError;
use tokio::sync::oneshot;
mod crypto;
pub mod grpc;
pub mod http;
mod invocation;
mod logger;
mod roughtime;
mod storage;
mod wasm;
/// Trait encapsulating execution of a Node or pseudo-Node.
pub trait Node: Send {
/// Returns a name for this type of Node.
fn node_type(&self) -> &'static str;
/// Returns a value indicating the isolation of a Node. If a Node is sandboxed (e.g. a Wasm
/// node), the sandbox restricts external communications. Uncontrolled nodes (e.g. pseudo-Nodes
/// that are part of the runtime) have no restrictions enforced on external communications.
///
/// Unless a node uses a trusted sandbox to restrict communications, this function should always
/// return [`NodeIsolation::Uncontrolled`].
fn isolation(&self) -> NodeIsolation {
NodeIsolation::Uncontrolled
}
/// Executes the Node, using the provided `Runtime` reference and initial handle. The method
/// should continue execution until the Node terminates.
///
/// `notify_receiver` receives a notification from the Runtime upon termination. This
/// notification can be used by the Node to gracefully shut down.
fn run(
self: Box<Self>,
runtime: RuntimeProxy,
handle: oak_abi::Handle,
notify_receiver: oneshot::Receiver<()>,
);
}
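// Illustrative sketch (added for exposition, not part of the original source):
// a minimal pseudo-Node that does no work and terminates as soon as `run` is
// called. It only uses items already imported in this module; a real Node
// would service its channels until `notify_receiver` fires.
#[allow(dead_code)]
struct NoopNode {
    node_name: String,
}

impl Node for NoopNode {
    fn node_type(&self) -> &'static str {
        "noop"
    }

    // `isolation` keeps the default `NodeIsolation::Uncontrolled` implementation.

    fn run(
        self: Box<Self>,
        _runtime: RuntimeProxy,
        _handle: oak_abi::Handle,
        _notify_receiver: oneshot::Receiver<()>,
    ) {
        // Returning from `run` is how a Node terminates.
        warn!("{}: noop node exiting without doing any work", self.node_name);
    }
}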
/// Indication of the level of isolation of a node.
#[derive(Debug)]
pub enum NodeIsolation {
Sandboxed,
Uncontrolled,
}
/// An enumeration for errors occurring when creating a new [`Node`] instance.
// TODO(#1027): Improve or delete this enum.
#[derive(Debug)]
pub enum ConfigurationError {
AddressParsingError(AddrParseError),
IncorrectPort,
IncorrectURI,
NoHostElement,
IncorrectWebAssemblyModuleName,
InvalidNodeConfiguration,
WasmiModuleInializationError(wasmi::Error),
NodeCreationNotPermitted,
}
impl From<AddrParseError> for ConfigurationError {
fn from(error: AddrParseError) -> Self {
ConfigurationError::AddressParsingError(error)
}
}
impl std::fmt::Display for ConfigurationError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
ConfigurationError::AddressParsingError(e) => {
write!(f, "Failed to parse an address: {}", e)
}
ConfigurationError::IncorrectPort => write!(f, "Incorrect port (must be > 1023)"),
ConfigurationError::IncorrectURI => write!(f, "Incorrect URI"),
ConfigurationError::NoHostElement => write!(f, "URI doesn't contain the Host element"),
ConfigurationError::IncorrectWebAssemblyModuleName => {
write!(f, "Incorrect WebAssembly module name")
}
ConfigurationError::InvalidNodeConfiguration => write!(f, "Invalid NodeConfiguration"),
ConfigurationError::WasmiModuleInializationError(e) => {
write!(f, "Failed to initialize wasmi::Module: {}", e)
}
ConfigurationError::NodeCreationNotPermitted => {
write!(f, "Node creation not permitted")
}
}
}
}
/// Implementation of [`NodeFactory`] for server-like Oak applications running on cloud
/// environments with WebAssembly support.
pub struct ServerNodeFactory {
pub application_configuration: ApplicationConfiguration,
pub permissions_configuration: PermissionsConfiguration,
pub secure_server_configuration: SecureServerConfiguration,
pub signature_table: SignatureTable,
pub kms_credentials: Option<std::path::PathBuf>,
}
impl NodeFactory<NodeConfiguration> for ServerNodeFactory {
fn create_node(
&self,
node_name: &str,
node_configuration: &NodeConfiguration,
) -> Result<CreatedNode, ConfigurationError> {
if !self
.permissions_configuration
.allowed_creation(node_configuration)
// TODO(#1027): Use anyhow or an improved ConfigurationError
.map_err(|_| ConfigurationError::InvalidNodeConfiguration)?
{
return Err(ConfigurationError::NodeCreationNotPermitted);
}
match &node_configuration.config_type {
Some(ConfigType::CryptoConfig(CryptoConfiguration {})) => Ok(CreatedNode {
instance: Box::new(crypto::CryptoNode::new(
node_name,
self.kms_credentials.clone(),
)),
// TODO(#1842): sort out IFC interactions so that the crypto pseudo-Node can receive
// labelled plaintext data and emit unlabelled encrypted data (which would probably
// mean top_privilege() goes here).
privilege: NodePrivilege::default(),
}),
Some(ConfigType::LogConfig(LogConfiguration {})) => Ok(CreatedNode {
instance: Box::new(logger::LogNode::new(node_name)),
// Allow the logger Node to declassify log messages in debug builds only.
#[cfg(feature = "oak-unsafe")]
privilege: NodePrivilege::top_privilege(),
// The logger must not have any declassification privilege in non-debug builds.
#[cfg(not(feature = "oak-unsafe"))]
privilege: NodePrivilege::default(),
}),
Some(ConfigType::GrpcServerConfig(config)) => {
let grpc_configuration = self
.secure_server_configuration
.grpc_config
.clone()
.expect("no gRPC identity provided to Oak Runtime");
Ok(CreatedNode {
instance: Box::new(grpc::server::GrpcServerNode::new(
node_name,
config.clone(),
grpc_configuration
.grpc_server_tls_identity
.as_ref()
.expect("no gRPC server TLS identity provided to Oak Runtime")
.clone(),
grpc_configuration.oidc_client_info.clone(),
)?),
// This node needs to have `top` privilege to be able to declassify data tagged
// with any arbitrary user identities.
// TODO(#1631): When we have a separate top for each sub-lattice, this should be
// changed to the top of the identity sub-lattice.
privilege: NodePrivilege::top_privilege(),
})
}
Some(ConfigType::WasmConfig(config)) => |
Some(ConfigType::GrpcClientConfig(config)) => {
let grpc_client_root_tls_certificate = self
.secure_server_configuration
.clone()
.grpc_config
.expect("no gRPC identity provided to Oak Runtime")
.grpc_client_root_tls_certificate
.expect("no root TLS certificate provided to Oak Runtime");
let uri = config.uri.parse().map_err(|err| {
warn!("could not parse URI {}: {:?}", config.uri, err);
ConfigurationError::IncorrectURI
})?;
Ok(CreatedNode {
instance: Box::new(grpc::client::GrpcClientNode::new(
node_name,
&uri,
grpc_client_root_tls_certificate,
)?),
privilege: grpc::client::get_privilege(&uri),
})
}
Some(ConfigType::RoughtimeClientConfig(config)) => Ok(CreatedNode {
instance: Box::new(roughtime::RoughtimeClientNode::new(node_name, config)),
privilege: NodePrivilege::default(),
}),
Some(ConfigType::StorageConfig(_config)) => Ok(CreatedNode {
instance: Box::new(storage::StorageNode::new(node_name)),
privilege: NodePrivilege::default(),
}),
Some(ConfigType::HttpServerConfig(config)) => {
let tls_config = self
.secure_server_configuration
.http_config
.clone()
.expect("no TLS configuration for HTTP servers provided to Oak Runtime")
.tls_config;
Ok(CreatedNode {
instance: Box::new(http::server::HttpServerNode::new(
node_name,
config.clone(),
tls_config,
)?),
// This node needs to have `top` privilege to be able to declassify data tagged
// with any arbitrary user identities.
// TODO(#1631): When we have a separate top for each sub-lattice, this should be
// changed to the top of the `identity` sub-lattice.
privilege: NodePrivilege::top_privilege(),
})
}
Some(ConfigType::HttpClientConfig(config)) => {
let http_client_root_tls_certificate = self
.secure_server_configuration
.http_config
.clone()
.expect("no HTTP configuration provided to Oak Runtime")
.http_client_root_tls_certificate
.expect("no root TLS certificate provided to Oak Runtime");
Ok(CreatedNode {
instance: Box::new(http::client::HttpClientNode::new(
node_name,
config.clone(),
http_client_root_tls_certificate,
)?),
privilege: http::client::get_privilege(&config.authority),
})
}
None => Err(ConfigurationError::InvalidNodeConfiguration),
}
}
}
/// A holder struct containing a [`Node`] instance, together with the [`NodePrivilege`] that is
/// assigned to it.
///
/// The reason the privilege is returned here and not by the node itself is that it should be
/// determined by the [`NodeFactory`] instance, so that an untrusted Node may not be able to
/// single-handedly increase its own privilege. In this codebase all the Nodes are defined alongside
/// each other, so this does not make much difference, but if Nodes were loaded dynamically or from
/// untrusted libraries, then it would be more obvious that we would still want to keep the
/// privilege assignment here.
pub struct CreatedNode {
pub instance: Box<dyn Node>,
pub privilege: NodePrivilege,
}
/// A trait implemented by a concrete factory of nodes that creates nodes based on a Node
/// configuration of type `T`.
pub trait NodeFactory<T> {
/// Creates a new [`Node`] instance with the specified name and based on the provided Node
/// configuration information.
fn create_node(
&self,
node_name: &str,
node_configuration: &T,
) -> Result<CreatedNode, ConfigurationError>;
}
// the `linear-handles` feature makes [`Sender`] and [`Receiver`] non-`Copy`, so we must use `Clone`
// with the feature turned on. But doing so with the feature off will make clippy complain, so we
// have this simple function that always uses the appropriate impl for copying these types.
// TODO(#1854): Remove this once linear-handles are the default.
#[cfg(not(feature = "linear-handles"))]
pub(crate) fn copy_or_clone<T: Copy>(t: &T) -> T {
*t
}
#[cfg(feature = "linear-handles")]
pub(crate) fn copy_or_clone<T: Clone>(t: &T) -> T {
t.clone()
}
| {
let wasm_module_bytes = self
.application_configuration
.wasm_modules
.get(&config.wasm_module_name)
.ok_or(ConfigurationError::IncorrectWebAssemblyModuleName)?;
Ok(CreatedNode {
instance: Box::new(wasm::WasmNode::new(
node_name,
wasm_module_bytes,
config.clone(),
)?),
privilege: wasm::get_privilege(wasm_module_bytes, &self.signature_table),
})
} | conditional_block |
mod.rs | //
// Copyright 2020 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Functionality for different Node types.
use crate::{
permissions::PermissionsConfiguration, NodePrivilege, RuntimeProxy, SecureServerConfiguration,
SignatureTable,
};
use log::warn;
use oak_abi::proto::oak::application::{
node_configuration::ConfigType, ApplicationConfiguration, CryptoConfiguration,
LogConfiguration, NodeConfiguration,
};
use std::net::AddrParseError;
use tokio::sync::oneshot;
mod crypto;
pub mod grpc;
pub mod http;
mod invocation;
mod logger;
mod roughtime;
mod storage;
mod wasm;
/// Trait encapsulating execution of a Node or pseudo-Node.
pub trait Node: Send {
/// Returns a name for this type of Node.
fn node_type(&self) -> &'static str;
/// Returns a value indicating the isolation of a Node. If a Node is sandboxed (e.g. a Wasm
/// node), the sandbox restricts external communications. Uncontrolled nodes (e.g. pseudo-Nodes
/// that are part of the runtime) have no restrictions enforced on external communications.
///
/// Unless a node uses a trusted sandbox to restrict communications, this function should always
/// return [`NodeIsolation::Uncontrolled`].
fn isolation(&self) -> NodeIsolation {
NodeIsolation::Uncontrolled
}
/// Executes the Node, using the provided `Runtime` reference and initial handle. The method
/// should continue execution until the Node terminates.
///
/// `notify_receiver` receives a notification from the Runtime upon termination. This
/// notification can be used by the Node to gracefully shut down.
fn run(
self: Box<Self>,
runtime: RuntimeProxy,
handle: oak_abi::Handle,
notify_receiver: oneshot::Receiver<()>,
);
}
/// Indication of the level of isolation of a node.
#[derive(Debug)]
pub enum NodeIsolation {
Sandboxed,
Uncontrolled,
}
/// An enumeration for errors occurring when creating a new [`Node`] instance.
// TODO(#1027): Improve or delete this enum.
#[derive(Debug)]
pub enum ConfigurationError {
AddressParsingError(AddrParseError),
IncorrectPort,
IncorrectURI,
NoHostElement,
IncorrectWebAssemblyModuleName,
InvalidNodeConfiguration,
WasmiModuleInializationError(wasmi::Error),
NodeCreationNotPermitted,
}
impl From<AddrParseError> for ConfigurationError {
fn from(error: AddrParseError) -> Self {
ConfigurationError::AddressParsingError(error)
}
}
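// Illustrative note (not in the original source): this `From` impl is what
// lets address parsing bubble up with `?`, e.g.
//
//     fn parse_addr(s: &str) -> Result<std::net::SocketAddr, ConfigurationError> {
//         Ok(s.parse()?) // an AddrParseError converts automatically
//     }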
impl std::fmt::Display for ConfigurationError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
ConfigurationError::AddressParsingError(e) => {
write!(f, "Failed to parse an address: {}", e)
}
ConfigurationError::IncorrectPort => write!(f, "Incorrect port (must be > 1023)"),
ConfigurationError::IncorrectURI => write!(f, "Incorrect URI"),
ConfigurationError::NoHostElement => write!(f, "URI doesn't contain the Host element"),
ConfigurationError::IncorrectWebAssemblyModuleName => {
write!(f, "Incorrect WebAssembly module name")
}
ConfigurationError::InvalidNodeConfiguration => write!(f, "Invalid NodeConfiguration"),
ConfigurationError::WasmiModuleInializationError(e) => {
write!(f, "Failed to initialize wasmi::Module: {}", e)
}
ConfigurationError::NodeCreationNotPermitted => {
write!(f, "Node creation not permitted")
}
}
}
}
/// Implementation of [`NodeFactory`] for server-like Oak applications running on cloud
/// environments with WebAssembly support.
pub struct ServerNodeFactory {
pub application_configuration: ApplicationConfiguration,
pub permissions_configuration: PermissionsConfiguration,
pub secure_server_configuration: SecureServerConfiguration,
pub signature_table: SignatureTable,
pub kms_credentials: Option<std::path::PathBuf>,
}
impl NodeFactory<NodeConfiguration> for ServerNodeFactory {
fn create_node(
&self,
node_name: &str,
node_configuration: &NodeConfiguration,
) -> Result<CreatedNode, ConfigurationError> {
if !self
.permissions_configuration
.allowed_creation(node_configuration)
// TODO(#1027): Use anyhow or an improved ConfigurationError
.map_err(|_| ConfigurationError::InvalidNodeConfiguration)?
{
return Err(ConfigurationError::NodeCreationNotPermitted);
}
match &node_configuration.config_type {
Some(ConfigType::CryptoConfig(CryptoConfiguration {})) => Ok(CreatedNode {
instance: Box::new(crypto::CryptoNode::new(
node_name,
self.kms_credentials.clone(),
)),
// TODO(#1842): sort out IFC interactions so that the crypto pseudo-Node can receive
// labelled plaintext data and emit unlabelled encrypted data (which would probably
// mean top_privilege() goes here).
privilege: NodePrivilege::default(),
}),
Some(ConfigType::LogConfig(LogConfiguration {})) => Ok(CreatedNode {
instance: Box::new(logger::LogNode::new(node_name)),
// Allow the logger Node to declassify log messages in debug builds only.
#[cfg(feature = "oak-unsafe")]
privilege: NodePrivilege::top_privilege(),
// The logger must not have any declassification privilege in non-debug builds.
#[cfg(not(feature = "oak-unsafe"))]
privilege: NodePrivilege::default(),
}),
Some(ConfigType::GrpcServerConfig(config)) => {
let grpc_configuration = self
.secure_server_configuration
.grpc_config
.clone()
.expect("no gRPC identity provided to Oak Runtime");
Ok(CreatedNode {
instance: Box::new(grpc::server::GrpcServerNode::new(
node_name,
config.clone(),
grpc_configuration
.grpc_server_tls_identity
.as_ref()
.expect("no gRPC server TLS identity provided to Oak Runtime")
.clone(),
grpc_configuration.oidc_client_info.clone(),
)?),
// This node needs to have `top` privilege to be able to declassify data tagged
// with any arbitrary user identities.
// TODO(#1631): When we have a separate top for each sub-lattice, this should be
// changed to the top of the identity sub-lattice.
privilege: NodePrivilege::top_privilege(),
})
}
Some(ConfigType::WasmConfig(config)) => {
let wasm_module_bytes = self
.application_configuration
.wasm_modules
.get(&config.wasm_module_name)
.ok_or(ConfigurationError::IncorrectWebAssemblyModuleName)?;
Ok(CreatedNode {
instance: Box::new(wasm::WasmNode::new(
node_name,
wasm_module_bytes,
config.clone(),
)?),
privilege: wasm::get_privilege(wasm_module_bytes, &self.signature_table),
})
}
Some(ConfigType::GrpcClientConfig(config)) => {
let grpc_client_root_tls_certificate = self
.secure_server_configuration
.clone()
.grpc_config
.expect("no gRPC identity provided to Oak Runtime")
.grpc_client_root_tls_certificate
.expect("no root TLS certificate provided to Oak Runtime");
let uri = config.uri.parse().map_err(|err| {
warn!("could not parse URI {}: {:?}", config.uri, err);
ConfigurationError::IncorrectURI
})?;
Ok(CreatedNode {
instance: Box::new(grpc::client::GrpcClientNode::new(
node_name,
&uri,
grpc_client_root_tls_certificate,
)?),
privilege: grpc::client::get_privilege(&uri), | privilege: NodePrivilege::default(),
}),
Some(ConfigType::StorageConfig(_config)) => Ok(CreatedNode {
instance: Box::new(storage::StorageNode::new(node_name)),
privilege: NodePrivilege::default(),
}),
Some(ConfigType::HttpServerConfig(config)) => {
let tls_config = self
.secure_server_configuration
.http_config
.clone()
.expect("no TLS configuration for HTTP servers provided to Oak Runtime")
.tls_config;
Ok(CreatedNode {
instance: Box::new(http::server::HttpServerNode::new(
node_name,
config.clone(),
tls_config,
)?),
// This node needs to have `top` privilege to be able to declassify data tagged
// with any arbitrary user identities.
// TODO(#1631): When we have a separate top for each sub-lattice, this should be
// changed to the top of the `identity` sub-lattice.
privilege: NodePrivilege::top_privilege(),
})
}
Some(ConfigType::HttpClientConfig(config)) => {
let http_client_root_tls_certificate = self
.secure_server_configuration
.http_config
.clone()
.expect("no HTTP configuration provided to Oak Runtime")
.http_client_root_tls_certificate
.expect("no root TLS certificate provided to Oak Runtime");
Ok(CreatedNode {
instance: Box::new(http::client::HttpClientNode::new(
node_name,
config.clone(),
http_client_root_tls_certificate,
)?),
privilege: http::client::get_privilege(&config.authority),
})
}
None => Err(ConfigurationError::InvalidNodeConfiguration),
}
}
}
/// A holder struct containing a [`Node`] instance, together with the [`NodePrivilege`] that is
/// assigned to it.
///
/// The reason the privilege is returned here and not by the node itself is that it should be
/// determined by the [`NodeFactory`] instance, so that an untrusted Node may not be able to
/// single-handedly increase its own privilege. In this codebase all the Nodes are defined alongside
/// each other, so this does not make much difference, but if Nodes were loaded dynamically or from
/// untrusted libraries, then it would be more obvious that we would still want to keep the
/// privilege assignment here.
pub struct CreatedNode {
pub instance: Box<dyn Node>,
pub privilege: NodePrivilege,
}
/// A trait implemented by a concrete factory of nodes that creates nodes based on a Node
/// configuration of type `T`.
pub trait NodeFactory<T> {
/// Creates a new [`Node`] instance with the specified name and based on the provided Node
/// configuration information.
fn create_node(
&self,
node_name: &str,
node_configuration: &T,
) -> Result<CreatedNode, ConfigurationError>;
}
// the `linear-handles` feature makes [`Sender`] and [`Receiver`] non-`Copy`, so we must use `Clone`
// with the feature turned on. But doing so with the feature off will make clippy complain, so we
// have this simple function that always uses the appropriate impl for copying these types.
// TODO(#1854): Remove this once linear-handles are the default.
#[cfg(not(feature = "linear-handles"))]
pub(crate) fn copy_or_clone<T: Copy>(t: &T) -> T {
*t
}
#[cfg(feature = "linear-handles")]
pub(crate) fn copy_or_clone<T: Clone>(t: &T) -> T {
t.clone()
} | })
}
Some(ConfigType::RoughtimeClientConfig(config)) => Ok(CreatedNode {
instance: Box::new(roughtime::RoughtimeClientNode::new(node_name, config)), | random_line_split |
mod.rs | //
// Copyright 2020 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Functionality for different Node types.
use crate::{
permissions::PermissionsConfiguration, NodePrivilege, RuntimeProxy, SecureServerConfiguration,
SignatureTable,
};
use log::warn;
use oak_abi::proto::oak::application::{
node_configuration::ConfigType, ApplicationConfiguration, CryptoConfiguration,
LogConfiguration, NodeConfiguration,
};
use std::net::AddrParseError;
use tokio::sync::oneshot;
mod crypto;
pub mod grpc;
pub mod http;
mod invocation;
mod logger;
mod roughtime;
mod storage;
mod wasm;
/// Trait encapsulating execution of a Node or pseudo-Node.
pub trait Node: Send {
/// Returns a name for this type of Node.
fn node_type(&self) -> &'static str;
/// Returns a value indicating the isolation of a Node. If a Node is sandboxed (e.g. a Wasm
/// node), the sandbox restricts external communications. Uncontrolled nodes (e.g. pseudo-Nodes
/// that are part of the runtime) have no restrictions enforced on external communications.
///
/// Unless a node uses a trusted sandbox to restrict communications, this function should always
/// return [`NodeIsolation::Uncontrolled`].
fn isolation(&self) -> NodeIsolation {
NodeIsolation::Uncontrolled
}
/// Executes the Node, using the provided `Runtime` reference and initial handle. The method
/// should continue execution until the Node terminates.
///
/// `notify_receiver` receives a notification from the Runtime upon termination. This
/// notification can be used by the Node to gracefully shut down.
fn run(
self: Box<Self>,
runtime: RuntimeProxy,
handle: oak_abi::Handle,
notify_receiver: oneshot::Receiver<()>,
);
}
/// Indication of the level of isolation of a node.
#[derive(Debug)]
pub enum NodeIsolation {
Sandboxed,
Uncontrolled,
}
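// For example (illustrative), a sandboxing Node such as the Wasm pseudo-Node
// would override the trait default from above:
//
//     fn isolation(&self) -> NodeIsolation {
//         NodeIsolation::Sandboxed
//     }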
/// An enumeration for errors occurring when creating a new [`Node`] instance.
// TODO(#1027): Improve or delete this enum.
#[derive(Debug)]
pub enum ConfigurationError {
AddressParsingError(AddrParseError),
IncorrectPort,
IncorrectURI,
NoHostElement,
IncorrectWebAssemblyModuleName,
InvalidNodeConfiguration,
WasmiModuleInializationError(wasmi::Error),
NodeCreationNotPermitted,
}
impl From<AddrParseError> for ConfigurationError {
fn from(error: AddrParseError) -> Self {
ConfigurationError::AddressParsingError(error)
}
}
impl std::fmt::Display for ConfigurationError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
ConfigurationError::AddressParsingError(e) => {
write!(f, "Failed to parse an address: {}", e)
}
ConfigurationError::IncorrectPort => write!(f, "Incorrect port (must be > 1023)"),
ConfigurationError::IncorrectURI => write!(f, "Incorrect URI"),
ConfigurationError::NoHostElement => write!(f, "URI doesn't contain the Host element"),
ConfigurationError::IncorrectWebAssemblyModuleName => {
write!(f, "Incorrect WebAssembly module name")
}
ConfigurationError::InvalidNodeConfiguration => write!(f, "Invalid NodeConfiguration"),
ConfigurationError::WasmiModuleInializationError(e) => {
write!(f, "Failed to initialize wasmi::Module: {}", e)
}
ConfigurationError::NodeCreationNotPermitted => {
write!(f, "Node creation not permitted")
}
}
}
}
/// Implementation of [`NodeFactory`] for server-like Oak applications running on cloud
/// environments with WebAssembly support.
pub struct | {
pub application_configuration: ApplicationConfiguration,
pub permissions_configuration: PermissionsConfiguration,
pub secure_server_configuration: SecureServerConfiguration,
pub signature_table: SignatureTable,
pub kms_credentials: Option<std::path::PathBuf>,
}
impl NodeFactory<NodeConfiguration> for ServerNodeFactory {
fn create_node(
&self,
node_name: &str,
node_configuration: &NodeConfiguration,
) -> Result<CreatedNode, ConfigurationError> {
if !self
.permissions_configuration
.allowed_creation(node_configuration)
// TODO(#1027): Use anyhow or an improved ConfigurationError
.map_err(|_| ConfigurationError::InvalidNodeConfiguration)?
{
return Err(ConfigurationError::NodeCreationNotPermitted);
}
match &node_configuration.config_type {
Some(ConfigType::CryptoConfig(CryptoConfiguration {})) => Ok(CreatedNode {
instance: Box::new(crypto::CryptoNode::new(
node_name,
self.kms_credentials.clone(),
)),
// TODO(#1842): sort out IFC interactions so that the crypto pseudo-Node can receive
// labelled plaintext data and emit unlabelled encrypted data (which would probably
// mean top_privilege() goes here).
privilege: NodePrivilege::default(),
}),
Some(ConfigType::LogConfig(LogConfiguration {})) => Ok(CreatedNode {
instance: Box::new(logger::LogNode::new(node_name)),
// Allow the logger Node to declassify log messages in debug builds only.
#[cfg(feature = "oak-unsafe")]
privilege: NodePrivilege::top_privilege(),
// The logger must not have any declassification privilege in non-debug builds.
#[cfg(not(feature = "oak-unsafe"))]
privilege: NodePrivilege::default(),
}),
Some(ConfigType::GrpcServerConfig(config)) => {
let grpc_configuration = self
.secure_server_configuration
.grpc_config
.clone()
.expect("no gRPC identity provided to Oak Runtime");
Ok(CreatedNode {
instance: Box::new(grpc::server::GrpcServerNode::new(
node_name,
config.clone(),
grpc_configuration
.grpc_server_tls_identity
.as_ref()
.expect("no gRPC server TLS identity provided to Oak Runtime")
.clone(),
grpc_configuration.oidc_client_info.clone(),
)?),
// This node needs to have `top` privilege to be able to declassify data tagged
// with any arbitrary user identities.
// TODO(#1631): When we have a separate top for each sub-lattice, this should be
// changed to the top of the identity sub-lattice.
privilege: NodePrivilege::top_privilege(),
})
}
Some(ConfigType::WasmConfig(config)) => {
let wasm_module_bytes = self
.application_configuration
.wasm_modules
.get(&config.wasm_module_name)
.ok_or(ConfigurationError::IncorrectWebAssemblyModuleName)?;
Ok(CreatedNode {
instance: Box::new(wasm::WasmNode::new(
node_name,
wasm_module_bytes,
config.clone(),
)?),
privilege: wasm::get_privilege(wasm_module_bytes, &self.signature_table),
})
}
Some(ConfigType::GrpcClientConfig(config)) => {
let grpc_client_root_tls_certificate = self
.secure_server_configuration
.clone()
.grpc_config
.expect("no gRPC identity provided to Oak Runtime")
.grpc_client_root_tls_certificate
.expect("no root TLS certificate provided to Oak Runtime");
let uri = config.uri.parse().map_err(|err| {
warn!("could not parse URI {}: {:?}", config.uri, err);
ConfigurationError::IncorrectURI
})?;
Ok(CreatedNode {
instance: Box::new(grpc::client::GrpcClientNode::new(
node_name,
&uri,
grpc_client_root_tls_certificate,
)?),
privilege: grpc::client::get_privilege(&uri),
})
}
Some(ConfigType::RoughtimeClientConfig(config)) => Ok(CreatedNode {
instance: Box::new(roughtime::RoughtimeClientNode::new(node_name, config)),
privilege: NodePrivilege::default(),
}),
Some(ConfigType::StorageConfig(_config)) => Ok(CreatedNode {
instance: Box::new(storage::StorageNode::new(node_name)),
privilege: NodePrivilege::default(),
}),
Some(ConfigType::HttpServerConfig(config)) => {
let tls_config = self
.secure_server_configuration
.http_config
.clone()
.expect("no TLS configuration for HTTP servers provided to Oak Runtime")
.tls_config;
Ok(CreatedNode {
instance: Box::new(http::server::HttpServerNode::new(
node_name,
config.clone(),
tls_config,
)?),
// This node needs to have `top` privilege to be able to declassify data tagged
// with any arbitrary user identities.
// TODO(#1631): When we have a separate top for each sub-lattice, this should be
// changed to the top of the `identity` sub-lattice.
privilege: NodePrivilege::top_privilege(),
})
}
Some(ConfigType::HttpClientConfig(config)) => {
let http_client_root_tls_certificate = self
.secure_server_configuration
.http_config
.clone()
.expect("no HTTP configuration provided to Oak Runtime")
.http_client_root_tls_certificate
.expect("no root TLS certificate provided to Oak Runtime");
Ok(CreatedNode {
instance: Box::new(http::client::HttpClientNode::new(
node_name,
config.clone(),
http_client_root_tls_certificate,
)?),
privilege: http::client::get_privilege(&config.authority),
})
}
None => Err(ConfigurationError::InvalidNodeConfiguration),
}
}
}
/// A holder struct containing a [`Node`] instance, together with the [`NodePrivilege`] that is
/// assigned to it.
///
/// The reason the privilege is returned here and not by the node itself is that it should be
/// determined by the [`NodeFactory`] instance, so that an untrusted Node may not be able to
/// single-handedly increase its own privilege. In this codebase all the Nodes are defined alongside
/// each other, so this does not make much difference, but if Nodes were loaded dynamically or from
/// untrusted libraries, then it would be more obvious that we would still want to keep the
/// privilege assignment here.
pub struct CreatedNode {
pub instance: Box<dyn Node>,
pub privilege: NodePrivilege,
}
/// A trait implemented by a concrete factory of nodes that creates nodes based on a Node
/// configuration of type `T`.
pub trait NodeFactory<T> {
/// Creates a new [`Node`] instance with the specified name and based on the provided Node
/// configuration information.
fn create_node(
&self,
node_name: &str,
node_configuration: &T,
) -> Result<CreatedNode, ConfigurationError>;
}
// the `linear-handles` feature makes [`Sender`] and [`Receiver`] non-`Copy`, so we must use `Clone`
// with the feature turned on. But doing so with the feature off will make clippy complain, so we
// have this simple function that always uses the appropriate impl for copying these types.
// TODO(#1854): Remove this once linear-handles are the default.
#[cfg(not(feature = "linear-handles"))]
pub(crate) fn copy_or_clone<T: Copy>(t: &T) -> T {
*t
}
#[cfg(feature = "linear-handles")]
pub(crate) fn copy_or_clone<T: Clone>(t: &T) -> T {
t.clone()
}
| ServerNodeFactory | identifier_name |
mod.rs | //
// Copyright 2020 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! Functionality for different Node types.
use crate::{
permissions::PermissionsConfiguration, NodePrivilege, RuntimeProxy, SecureServerConfiguration,
SignatureTable,
};
use log::warn;
use oak_abi::proto::oak::application::{
node_configuration::ConfigType, ApplicationConfiguration, CryptoConfiguration,
LogConfiguration, NodeConfiguration,
};
use std::net::AddrParseError;
use tokio::sync::oneshot;
mod crypto;
pub mod grpc;
pub mod http;
mod invocation;
mod logger;
mod roughtime;
mod storage;
mod wasm;
/// Trait encapsulating execution of a Node or pseudo-Node.
pub trait Node: Send {
/// Returns a name for this type of Node.
fn node_type(&self) -> &'static str;
/// Returns a value indicating the isolation of a Node. If a Node is sandboxed (e.g. a Wasm
/// node), the sandbox restricts external communications. Uncontrolled nodes (e.g. pseudo-Nodes
/// that are part of the runtime) have no restrictions enforced on external communications.
///
/// Unless a node uses a trusted sandbox to restrict communications, this function should always
/// return [`NodeIsolation::Uncontrolled`].
fn isolation(&self) -> NodeIsolation {
NodeIsolation::Uncontrolled
}
/// Executes the Node, using the provided `Runtime` reference and initial handle. The method
/// should continue execution until the Node terminates.
///
/// `notify_receiver` receives a notification from the Runtime upon termination. This
/// notification can be used by the Node to gracefully shut down.
fn run(
self: Box<Self>,
runtime: RuntimeProxy,
handle: oak_abi::Handle,
notify_receiver: oneshot::Receiver<()>,
);
}
/// Indication of the level of isolation of a node.
#[derive(Debug)]
pub enum NodeIsolation {
Sandboxed,
Uncontrolled,
}
/// An enumeration for errors occurring when creating a new [`Node`] instance.
// TODO(#1027): Improve or delete this enum.
#[derive(Debug)]
pub enum ConfigurationError {
AddressParsingError(AddrParseError),
IncorrectPort,
IncorrectURI,
NoHostElement,
IncorrectWebAssemblyModuleName,
InvalidNodeConfiguration,
WasmiModuleInializationError(wasmi::Error),
NodeCreationNotPermitted,
}
impl From<AddrParseError> for ConfigurationError {
fn from(error: AddrParseError) -> Self {
ConfigurationError::AddressParsingError(error)
}
}
impl std::fmt::Display for ConfigurationError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
ConfigurationError::AddressParsingError(e) => {
write!(f, "Failed to parse an address: {}", e)
}
ConfigurationError::IncorrectPort => write!(f, "Incorrect port (must be > 1023)"),
ConfigurationError::IncorrectURI => write!(f, "Incorrect URI"),
ConfigurationError::NoHostElement => write!(f, "URI doesn't contain the Host element"),
ConfigurationError::IncorrectWebAssemblyModuleName => {
write!(f, "Incorrect WebAssembly module name")
}
ConfigurationError::InvalidNodeConfiguration => write!(f, "Invalid NodeConfiguration"),
ConfigurationError::WasmiModuleInializationError(e) => {
write!(f, "Failed to initialize wasmi::Module: {}", e)
}
ConfigurationError::NodeCreationNotPermitted => {
write!(f, "Node creation not permitted")
}
}
}
}
/// Implementation of [`NodeFactory`] for server-like Oak applications running on cloud
/// environments with WebAssembly support.
pub struct ServerNodeFactory {
pub application_configuration: ApplicationConfiguration,
pub permissions_configuration: PermissionsConfiguration,
pub secure_server_configuration: SecureServerConfiguration,
pub signature_table: SignatureTable,
pub kms_credentials: Option<std::path::PathBuf>,
}
impl NodeFactory<NodeConfiguration> for ServerNodeFactory {
fn create_node(
&self,
node_name: &str,
node_configuration: &NodeConfiguration,
) -> Result<CreatedNode, ConfigurationError> | }),
Some(ConfigType::LogConfig(LogConfiguration {})) => Ok(CreatedNode {
instance: Box::new(logger::LogNode::new(node_name)),
// Allow the logger Node to declassify log messages in debug builds only.
#[cfg(feature = "oak-unsafe")]
privilege: NodePrivilege::top_privilege(),
// The logger must not have any declassification privilege in non-debug builds.
#[cfg(not(feature = "oak-unsafe"))]
privilege: NodePrivilege::default(),
}),
Some(ConfigType::GrpcServerConfig(config)) => {
let grpc_configuration = self
.secure_server_configuration
.grpc_config
.clone()
.expect("no gRPC identity provided to Oak Runtime");
Ok(CreatedNode {
instance: Box::new(grpc::server::GrpcServerNode::new(
node_name,
config.clone(),
grpc_configuration
.grpc_server_tls_identity
.as_ref()
.expect("no gRPC server TLS identity provided to Oak Runtime")
.clone(),
grpc_configuration.oidc_client_info.clone(),
)?),
// This node needs to have `top` privilege to be able to declassify data tagged
// with any arbitrary user identities.
// TODO(#1631): When we have a separate top for each sub-lattice, this should be
// changed to the top of the identity sub-lattice.
privilege: NodePrivilege::top_privilege(),
})
}
Some(ConfigType::WasmConfig(config)) => {
let wasm_module_bytes = self
.application_configuration
.wasm_modules
.get(&config.wasm_module_name)
.ok_or(ConfigurationError::IncorrectWebAssemblyModuleName)?;
Ok(CreatedNode {
instance: Box::new(wasm::WasmNode::new(
node_name,
wasm_module_bytes,
config.clone(),
)?),
privilege: wasm::get_privilege(wasm_module_bytes, &self.signature_table),
})
}
Some(ConfigType::GrpcClientConfig(config)) => {
let grpc_client_root_tls_certificate = self
.secure_server_configuration
.clone()
.grpc_config
.expect("no gRPC identity provided to Oak Runtime")
.grpc_client_root_tls_certificate
.expect("no root TLS certificate provided to Oak Runtime");
let uri = config.uri.parse().map_err(|err| {
warn!("could not parse URI {}: {:?}", config.uri, err);
ConfigurationError::IncorrectURI
})?;
Ok(CreatedNode {
instance: Box::new(grpc::client::GrpcClientNode::new(
node_name,
&uri,
grpc_client_root_tls_certificate,
)?),
privilege: grpc::client::get_privilege(&uri),
})
}
Some(ConfigType::RoughtimeClientConfig(config)) => Ok(CreatedNode {
instance: Box::new(roughtime::RoughtimeClientNode::new(node_name, config)),
privilege: NodePrivilege::default(),
}),
Some(ConfigType::StorageConfig(_config)) => Ok(CreatedNode {
instance: Box::new(storage::StorageNode::new(node_name)),
privilege: NodePrivilege::default(),
}),
Some(ConfigType::HttpServerConfig(config)) => {
let tls_config = self
.secure_server_configuration
.http_config
.clone()
.expect("no TLS configuration for HTTP servers provided to Oak Runtime")
.tls_config;
Ok(CreatedNode {
instance: Box::new(http::server::HttpServerNode::new(
node_name,
config.clone(),
tls_config,
)?),
// This node needs to have `top` privilege to be able to declassify data tagged
// with any arbitrary user identities.
// TODO(#1631): When we have a separate top for each sub-lattice, this should be
// changed to the top of the `identity` sub-lattice.
privilege: NodePrivilege::top_privilege(),
})
}
Some(ConfigType::HttpClientConfig(config)) => {
let http_client_root_tls_certificate = self
.secure_server_configuration
.http_config
.clone()
.expect("no HTTP configuration provided to Oak Runtime")
.http_client_root_tls_certificate
.expect("no root TLS certificate provided to Oak Runtime");
Ok(CreatedNode {
instance: Box::new(http::client::HttpClientNode::new(
node_name,
config.clone(),
http_client_root_tls_certificate,
)?),
privilege: http::client::get_privilege(&config.authority),
})
}
None => Err(ConfigurationError::InvalidNodeConfiguration),
}
}
}
/// A holder struct containing a [`Node`] instance, together with the [`NodePrivilege`] that is
/// assigned to it.
///
/// The reason the privilege is returned here and not by the node itself is that it should be
/// determined by the [`NodeFactory`] instance, so that an untrusted Node may not be able to
/// single-handedly increase its own privilege. In this codebase all the Nodes are defined alongside
/// each other, so this does not make much difference, but if Nodes were loaded dynamically or from
/// untrusted libraries, then it would be more obvious that we would still want to keep the
/// privilege assignment here.
pub struct CreatedNode {
pub instance: Box<dyn Node>,
pub privilege: NodePrivilege,
}
/// A trait implemented by a concrete factory of nodes that creates nodes based on a Node
/// configuration of type `T`.
pub trait NodeFactory<T> {
/// Creates a new [`Node`] instance with the specified name and based on the provided Node
/// configuration information.
fn create_node(
&self,
node_name: &str,
node_configuration: &T,
) -> Result<CreatedNode, ConfigurationError>;
}
// the `linear-handles` feature makes [`Sender`] and [`Receiver`] non-`Copy`, so we must use `Clone`
// with the feature turned on. But doing so with the feature off will make clippy complain, so we
// have this simple function that always uses the appropriate impl for copying these types.
// TODO(#1854): Remove this once linear-handles are the default.
#[cfg(not(feature = "linear-handles"))]
pub(crate) fn copy_or_clone<T: Copy>(t: &T) -> T {
*t
}
#[cfg(feature = "linear-handles")]
pub(crate) fn copy_or_clone<T: Clone>(t: &T) -> T {
t.clone()
}
| {
if !self
.permissions_configuration
.allowed_creation(node_configuration)
// TODO(#1027): Use anyhow or an improved ConfigurationError
.map_err(|_| ConfigurationError::InvalidNodeConfiguration)?
{
return Err(ConfigurationError::NodeCreationNotPermitted);
}
match &node_configuration.config_type {
Some(ConfigType::CryptoConfig(CryptoConfiguration {})) => Ok(CreatedNode {
instance: Box::new(crypto::CryptoNode::new(
node_name,
self.kms_credentials.clone(),
)),
// TODO(#1842): sort out IFC interactions so that the crypto pseudo-Node can receive
// labelled plaintext data and emit unlabelled encrypted data (which would probably
// mean top_privilege() goes here).
privilege: NodePrivilege::default(), | identifier_body |
parse.rs | use std::collections::HashSet;
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct Point {
pub row: usize,
pub col: usize,
}
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct TBox(pub Point, pub Point);
pub struct Lines(pub Vec<Vec<char>>);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Direction {
Up,
Dn,
Lt,
Rt,
}
impl Direction {
const VALUES: [Direction; 4] = [Direction::Up, Direction::Dn, Direction::Lt, Direction::Rt];
#[inline]
fn rev(self) -> Direction {
use Direction::*;
match self {
Up => Dn,
Dn => Up,
Lt => Rt,
Rt => Lt,
}
}
}
#[inline]
fn can_go(c: char, d: Direction) -> bool {
use Direction::*;
match (c, d) {
('|', Up | Dn) | ('-', Lt | Rt) => true,
('.', Dn | Lt) => true,
(',', Dn | Rt) => true,
('\'', Up | Lt | Rt) => true,
('<', Rt) | ('>', Lt) | ('^', Up) | ('v', Dn) => true,
_ => false,
}
}
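// Small sanity check added for exposition (not part of the original file):
// '-' only carries horizontal links, '\'' joins up/left/right, and the
// arrowhead 'v' only connects downward.
#[cfg(test)]
mod can_go_example {
    use super::*;

    #[test]
    fn connectivity_of_common_characters() {
        assert!(can_go('-', Direction::Lt) && can_go('-', Direction::Rt));
        assert!(!can_go('-', Direction::Up));
        assert!(can_go('\'', Direction::Up) && can_go('\'', Direction::Rt));
        assert!(can_go('v', Direction::Dn) && !can_go('v', Direction::Up));
    }
}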
impl From<(usize, usize)> for Point {
#[inline]
fn from(p: (usize, usize)) -> Point {
Point { row: p.0, col: p.1 }
}
}
impl std::fmt::Debug for Point {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", (self.row, self.col))
}
}
impl Point {
#[inline]
fn in_dir(self, d: Direction) -> Option<Point> {
use Direction::*;
match d {
Dn => Some(Point::from((self.row + 1, self.col))),
Up => {
if self.row == 0 {
None
} else {
Some(Point::from((self.row - 1, self.col)))
}
}
Rt => Some(Point::from((self.row, self.col + 1))),
Lt => {
if self.col == 0 {
None
} else {
Some(Point::from((self.row, self.col - 1)))
}
}
}
}
}
impl From<(Point, Point)> for TBox {
#[inline]
fn from(b: (Point, Point)) -> TBox {
use std::cmp::{max, min};
TBox(
(min(b.0.row, b.1.row), min(b.0.col, b.1.col)).into(),
(max(b.0.row, b.1.row), max(b.0.col, b.1.col)).into(),
)
}
}
impl std::fmt::Debug for TBox {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[{:?} {:?}]", self.0, self.1)
}
}
impl TBox {
#[inline]
pub fn contains(&self, p: Point) -> bool {
["hey", "there"].into_iter().flat_map(|s| s.chars());
p.row >= self.0.row && p.row <= self.1.row && p.col >= self.0.col && p.col <= self.1.col
}
#[inline]
pub fn intersects(&self, b: TBox) -> bool {
!(self.1.row < b.0.row
|| self.0.row > b.1.row
|| self.1.col < b.0.col
|| self.0.col > b.1.col)
}
#[inline]
fn points<'p>(self) -> impl Iterator<Item = Point> + 'p {
let col_iter = move |row| {
(self.0.col..=self.1.col)
.into_iter()
.map(move |col| Point { row, col })
};
(self.0.row..=self.1.row).into_iter().flat_map(col_iter)
}
#[inline]
pub fn in_dir(&self, d: Direction) -> Option<TBox> {
self
.0
.in_dir(d)
.and_then(|p0| self.1.in_dir(d).map(|p1| TBox(p0, p1)))
}
}
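// Quick illustration (added for exposition): with b = TBox((0, 0).into(), (2, 3).into()),
//   b.contains((1, 2).into())                        -> true   (inside the box)
//   b.contains((3, 0).into())                        -> false  (row below the bottom edge)
//   b.intersects(TBox((2, 3).into(), (4, 5).into())) -> true   (they share the corner (2, 3))
//   b.in_dir(Direction::Up)                          -> None   (the box already touches row 0)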
impl std::ops::Index<Point> for Lines {
type Output = char;
fn index(&self, p: Point) -> &char {
self.0[p.row].index(p.col)
}
}
impl std::ops::IndexMut<Point> for Lines {
fn index_mut(&mut self, p: Point) -> &mut char {
self.0[p.row].index_mut(p.col)
}
}
impl Lines {
fn at(&self, p: Point) -> Option<char> {
if p.row as usize >= self.0.len() {
return None;
}
let line = &self.0[p.row as usize];
if p.col as usize >= line.len() {
return None;
}
Some(line[p.col as usize])
}
fn in_dir(&self, p: Point, d: Direction) -> Option<(Point, char)> {
p.in_dir(d).and_then(|p| self.at(p).map(|c| (p, c)))
}
fn visit(&self, mut pred: impl FnMut(Point, char)) {
for r in 0..self.0.len() {
for c in 0..self.0[r].len() {
pred((r, c).into(), self.0[r][c]);
}
}
}
}
fn top_lefts(lines: &Lines) -> Vec<(Point, char)> {
let mut ret = vec![];
for row in 0..lines.0.len() {
for col in 0..lines.0[row].len() {
let c = lines.0[row][col];
let p = Point { row, col };
if can_go(c, Direction::Dn)
&& can_go(c, Direction::Rt)
&& lines
.in_dir(p, Direction::Rt)
.map(|(_, c)| can_go(c, Direction::Lt))
.unwrap_or(false)
&& lines
.in_dir(p, Direction::Dn)
.map(|(_, c)| can_go(c, Direction::Up))
.unwrap_or(false)
{
ret.push((p, c));
}
}
}
ret
}
fn scan_dir(lines: &Lines, mut p: Point, d: Direction) -> Option<(Point, char)> {
while let Some((q, c)) = lines.in_dir(p, d) {
// p
// --* < can't connect
//
if !can_go(c, d.rev()) {
return lines.at(p).map(|c| (p, c));
}
p = q;
// p
// --. < can connect, can't continue
//
if !can_go(c, d) {
return Some((p, c));
}
}
lines.at(p).map(|c| (p, c))
}
struct PathIter<'l> {
start: bool,
lines: &'l Lines,
p: Point,
d: Direction,
}
impl<'l> PathIter<'l> {
fn new(lines: &'l Lines, p: Point, d: Direction) -> PathIter<'l> {
PathIter {
start: true,
lines,
p,
d,
}
}
}
// * 4
// 1 2 |
// |----' 3
//
// 1. start, returns point, begins path-scan
// 2. edge, while current can send, and next can recv, advance cursor
// 3. turn, return point, find next direction (if you can)
// 4. end, current can't send or next can't recv, return final point (if not already returned)
// 5. exit, same as end, but signal end of iteration
//
//
// * > point and direction
//
// 0. test if point exists
// 1. test if you can go that direction
// 2. if so, scan in that direction (returns last point *after* initial, character)
// 2a. mark last point as path point
// 3. if not, pick a direction you haven't tried, go back to 1.
impl<'l> Iterator for PathIter<'l> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
if self.lines.at(self.p).is_none() {
return None;
} else if self.start {
self.start = false;
return Some(self.p);
}
let mut cant_go = vec![self.d.rev()];
loop {
// println!("PathIter {{ p: {:?}, d: {:?} }}", self.p, self.d);
if let (Some(true), Some(true)) = (
self.lines.at(self.p).map(|c| can_go(c, self.d)),
self
.lines
.in_dir(self.p, self.d)
.map(|(_, c)| can_go(c, self.d.rev())),
) {
if let Some((pnext, c)) = scan_dir(self.lines, self.p, self.d) {
// println!("scan_dir = Some(({:?}, {:?}))", pnext, c);
self.p = pnext;
return Some(pnext);
}
}
cant_go.push(self.d);
if let Some(dnext) = Direction::VALUES
.into_iter()
.filter(|d| !cant_go.contains(d))
.next()
{
self.d = dnext;
continue;
} else {
return None;
}
}
}
}
fn scan_path(lines: &Lines, p: Point, d: Direction) -> Vec<Point> {
if !lines.at(p).map(|c| can_go(c, d)).unwrap_or(false) {
return vec![];
}
let mut ret = vec![];
let mut it = PathIter::new(&lines, p, d);
while let Some(next) = it.next() {
if ret.contains(&next) {
return ret;
}
ret.push(next);
}
ret
}
pub fn boxes(lines: &Lines) -> Vec<TBox> {
top_lefts(lines)
.into_iter()
.filter_map(|tl| {
let tr = scan_dir(lines, tl.0, Direction::Rt)?;
let bl = scan_dir(lines, tl.0, Direction::Dn)?;
let br = scan_dir(lines, bl.0, Direction::Rt)?;
let br2 = scan_dir(lines, tr.0, Direction::Dn)?;
if br2 != br {
return None;
}
Some(TBox(tl.0, br.0))
})
.collect()
}
pub fn border_in_dir(b: TBox, d: Direction) -> TBox {
use Direction::*;
match d {
Up => TBox::from((b.0, Point::from((b.0.row, b.1.col)))),
Dn => TBox::from((Point::from((b.1.row, b.0.col)), b.1)),
Lt => TBox::from((b.0, Point::from((b.1.row, b.0.col)))),
Rt => TBox::from((Point::from((b.0.row, b.1.col)), b.1)),
}
}
fn border(b: TBox) -> Vec<(Point, Direction)> {
Direction::VALUES
.into_iter()
// get eg top border
.map(|d| (border_in_dir(b, d), d))
// push top border up to get just outside top border
.filter_map(|(b, d)| b.in_dir(d).map(|b| (b, d)))
// get points of just-outside-top-border
.flat_map(|(b, d)| b.points().map(move |p| (p, d)))
.collect()
}
pub fn path_contains(pth: &Vec<Point>, p: Point) -> bool {
let mut it = pth.iter();
let fst = it.next();
if fst.is_none() {
return false;
}
let mut last = fst.unwrap();
if *last == p {
return true;
}
while let Some(next) = it.next() {
if TBox::from((*last, *next)).contains(p) {
return true;
}
last = next;
}
false
}
pub fn edges(lines: &Lines, boxes: &Vec<TBox>) -> HashSet<Vec<Point>> {
// ###
// ,---. ##
// #| |,--. find all possible starts for edges between boxes
// '---''--'
// ### ##
boxes
.iter()
.map(|b| border(*b))
.flat_map(|v| v.into_iter())
.filter(|(p, d)| lines.at(*p).map(|c| can_go(c, d.rev())).unwrap_or(false))
.map(|(p, d)| scan_path(lines, p, d))
.filter(|pth| pth.len() > 0)
.fold(HashSet::new(), |mut map, mut pth| {
// checking the forward path then inserting
// the reverse means we don't double-count paths
if !map.contains(&pth) {
pth.reverse();
map.insert(pth);
}
map
})
}
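// Illustrative sketch (added for exposition, not in the original source): the
// two passes above compose into a single "parse the whole diagram" step. The
// helper name `parse_diagram` is an assumption made for this example only.
#[allow(dead_code)]
fn parse_diagram(lines: &Lines) -> (Vec<TBox>, HashSet<Vec<Point>>) {
    // Find every rectangle first, then trace the connector paths between them.
    let bxs = boxes(lines);
    let eds = edges(lines, &bxs);
    (bxs, eds)
}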
#[cfg(test)]
mod test {
use super::*;
fn lines() -> Lines {
let lines: Vec<Vec<char>> = r#"
,---.,-----------.
| |',-. |
| | | | ,-----'
'---' | | |
| |--'
'-'
"#
.lines()
.map(|l| l.chars().collect())
.collect();
Lines(lines)
}
#[test]
fn test_top_lefts() {
let lines = lines();
assert_eq!(
vec![
(Point { row: 1, col: 1 }, ','),
(Point { row: 1, col: 6 }, ','),
(Point { row: 2, col: 7 }, ','),
(Point { row: 3, col: 12 }, ','),
],
top_lefts(&lines)
);
}
#[test]
fn test_scan_dir() {
let lines = lines();
let tl = Point { row: 1, col: 1 };
let tr = Point { row: 1, col: 5 };
let bl = Point { row: 4, col: 1 };
let br = Point { row: 4, col: 5 };
assert_eq!(Some((tr, '.')), scan_dir(&lines, tl, Direction::Rt),);
assert_eq!(Some((bl, '\'')), scan_dir(&lines, tl, Direction::Dn),);
assert_eq!(Some((br, '\'')), scan_dir(&lines, bl, Direction::Rt),);
assert_eq!(
Some((Point { row: 1, col: 18 }, '.')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Rt),
);
assert_eq!(
Some((Point { row: 2, col: 6 }, '\'')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Dn),
);
assert_eq!(
Some((Point { row: 1, col: 6 }, ',')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Lt),
);
}
#[test]
fn test_boxes() {
let lines = lines();
assert_eq!(
vec![
TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 }),
TBox(Point { row: 2, col: 7 }, Point { row: 6, col: 9 }),
],
boxes(&lines),
);
}
#[test]
fn test_scan_path() {
let lines = lines();
let mut pth = vec![
Point { row: 2, col: 6 },
Point { row: 1, col: 6 },
Point { row: 1, col: 18 },
Point { row: 3, col: 18 },
Point { row: 3, col: 12 },
Point { row: 5, col: 12 },
Point { row: 5, col: 10 },
];
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// should work in reverse
pth.reverse();
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// |--' |--'
// ^ ^
// instead of the beginning, start a little further along
pth[0].col += 1;
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
}
#[test]
fn test_box_contains() {
let lb = TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 });
assert_eq!(true, lb.contains(lb.0) && lb.contains(lb.1));
assert_eq!(false, lb.contains(Point { row: 5, col: 4 }),);
}
#[test]
fn test_border() | border(b)
)
}
}
| {
let b = TBox(Point { row: 1, col: 1 }, Point { row: 3, col: 4 });
use Direction::*;
assert_eq!(
vec![
(Point { row: 0, col: 1 }, Up),
(Point { row: 0, col: 2 }, Up),
(Point { row: 0, col: 3 }, Up),
(Point { row: 0, col: 4 }, Up),
(Point { row: 4, col: 1 }, Dn),
(Point { row: 4, col: 2 }, Dn),
(Point { row: 4, col: 3 }, Dn),
(Point { row: 4, col: 4 }, Dn),
(Point { row: 1, col: 0 }, Lt),
(Point { row: 2, col: 0 }, Lt),
(Point { row: 3, col: 0 }, Lt),
(Point { row: 1, col: 5 }, Rt),
(Point { row: 2, col: 5 }, Rt),
(Point { row: 3, col: 5 }, Rt),
], | identifier_body |
parse.rs | use std::collections::HashSet;
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct Point {
pub row: usize,
pub col: usize,
}
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct TBox(pub Point, pub Point);
pub struct Lines(pub Vec<Vec<char>>);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Direction {
Up,
Dn,
Lt,
Rt,
}
impl Direction {
const VALUES: [Direction; 4] = [Direction::Up, Direction::Dn, Direction::Lt, Direction::Rt];
#[inline]
fn rev(self) -> Direction {
use Direction::*;
match self {
Up => Dn,
Dn => Up,
Lt => Rt,
Rt => Lt,
}
}
}
#[inline]
fn can_go(c: char, d: Direction) -> bool {
use Direction::*;
match (c, d) {
('|', Up | Dn) | ('-', Lt | Rt) => true,
('.', Dn | Lt) => true,
(',', Dn | Rt) => true,
('\'', Up | Lt | Rt) => true,
('<', Rt) | ('>', Lt) | ('^', Up) | ('v', Dn) => true,
_ => false,
}
}
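// Illustrative spot checks (added for exposition, not in the original source)
// of the connector table above: `,` is a top-left corner that continues down
// and to the right, `'` is a bottom corner that connects upward but not down,
// and an arrow head such as `<` only connects on its open (right) side.
#[allow(dead_code)]
fn can_go_examples() {
    debug_assert!(can_go(',', Direction::Rt) && can_go(',', Direction::Dn));
    debug_assert!(can_go('\'', Direction::Up) && !can_go('\'', Direction::Dn));
    debug_assert!(can_go('-', Direction::Lt) && !can_go('-', Direction::Up));
    debug_assert!(can_go('<', Direction::Rt) && !can_go('<', Direction::Lt));
}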
impl From<(usize, usize)> for Point {
#[inline]
fn from(p: (usize, usize)) -> Point {
Point { row: p.0, col: p.1 }
}
}
impl std::fmt::Debug for Point {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", (self.row, self.col))
}
}
impl Point {
#[inline]
fn in_dir(self, d: Direction) -> Option<Point> {
use Direction::*;
match d {
Dn => Some(Point::from((self.row + 1, self.col))),
Up => {
if self.row == 0 {
None
} else {
Some(Point::from((self.row - 1, self.col)))
}
}
Rt => Some(Point::from((self.row, self.col + 1))),
Lt => {
if self.col == 0 {
None
} else {
Some(Point::from((self.row, self.col - 1)))
}
}
}
}
}
impl From<(Point, Point)> for TBox {
#[inline]
fn from(b: (Point, Point)) -> TBox {
use std::cmp::{max, min};
TBox(
(min(b.0.row, b.1.row), min(b.0.col, b.1.col)).into(),
(max(b.0.row, b.1.row), max(b.0.col, b.1.col)).into(),
)
}
}
impl std::fmt::Debug for TBox {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[{:?} {:?}]", self.0, self.1)
}
}
impl TBox {
#[inline]
pub fn contains(&self, p: Point) -> bool {
p.row >= self.0.row && p.row <= self.1.row && p.col >= self.0.col && p.col <= self.1.col
}
#[inline]
pub fn intersects(&self, b: TBox) -> bool {
!(self.1.row < b.0.row
|| self.0.row > b.1.row
|| self.1.col < b.0.col
|| self.0.col > b.1.col)
}
#[inline]
fn points<'p>(self) -> impl Iterator<Item = Point> + 'p {
let col_iter = move |row| {
(self.0.col..=self.1.col)
.into_iter()
.map(move |col| Point { row, col })
};
(self.0.row..=self.1.row).into_iter().flat_map(col_iter)
}
#[inline]
pub fn in_dir(&self, d: Direction) -> Option<TBox> {
self
.0
.in_dir(d)
.and_then(|p0| self.1.in_dir(d).map(|p1| TBox(p0, p1)))
}
}
impl std::ops::Index<Point> for Lines {
type Output = char;
fn index(&self, p: Point) -> &char {
self.0[p.row].index(p.col)
}
}
impl std::ops::IndexMut<Point> for Lines {
fn index_mut(&mut self, p: Point) -> &mut char {
self.0[p.row].index_mut(p.col)
}
}
impl Lines {
fn at(&self, p: Point) -> Option<char> {
if p.row as usize >= self.0.len() {
return None;
}
let line = &self.0[p.row as usize];
if p.col as usize >= line.len() {
return None;
}
Some(line[p.col as usize])
}
fn in_dir(&self, p: Point, d: Direction) -> Option<(Point, char)> {
p.in_dir(d).and_then(|p| self.at(p).map(|c| (p, c)))
}
fn visit(&self, mut pred: impl FnMut(Point, char)) {
for r in 0..self.0.len() {
for c in 0..self.0[r].len() {
pred((r, c).into(), self.0[r][c]);
}
}
}
}
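// Small illustrative check (added for exposition, not in the original source):
// `at` is a bounds-safe lookup into the ragged character grid and `in_dir`
// pairs it with a one-cell step, returning the neighbour and its character.
#[allow(dead_code)]
fn lines_lookup_example() {
    let grid = Lines(vec![vec!['a', 'b'], vec!['c']]);
    debug_assert_eq!(grid.at(Point { row: 0, col: 1 }), Some('b'));
    debug_assert_eq!(grid.at(Point { row: 1, col: 1 }), None); // short row
    debug_assert_eq!(
        grid.in_dir(Point { row: 0, col: 0 }, Direction::Rt),
        Some((Point { row: 0, col: 1 }, 'b'))
    );
}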
fn top_lefts(lines: &Lines) -> Vec<(Point, char)> {
let mut ret = vec![];
for row in 0..lines.0.len() {
for col in 0..lines.0[row].len() {
let c = lines.0[row][col];
let p = Point { row, col };
if can_go(c, Direction::Dn)
&& can_go(c, Direction::Rt)
&& lines
.in_dir(p, Direction::Rt)
.map(|(_, c)| can_go(c, Direction::Lt))
.unwrap_or(false)
&& lines
.in_dir(p, Direction::Dn)
.map(|(_, c)| can_go(c, Direction::Up))
.unwrap_or(false)
{
ret.push((p, c));
}
}
}
ret
}
fn | (lines: &Lines, mut p: Point, d: Direction) -> Option<(Point, char)> {
while let Some((q, c)) = lines.in_dir(p, d) {
// p
// --* < can't connect
//
if !can_go(c, d.rev()) {
return lines.at(p).map(|c| (p, c));
}
p = q;
// p
// --. < can connect, can't continue
//
if !can_go(c, d) {
return Some((p, c));
}
}
lines.at(p).map(|c| (p, c))
}
struct PathIter<'l> {
start: bool,
lines: &'l Lines,
p: Point,
d: Direction,
}
impl<'l> PathIter<'l> {
fn new(lines: &'l Lines, p: Point, d: Direction) -> PathIter<'l> {
PathIter {
start: true,
lines,
p,
d,
}
}
}
// * 4
// 1 2 |
// |----' 3
//
// 1. start, returns point, begins path-scan
// 2. edge, while current can send, and next can recv, advance cursor
// 3. turn, return point, find next direction (if you can)
// 4. end, current can't send or next can't recv, return final point (if not already returned)
// 5. exit, same as end, but signal end of iteration
//
//
// * > point and direction
//
// 0. test if point exists
// 1. test if you can go that direction
// 2. if so, scan in that direction (returns last point *after* initial, character)
// 2a. mark last point as path point
// 3. if not, pick a direction you haven't tried, go back to 1.
impl<'l> Iterator for PathIter<'l> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
if self.lines.at(self.p).is_none() {
return None;
} else if self.start {
self.start = false;
return Some(self.p);
}
let mut cant_go = vec![self.d.rev()];
loop {
// println!("PathIter {{ p: {:?}, d: {:?} }}", self.p, self.d);
if let (Some(true), Some(true)) = (
self.lines.at(self.p).map(|c| can_go(c, self.d)),
self
.lines
.in_dir(self.p, self.d)
.map(|(_, c)| can_go(c, self.d.rev())),
) {
if let Some((pnext, c)) = scan_dir(self.lines, self.p, self.d) {
// println!("scan_dir = Some(({:?}, {:?}))", pnext, c);
self.p = pnext;
return Some(pnext);
}
}
cant_go.push(self.d);
if let Some(dnext) = Direction::VALUES
.into_iter()
.filter(|d| !cant_go.contains(d))
.next()
{
self.d = dnext;
continue;
} else {
return None;
}
}
}
}
fn scan_path(lines: &Lines, p: Point, d: Direction) -> Vec<Point> {
if !lines.at(p).map(|c| can_go(c, d)).unwrap_or(false) {
return vec![];
}
let mut ret = vec![];
let mut it = PathIter::new(&lines, p, d);
while let Some(next) = it.next() {
if ret.contains(&next) {
return ret;
}
ret.push(next);
}
ret
}
pub fn boxes(lines: &Lines) -> Vec<TBox> {
top_lefts(lines)
.into_iter()
.filter_map(|tl| {
let tr = scan_dir(lines, tl.0, Direction::Rt)?;
let bl = scan_dir(lines, tl.0, Direction::Dn)?;
let br = scan_dir(lines, bl.0, Direction::Rt)?;
let br2 = scan_dir(lines, tr.0, Direction::Dn)?;
if br2 != br {
return None;
}
Some(TBox(tl.0, br.0))
})
.collect()
}
pub fn border_in_dir(b: TBox, d: Direction) -> TBox {
use Direction::*;
match d {
Up => TBox::from((b.0, Point::from((b.0.row, b.1.col)))),
Dn => TBox::from((Point::from((b.1.row, b.0.col)), b.1)),
Lt => TBox::from((b.0, Point::from((b.1.row, b.0.col)))),
Rt => TBox::from((Point::from((b.0.row, b.1.col)), b.1)),
}
}
fn border(b: TBox) -> Vec<(Point, Direction)> {
Direction::VALUES
.into_iter()
// get eg top border
.map(|d| (border_in_dir(b, d), d))
// push top border up to get just outside top border
.filter_map(|(b, d)| b.in_dir(d).map(|b| (b, d)))
// get points of just-outside-top-border
.flat_map(|(b, d)| b.points().map(move |p| (p, d)))
.collect()
}
pub fn path_contains(pth: &Vec<Point>, p: Point) -> bool {
let mut it = pth.iter();
let fst = it.next();
if fst.is_none() {
return false;
}
let mut last = fst.unwrap();
if *last == p {
return true;
}
while let Some(next) = it.next() {
if TBox::from((*last, *next)).contains(p) {
return true;
}
last = next;
}
false
}
pub fn edges(lines: &Lines, boxes: &Vec<TBox>) -> HashSet<Vec<Point>> {
// ###
// ,---. ##
// #| |,--. find all possible starts for edges between boxes
// '---''--'
// ### ##
boxes
.iter()
.map(|b| border(*b))
.flat_map(|v| v.into_iter())
.filter(|(p, d)| lines.at(*p).map(|c| can_go(c, d.rev())).unwrap_or(false))
.map(|(p, d)| scan_path(lines, p, d))
.filter(|pth| pth.len() > 0)
.fold(HashSet::new(), |mut map, mut pth| {
// checking the forward path then inserting
// the reverse means we don't double-count paths
if !map.contains(&pth) {
pth.reverse();
map.insert(pth);
}
map
})
}
#[cfg(test)]
mod test {
use super::*;
fn lines() -> Lines {
let lines: Vec<Vec<char>> = r#"
,---.,-----------.
| |',-. |
| | | | ,-----'
'---' | | |
| |--'
'-'
"#
.lines()
.map(|l| l.chars().collect())
.collect();
Lines(lines)
}
#[test]
fn test_top_lefts() {
let lines = lines();
assert_eq!(
vec![
(Point { row: 1, col: 1 }, ','),
(Point { row: 1, col: 6 }, ','),
(Point { row: 2, col: 7 }, ','),
(Point { row: 3, col: 12 }, ','),
],
top_lefts(&lines)
);
}
#[test]
fn test_scan_dir() {
let lines = lines();
let tl = Point { row: 1, col: 1 };
let tr = Point { row: 1, col: 5 };
let bl = Point { row: 4, col: 1 };
let br = Point { row: 4, col: 5 };
assert_eq!(Some((tr, '.')), scan_dir(&lines, tl, Direction::Rt),);
assert_eq!(Some((bl, '\'')), scan_dir(&lines, tl, Direction::Dn),);
assert_eq!(Some((br, '\'')), scan_dir(&lines, bl, Direction::Rt),);
assert_eq!(
Some((Point { row: 1, col: 18 }, '.')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Rt),
);
assert_eq!(
Some((Point { row: 2, col: 6 }, '\'')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Dn),
);
assert_eq!(
Some((Point { row: 1, col: 6 }, ',')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Lt),
);
}
#[test]
fn test_boxes() {
let lines = lines();
assert_eq!(
vec![
TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 }),
TBox(Point { row: 2, col: 7 }, Point { row: 6, col: 9 }),
],
boxes(&lines),
);
}
#[test]
fn test_scan_path() {
let lines = lines();
let mut pth = vec![
Point { row: 2, col: 6 },
Point { row: 1, col: 6 },
Point { row: 1, col: 18 },
Point { row: 3, col: 18 },
Point { row: 3, col: 12 },
Point { row: 5, col: 12 },
Point { row: 5, col: 10 },
];
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// should work in reverse
pth.reverse();
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// |--' |--'
// ^ ^
// instead of the beginning, start a little further along
pth[0].col += 1;
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
}
#[test]
fn test_box_contains() {
let lb = TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 });
assert_eq!(true, lb.contains(lb.0) && lb.contains(lb.1));
assert_eq!(false, lb.contains(Point { row: 5, col: 4 }),);
}
#[test]
fn test_border() {
let b = TBox(Point { row: 1, col: 1 }, Point { row: 3, col: 4 });
use Direction::*;
assert_eq!(
vec![
(Point { row: 0, col: 1 }, Up),
(Point { row: 0, col: 2 }, Up),
(Point { row: 0, col: 3 }, Up),
(Point { row: 0, col: 4 }, Up),
(Point { row: 4, col: 1 }, Dn),
(Point { row: 4, col: 2 }, Dn),
(Point { row: 4, col: 3 }, Dn),
(Point { row: 4, col: 4 }, Dn),
(Point { row: 1, col: 0 }, Lt),
(Point { row: 2, col: 0 }, Lt),
(Point { row: 3, col: 0 }, Lt),
(Point { row: 1, col: 5 }, Rt),
(Point { row: 2, col: 5 }, Rt),
(Point { row: 3, col: 5 }, Rt),
],
border(b)
)
}
}
| scan_dir | identifier_name |
parse.rs | use std::collections::HashSet;
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct Point {
pub row: usize,
pub col: usize,
}
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct TBox(pub Point, pub Point);
pub struct Lines(pub Vec<Vec<char>>);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Direction {
Up,
Dn,
Lt,
Rt,
}
impl Direction {
const VALUES: [Direction; 4] = [Direction::Up, Direction::Dn, Direction::Lt, Direction::Rt];
#[inline]
fn rev(self) -> Direction {
use Direction::*;
match self {
Up => Dn,
Dn => Up,
Lt => Rt,
Rt => Lt,
}
}
}
#[inline]
fn can_go(c: char, d: Direction) -> bool {
use Direction::*;
match (c, d) {
('|', Up | Dn) | ('-', Lt | Rt) => true,
('.', Dn | Lt) => true,
(',', Dn | Rt) => true,
('\'', Up | Lt | Rt) => true,
('<', Rt) | ('>', Lt) | ('^', Up) | ('v', Dn) => true,
_ => false,
}
}
impl From<(usize, usize)> for Point {
#[inline]
fn from(p: (usize, usize)) -> Point {
Point { row: p.0, col: p.1 }
}
}
impl std::fmt::Debug for Point {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", (self.row, self.col))
}
}
impl Point {
#[inline]
fn in_dir(self, d: Direction) -> Option<Point> {
use Direction::*;
match d {
Dn => Some(Point::from((self.row + 1, self.col))),
Up => {
if self.row == 0 {
None
} else {
Some(Point::from((self.row - 1, self.col)))
}
}
Rt => Some(Point::from((self.row, self.col + 1))),
Lt => {
if self.col == 0 {
None
} else {
Some(Point::from((self.row, self.col - 1)))
}
}
}
}
}
impl From<(Point, Point)> for TBox {
#[inline]
fn from(b: (Point, Point)) -> TBox {
use std::cmp::{max, min};
TBox(
(min(b.0.row, b.1.row), min(b.0.col, b.1.col)).into(),
(max(b.0.row, b.1.row), max(b.0.col, b.1.col)).into(),
)
}
}
impl std::fmt::Debug for TBox {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[{:?} {:?}]", self.0, self.1)
}
}
impl TBox {
#[inline]
pub fn contains(&self, p: Point) -> bool {
p.row >= self.0.row && p.row <= self.1.row && p.col >= self.0.col && p.col <= self.1.col
}
#[inline]
pub fn intersects(&self, b: TBox) -> bool {
!(self.1.row < b.0.row
|| self.0.row > b.1.row
|| self.1.col < b.0.col
|| self.0.col > b.1.col)
}
#[inline]
fn points<'p>(self) -> impl Iterator<Item = Point> + 'p {
let col_iter = move |row| {
(self.0.col..=self.1.col)
.into_iter()
.map(move |col| Point { row, col })
};
(self.0.row..=self.1.row).into_iter().flat_map(col_iter)
}
#[inline]
pub fn in_dir(&self, d: Direction) -> Option<TBox> {
self
.0
.in_dir(d)
.and_then(|p0| self.1.in_dir(d).map(|p1| TBox(p0, p1)))
}
}
impl std::ops::Index<Point> for Lines {
type Output = char;
fn index(&self, p: Point) -> &char {
self.0[p.row].index(p.col)
}
}
impl std::ops::IndexMut<Point> for Lines {
fn index_mut(&mut self, p: Point) -> &mut char {
self.0[p.row].index_mut(p.col)
}
}
impl Lines {
fn at(&self, p: Point) -> Option<char> {
if p.row as usize >= self.0.len() {
return None;
}
let line = &self.0[p.row as usize];
if p.col as usize >= line.len() {
return None;
}
Some(line[p.col as usize])
}
fn in_dir(&self, p: Point, d: Direction) -> Option<(Point, char)> {
p.in_dir(d).and_then(|p| self.at(p).map(|c| (p, c)))
}
fn visit(&self, mut pred: impl FnMut(Point, char)) {
for r in 0..self.0.len() {
for c in 0..self.0[r].len() {
pred((r, c).into(), self.0[r][c]);
}
}
}
}
fn top_lefts(lines: &Lines) -> Vec<(Point, char)> {
let mut ret = vec![];
for row in 0..lines.0.len() {
for col in 0..lines.0[row].len() {
let c = lines.0[row][col];
let p = Point { row, col };
if can_go(c, Direction::Dn)
&& can_go(c, Direction::Rt)
&& lines
.in_dir(p, Direction::Rt)
.map(|(_, c)| can_go(c, Direction::Lt))
.unwrap_or(false)
&& lines
.in_dir(p, Direction::Dn)
.map(|(_, c)| can_go(c, Direction::Up))
.unwrap_or(false)
|
}
}
ret
}
fn scan_dir(lines: &Lines, mut p: Point, d: Direction) -> Option<(Point, char)> {
while let Some((q, c)) = lines.in_dir(p, d) {
// p
// --* < can't connect
//
if !can_go(c, d.rev()) {
return lines.at(p).map(|c| (p, c));
}
p = q;
// p
// --. < can connect, can't continue
//
if !can_go(c, d) {
return Some((p, c));
}
}
lines.at(p).map(|c| (p, c))
}
struct PathIter<'l> {
start: bool,
lines: &'l Lines,
p: Point,
d: Direction,
}
impl<'l> PathIter<'l> {
fn new(lines: &'l Lines, p: Point, d: Direction) -> PathIter<'l> {
PathIter {
start: true,
lines,
p,
d,
}
}
}
// * 4
// 1 2 |
// |----' 3
//
// 1. start, returns point, begins path-scan
// 2. edge, while current can send, and next can recv, advance cursor
// 3. turn, return point, find next direction (if you can)
// 4. end, current can't send or next can't recv, return final point (if not already returned)
// 5. exit, same as end, but signal end of iteration
//
//
// * > point and direction
//
// 0. test if point exists
// 1. test if you can go that direction
// 2. if so, scan in that direction (returns last point *after* initial, character)
// 2a. mark last point as path point
// 3. if not, pick a direction you haven't tried, go back to 1.
impl<'l> Iterator for PathIter<'l> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
if self.lines.at(self.p).is_none() {
return None;
} else if self.start {
self.start = false;
return Some(self.p);
}
let mut cant_go = vec![self.d.rev()];
loop {
// println!("PathIter {{ p: {:?}, d: {:?} }}", self.p, self.d);
if let (Some(true), Some(true)) = (
self.lines.at(self.p).map(|c| can_go(c, self.d)),
self
.lines
.in_dir(self.p, self.d)
.map(|(_, c)| can_go(c, self.d.rev())),
) {
if let Some((pnext, c)) = scan_dir(self.lines, self.p, self.d) {
// println!("scan_dir = Some(({:?}, {:?}))", pnext, c);
self.p = pnext;
return Some(pnext);
}
}
cant_go.push(self.d);
if let Some(dnext) = Direction::VALUES
.into_iter()
.filter(|d| !cant_go.contains(d))
.next()
{
self.d = dnext;
continue;
} else {
return None;
}
}
}
}
fn scan_path(lines: &Lines, p: Point, d: Direction) -> Vec<Point> {
if !lines.at(p).map(|c| can_go(c, d)).unwrap_or(false) {
return vec![];
}
let mut ret = vec![];
let mut it = PathIter::new(&lines, p, d);
while let Some(next) = it.next() {
if ret.contains(&next) {
return ret;
}
ret.push(next);
}
ret
}
pub fn boxes(lines: &Lines) -> Vec<TBox> {
top_lefts(lines)
.into_iter()
.filter_map(|tl| {
let tr = scan_dir(lines, tl.0, Direction::Rt)?;
let bl = scan_dir(lines, tl.0, Direction::Dn)?;
let br = scan_dir(lines, bl.0, Direction::Rt)?;
let br2 = scan_dir(lines, tr.0, Direction::Dn)?;
if br2 != br {
return None;
}
Some(TBox(tl.0, br.0))
})
.collect()
}
pub fn border_in_dir(b: TBox, d: Direction) -> TBox {
use Direction::*;
match d {
Up => TBox::from((b.0, Point::from((b.0.row, b.1.col)))),
Dn => TBox::from((Point::from((b.1.row, b.0.col)), b.1)),
Lt => TBox::from((b.0, Point::from((b.1.row, b.0.col)))),
Rt => TBox::from((Point::from((b.0.row, b.1.col)), b.1)),
}
}
fn border(b: TBox) -> Vec<(Point, Direction)> {
Direction::VALUES
.into_iter()
// get eg top border
.map(|d| (border_in_dir(b, d), d))
// push top border up to get just outside top border
.filter_map(|(b, d)| b.in_dir(d).map(|b| (b, d)))
// get points of just-outside-top-border
.flat_map(|(b, d)| b.points().map(move |p| (p, d)))
.collect()
}
pub fn path_contains(pth: &Vec<Point>, p: Point) -> bool {
let mut it = pth.iter();
let fst = it.next();
if fst.is_none() {
return false;
}
let mut last = fst.unwrap();
if *last == p {
return true;
}
while let Some(next) = it.next() {
if TBox::from((*last, *next)).contains(p) {
return true;
}
last = next;
}
false
}
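// Illustrative sketch (added for exposition, not in the original source):
// because `path_contains` treats the polyline as a chain of axis-aligned
// segments, it can double as a cheap hit test, e.g. mapping a cursor position
// back to the traced edge under it. The helper name is made up for this example.
#[allow(dead_code)]
fn edge_under_cursor(paths: &[Vec<Point>], cursor: Point) -> Option<usize> {
    // Index of the first traced edge whose segments cover `cursor`, if any.
    paths.iter().position(|pth| path_contains(pth, cursor))
}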
pub fn edges(lines: &Lines, boxes: &Vec<TBox>) -> HashSet<Vec<Point>> {
// ###
// ,---. ##
// #| |,--. find all possible starts for edges between boxes
// '---''--'
// ### ##
boxes
.iter()
.map(|b| border(*b))
.flat_map(|v| v.into_iter())
.filter(|(p, d)| lines.at(*p).map(|c| can_go(c, d.rev())).unwrap_or(false))
.map(|(p, d)| scan_path(lines, p, d))
.filter(|pth| pth.len() > 0)
.fold(HashSet::new(), |mut map, mut pth| {
// checking the forward path then inserting
// the reverse means we don't double-count paths
if !map.contains(&pth) {
pth.reverse();
map.insert(pth);
}
map
})
}
#[cfg(test)]
mod test {
use super::*;
fn lines() -> Lines {
let lines: Vec<Vec<char>> = r#"
,---.,-----------.
| |',-. |
| | | | ,-----'
'---' | | |
| |--'
'-'
"#
.lines()
.map(|l| l.chars().collect())
.collect();
Lines(lines)
}
#[test]
fn test_top_lefts() {
let lines = lines();
assert_eq!(
vec![
(Point { row: 1, col: 1 }, ','),
(Point { row: 1, col: 6 }, ','),
(Point { row: 2, col: 7 }, ','),
(Point { row: 3, col: 12 }, ','),
],
top_lefts(&lines)
);
}
#[test]
fn test_scan_dir() {
let lines = lines();
let tl = Point { row: 1, col: 1 };
let tr = Point { row: 1, col: 5 };
let bl = Point { row: 4, col: 1 };
let br = Point { row: 4, col: 5 };
assert_eq!(Some((tr, '.')), scan_dir(&lines, tl, Direction::Rt),);
assert_eq!(Some((bl, '\'')), scan_dir(&lines, tl, Direction::Dn),);
assert_eq!(Some((br, '\'')), scan_dir(&lines, bl, Direction::Rt),);
assert_eq!(
Some((Point { row: 1, col: 18 }, '.')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Rt),
);
assert_eq!(
Some((Point { row: 2, col: 6 }, '\'')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Dn),
);
assert_eq!(
Some((Point { row: 1, col: 6 }, ',')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Lt),
);
}
#[test]
fn test_boxes() {
let lines = lines();
assert_eq!(
vec![
TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 }),
TBox(Point { row: 2, col: 7 }, Point { row: 6, col: 9 }),
],
boxes(&lines),
);
}
#[test]
fn test_scan_path() {
let lines = lines();
let mut pth = vec![
Point { row: 2, col: 6 },
Point { row: 1, col: 6 },
Point { row: 1, col: 18 },
Point { row: 3, col: 18 },
Point { row: 3, col: 12 },
Point { row: 5, col: 12 },
Point { row: 5, col: 10 },
];
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// should work in reverse
pth.reverse();
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// |--' |--'
// ^ ^
// instead of the beginning, start a little further along
pth[0].col += 1;
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
}
#[test]
fn test_box_contains() {
let lb = TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 });
assert_eq!(true, lb.contains(lb.0) && lb.contains(lb.1));
assert_eq!(false, lb.contains(Point { row: 5, col: 4 }),);
}
#[test]
fn test_border() {
let b = TBox(Point { row: 1, col: 1 }, Point { row: 3, col: 4 });
use Direction::*;
assert_eq!(
vec![
(Point { row: 0, col: 1 }, Up),
(Point { row: 0, col: 2 }, Up),
(Point { row: 0, col: 3 }, Up),
(Point { row: 0, col: 4 }, Up),
(Point { row: 4, col: 1 }, Dn),
(Point { row: 4, col: 2 }, Dn),
(Point { row: 4, col: 3 }, Dn),
(Point { row: 4, col: 4 }, Dn),
(Point { row: 1, col: 0 }, Lt),
(Point { row: 2, col: 0 }, Lt),
(Point { row: 3, col: 0 }, Lt),
(Point { row: 1, col: 5 }, Rt),
(Point { row: 2, col: 5 }, Rt),
(Point { row: 3, col: 5 }, Rt),
],
border(b)
)
}
}
| {
ret.push((p, c));
} | conditional_block |
parse.rs | use std::collections::HashSet;
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct Point {
pub row: usize,
pub col: usize,
}
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct TBox(pub Point, pub Point);
pub struct Lines(pub Vec<Vec<char>>);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Direction {
Up,
Dn,
Lt,
Rt,
}
impl Direction {
const VALUES: [Direction; 4] = [Direction::Up, Direction::Dn, Direction::Lt, Direction::Rt];
#[inline]
fn rev(self) -> Direction {
use Direction::*;
match self {
Up => Dn,
Dn => Up,
Lt => Rt,
Rt => Lt,
}
}
}
#[inline]
fn can_go(c: char, d: Direction) -> bool {
use Direction::*;
match (c, d) {
('|', Up | Dn) | ('-', Lt | Rt) => true,
('.', Dn | Lt) => true,
(',', Dn | Rt) => true,
('\'', Up | Lt | Rt) => true,
('<', Rt) | ('>', Lt) | ('^', Up) | ('v', Dn) => true,
_ => false,
}
}
impl From<(usize, usize)> for Point {
#[inline]
fn from(p: (usize, usize)) -> Point {
Point { row: p.0, col: p.1 }
}
}
impl std::fmt::Debug for Point {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", (self.row, self.col))
}
}
impl Point {
#[inline]
fn in_dir(self, d: Direction) -> Option<Point> {
use Direction::*;
match d {
Dn => Some(Point::from((self.row + 1, self.col))),
Up => {
if self.row == 0 {
None
} else {
Some(Point::from((self.row - 1, self.col)))
}
}
Rt => Some(Point::from((self.row, self.col + 1))),
Lt => {
if self.col == 0 {
None
} else {
Some(Point::from((self.row, self.col - 1)))
}
}
}
}
}
impl From<(Point, Point)> for TBox {
#[inline]
fn from(b: (Point, Point)) -> TBox {
use std::cmp::{max, min};
TBox(
(min(b.0.row, b.1.row), min(b.0.col, b.1.col)).into(),
(max(b.0.row, b.1.row), max(b.0.col, b.1.col)).into(),
)
}
}
impl std::fmt::Debug for TBox {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[{:?} {:?}]", self.0, self.1)
}
}
impl TBox {
#[inline]
pub fn contains(&self, p: Point) -> bool {
p.row >= self.0.row && p.row <= self.1.row && p.col >= self.0.col && p.col <= self.1.col
}
#[inline]
pub fn intersects(&self, b: TBox) -> bool {
!(self.1.row < b.0.row
|| self.0.row > b.1.row
|| self.1.col < b.0.col
|| self.0.col > b.1.col)
}
#[inline]
fn points<'p>(self) -> impl Iterator<Item = Point> + 'p {
let col_iter = move |row| {
(self.0.col..=self.1.col)
.into_iter()
.map(move |col| Point { row, col })
};
(self.0.row..=self.1.row).into_iter().flat_map(col_iter)
}
#[inline]
pub fn in_dir(&self, d: Direction) -> Option<TBox> {
self
.0
.in_dir(d)
.and_then(|p0| self.1.in_dir(d).map(|p1| TBox(p0, p1)))
}
}
impl std::ops::Index<Point> for Lines {
type Output = char;
fn index(&self, p: Point) -> &char {
self.0[p.row].index(p.col)
}
}
impl std::ops::IndexMut<Point> for Lines {
fn index_mut(&mut self, p: Point) -> &mut char {
self.0[p.row].index_mut(p.col)
}
}
impl Lines {
fn at(&self, p: Point) -> Option<char> {
if p.row as usize >= self.0.len() {
return None;
}
let line = &self.0[p.row as usize];
if p.col as usize >= line.len() {
return None;
}
Some(line[p.col as usize])
}
fn in_dir(&self, p: Point, d: Direction) -> Option<(Point, char)> {
p.in_dir(d).and_then(|p| self.at(p).map(|c| (p, c)))
}
fn visit(&self, mut pred: impl FnMut(Point, char)) {
for r in 0..self.0.len() {
for c in 0..self.0[r].len() {
pred((r, c).into(), self.0[r][c]);
}
}
}
}
fn top_lefts(lines: &Lines) -> Vec<(Point, char)> {
let mut ret = vec![];
for row in 0..lines.0.len() {
for col in 0..lines.0[row].len() {
let c = lines.0[row][col];
let p = Point { row, col };
if can_go(c, Direction::Dn)
&& can_go(c, Direction::Rt)
&& lines
.in_dir(p, Direction::Rt)
.map(|(_, c)| can_go(c, Direction::Lt))
.unwrap_or(false)
&& lines
.in_dir(p, Direction::Dn)
.map(|(_, c)| can_go(c, Direction::Up))
.unwrap_or(false)
{
ret.push((p, c));
}
}
}
ret
}
fn scan_dir(lines: &Lines, mut p: Point, d: Direction) -> Option<(Point, char)> {
while let Some((q, c)) = lines.in_dir(p, d) {
// p
// --* < can't connect
//
if !can_go(c, d.rev()) {
return lines.at(p).map(|c| (p, c));
}
p = q;
// p
// --. < can connect, can't continue
//
if !can_go(c, d) {
return Some((p, c));
}
}
lines.at(p).map(|c| (p, c))
}
struct PathIter<'l> {
start: bool,
lines: &'l Lines,
p: Point,
d: Direction,
}
impl<'l> PathIter<'l> {
fn new(lines: &'l Lines, p: Point, d: Direction) -> PathIter<'l> {
PathIter {
start: true,
lines,
p,
d,
}
}
}
// * 4
// 1 2 |
// |----' 3
//
// 1. start, returns point, begins path-scan
// 2. edge, while current can send, and next can recv, advance cursor
// 3. turn, return point, find next direction (if you can)
// 4. end, current can't send or next can't recv, return final point (if not already returned)
// 5. exit, same as end, but signal end of iteration
//
//
// * > point and direction
//
// 0. test if point exists
// 1. test if you can go that direction
// 2. if so, scan in that direction (returns last point *after* initial, character)
// 2a. mark last point as path point
// 3. if not, pick a direction you haven't tried, go back to 1.
impl<'l> Iterator for PathIter<'l> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
if self.lines.at(self.p).is_none() {
return None;
} else if self.start {
self.start = false;
return Some(self.p);
}
let mut cant_go = vec![self.d.rev()];
loop {
// println!("PathIter {{ p: {:?}, d: {:?} }}", self.p, self.d);
if let (Some(true), Some(true)) = (
self.lines.at(self.p).map(|c| can_go(c, self.d)),
self
.lines
.in_dir(self.p, self.d)
.map(|(_, c)| can_go(c, self.d.rev())),
) {
if let Some((pnext, c)) = scan_dir(self.lines, self.p, self.d) {
// println!("scan_dir = Some(({:?}, {:?}))", pnext, c);
self.p = pnext;
return Some(pnext);
}
}
cant_go.push(self.d);
if let Some(dnext) = Direction::VALUES
.into_iter()
.filter(|d| !cant_go.contains(d))
.next()
{
self.d = dnext;
continue;
} else {
return None; | }
fn scan_path(lines: &Lines, p: Point, d: Direction) -> Vec<Point> {
if !lines.at(p).map(|c| can_go(c, d)).unwrap_or(false) {
return vec![];
}
let mut ret = vec![];
let mut it = PathIter::new(&lines, p, d);
while let Some(next) = it.next() {
if ret.contains(&next) {
return ret;
}
ret.push(next);
}
ret
}
pub fn boxes(lines: &Lines) -> Vec<TBox> {
top_lefts(lines)
.into_iter()
.filter_map(|tl| {
let tr = scan_dir(lines, tl.0, Direction::Rt)?;
let bl = scan_dir(lines, tl.0, Direction::Dn)?;
let br = scan_dir(lines, bl.0, Direction::Rt)?;
let br2 = scan_dir(lines, tr.0, Direction::Dn)?;
if br2 != br {
return None;
}
Some(TBox(tl.0, br.0))
})
.collect()
}
pub fn border_in_dir(b: TBox, d: Direction) -> TBox {
use Direction::*;
match d {
Up => TBox::from((b.0, Point::from((b.0.row, b.1.col)))),
Dn => TBox::from((Point::from((b.1.row, b.0.col)), b.1)),
Lt => TBox::from((b.0, Point::from((b.1.row, b.0.col)))),
Rt => TBox::from((Point::from((b.0.row, b.1.col)), b.1)),
}
}
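// Illustrative check (added for exposition, not in the original source):
// `border_in_dir` collapses a box to the one-cell-thick strip on the requested
// side, still expressed as a TBox.
#[allow(dead_code)]
fn border_in_dir_example() {
    let b = TBox(Point { row: 1, col: 1 }, Point { row: 3, col: 4 });
    // The top strip spans the first row; the left strip spans the first column.
    debug_assert_eq!(
        border_in_dir(b, Direction::Up),
        TBox(Point { row: 1, col: 1 }, Point { row: 1, col: 4 })
    );
    debug_assert_eq!(
        border_in_dir(b, Direction::Lt),
        TBox(Point { row: 1, col: 1 }, Point { row: 3, col: 1 })
    );
}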
fn border(b: TBox) -> Vec<(Point, Direction)> {
Direction::VALUES
.into_iter()
// get eg top border
.map(|d| (border_in_dir(b, d), d))
// push top border up to get just outside top border
.filter_map(|(b, d)| b.in_dir(d).map(|b| (b, d)))
// get points of just-outside-top-border
.flat_map(|(b, d)| b.points().map(move |p| (p, d)))
.collect()
}
pub fn path_contains(pth: &Vec<Point>, p: Point) -> bool {
let mut it = pth.iter();
let fst = it.next();
if fst.is_none() {
return false;
}
let mut last = fst.unwrap();
if *last == p {
return true;
}
while let Some(next) = it.next() {
if TBox::from((*last, *next)).contains(p) {
return true;
}
last = next;
}
false
}
pub fn edges(lines: &Lines, boxes: &Vec<TBox>) -> HashSet<Vec<Point>> {
// ###
// ,---. ##
// #| |,--. find all possible starts for edges between boxes
// '---''--'
// ### ##
boxes
.iter()
.map(|b| border(*b))
.flat_map(|v| v.into_iter())
.filter(|(p, d)| lines.at(*p).map(|c| can_go(c, d.rev())).unwrap_or(false))
.map(|(p, d)| scan_path(lines, p, d))
.filter(|pth| pth.len() > 0)
.fold(HashSet::new(), |mut map, mut pth| {
// checking the forward path then inserting
// the reverse means we don't double-count paths
if !map.contains(&pth) {
pth.reverse();
map.insert(pth);
}
map
})
}
#[cfg(test)]
mod test {
use super::*;
fn lines() -> Lines {
let lines: Vec<Vec<char>> = r#"
,---.,-----------.
| |',-. |
| | | | ,-----'
'---' | | |
| |--'
'-'
"#
.lines()
.map(|l| l.chars().collect())
.collect();
Lines(lines)
}
#[test]
fn test_top_lefts() {
let lines = lines();
assert_eq!(
vec![
(Point { row: 1, col: 1 }, ','),
(Point { row: 1, col: 6 }, ','),
(Point { row: 2, col: 7 }, ','),
(Point { row: 3, col: 12 }, ','),
],
top_lefts(&lines)
);
}
#[test]
fn test_scan_dir() {
let lines = lines();
let tl = Point { row: 1, col: 1 };
let tr = Point { row: 1, col: 5 };
let bl = Point { row: 4, col: 1 };
let br = Point { row: 4, col: 5 };
assert_eq!(Some((tr, '.')), scan_dir(&lines, tl, Direction::Rt),);
assert_eq!(Some((bl, '\'')), scan_dir(&lines, tl, Direction::Dn),);
assert_eq!(Some((br, '\'')), scan_dir(&lines, bl, Direction::Rt),);
assert_eq!(
Some((Point { row: 1, col: 18 }, '.')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Rt),
);
assert_eq!(
Some((Point { row: 2, col: 6 }, '\'')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Dn),
);
assert_eq!(
Some((Point { row: 1, col: 6 }, ',')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Lt),
);
}
#[test]
fn test_boxes() {
let lines = lines();
assert_eq!(
vec![
TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 }),
TBox(Point { row: 2, col: 7 }, Point { row: 6, col: 9 }),
],
boxes(&lines),
);
}
#[test]
fn test_scan_path() {
let lines = lines();
let mut pth = vec![
Point { row: 2, col: 6 },
Point { row: 1, col: 6 },
Point { row: 1, col: 18 },
Point { row: 3, col: 18 },
Point { row: 3, col: 12 },
Point { row: 5, col: 12 },
Point { row: 5, col: 10 },
];
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// should work in reverse
pth.reverse();
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// |--' |--'
// ^ ^
// instead of the beginning, start a little further along
pth[0].col += 1;
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
}
#[test]
fn test_box_contains() {
let lb = TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 });
assert_eq!(true, lb.contains(lb.0) && lb.contains(lb.1));
assert_eq!(false, lb.contains(Point { row: 5, col: 4 }),);
}
#[test]
fn test_border() {
let b = TBox(Point { row: 1, col: 1 }, Point { row: 3, col: 4 });
use Direction::*;
assert_eq!(
vec![
(Point { row: 0, col: 1 }, Up),
(Point { row: 0, col: 2 }, Up),
(Point { row: 0, col: 3 }, Up),
(Point { row: 0, col: 4 }, Up),
(Point { row: 4, col: 1 }, Dn),
(Point { row: 4, col: 2 }, Dn),
(Point { row: 4, col: 3 }, Dn),
(Point { row: 4, col: 4 }, Dn),
(Point { row: 1, col: 0 }, Lt),
(Point { row: 2, col: 0 }, Lt),
(Point { row: 3, col: 0 }, Lt),
(Point { row: 1, col: 5 }, Rt),
(Point { row: 2, col: 5 }, Rt),
(Point { row: 3, col: 5 }, Rt),
],
border(b)
)
}
} | }
}
} | random_line_split |
thread.rs | use {
super::process::Process,
super::*,
crate::object::*,
alloc::{boxed::Box, sync::Arc},
core::{
any::Any,
future::Future,
pin::Pin,
task::{Context, Poll, Waker},
},
spin::Mutex,
};
pub use self::thread_state::*;
mod thread_state;
/// Runnable / computation entity
///
/// ## SYNOPSIS
///
/// TODO
///
/// ## DESCRIPTION
///
/// The thread object is the construct that represents a time-shared CPU execution
/// context. Thread objects live associated to a particular
/// [Process Object](crate::task::Process) which provides the memory and the handles to other
/// objects necessary for I/O and computation.
///
/// ### Lifetime
/// Threads are created by calling [`Thread::create()`], but only start executing
/// when either [`Thread::start()`] or [`Process::start()`] are called. Both syscalls
/// take as an argument the entrypoint of the initial routine to execute.
///
/// The thread passed to [`Process::start()`] should be the first thread to start execution
/// on a process.
///
/// A thread terminates execution:
/// - by calling [`Thread::exit()`]
/// - when the parent process terminates
/// - by calling [`Task::kill()`]
/// - after generating an exception for which there is no handler or the handler
/// decides to terminate the thread.
///
/// Returning from the entrypoint routine does not terminate execution. The last
/// action of the entrypoint should be to call [`Thread::exit()`].
///
/// Closing the last handle to a thread does not terminate execution. In order to
/// forcefully kill a thread for which there is no available handle, use
/// `KernelObject::get_child()` to obtain a handle to the thread. This method is strongly
/// discouraged. Killing a thread that is executing might leave the process in a
/// corrupt state.
///
/// Fuchsia native threads are always *detached*. That is, there is no *join()* operation
/// needed to do a clean termination. However, some runtimes above the kernel, such as
/// C11 or POSIX might require threads to be joined.
///
/// ### Signals
/// Threads provide the following signals:
/// - [`THREAD_TERMINATED`]
/// - [`THREAD_SUSPENDED`]
/// - [`THREAD_RUNNING`]
///
/// When a thread is started [`THREAD_RUNNING`] is asserted. When it is suspended
/// [`THREAD_RUNNING`] is deasserted, and [`THREAD_SUSPENDED`] is asserted. When
/// the thread is resumed [`THREAD_SUSPENDED`] is deasserted and
/// [`THREAD_RUNNING`] is asserted. When a thread terminates both
/// [`THREAD_RUNNING`] and [`THREAD_SUSPENDED`] are deasserted and
/// [`THREAD_TERMINATED`] is asserted.
///
/// Note that signals are OR'd into the state maintained by the
/// `KernelObject::wait_signal_async()` family of functions thus
/// you may see any combination of requested signals when they return.
///
/// [`Thread::create()`]: Thread::create
/// [`Thread::exit()`]: Thread::exit
/// [`Process::exit()`]: crate::task::Process::exit
/// [`THREAD_TERMINATED`]: crate::object::Signal::THREAD_TERMINATED
/// [`THREAD_SUSPENDED`]: crate::object::Signal::THREAD_SUSPENDED
/// [`THREAD_RUNNING`]: crate::object::Signal::THREAD_RUNNING
pub struct Thread {
base: KObjectBase,
proc: Arc<Process>,
ext: Box<dyn Any + Send + Sync>,
inner: Mutex<ThreadInner>,
}
impl_kobject!(Thread);
#[no_mangle]
extern "C" fn thread_check_runnable(
thread: &'static Arc<Thread>,
) -> Pin<Box<dyn Future<Output = ()>>> {
Box::pin(check_runnable_async(thread))
}
/// Check whether a thread is runnable
async fn check_runnable_async(thread: &Arc<Thread>) {
thread.check_runnable().await
}
#[export_name = "thread_set_state"]
pub fn thread_set_state(thread: &'static Arc<Thread>, state: &'static mut ThreadState) {
let mut inner = thread.inner.lock();
if let Some(old_state) = inner.state.take() {
state.general = old_state.general;
}
inner.state = Some(state);
}
#[derive(Default)]
struct ThreadInner {
/// HAL thread handle
///
/// Should be `None` before start or after terminated.
hal_thread: Option<kernel_hal::Thread>,
/// Thread state
///
/// Only be `Some` on suspended.
state: Option<&'static mut ThreadState>,
suspend_count: usize,
waker: Option<Waker>,
}
impl Thread {
/// Create a new thread.
pub fn create(proc: &Arc<Process>, name: &str, _options: u32) -> ZxResult<Arc<Self>> {
Self::create_with_ext(proc, name, ())
}
/// Create a new thread with extension info.
pub fn create_with_ext(
proc: &Arc<Process>,
name: &str,
ext: impl Any + Send + Sync,
) -> ZxResult<Arc<Self>> {
// TODO: options
let thread = Arc::new(Thread {
base: {
let base = KObjectBase::new();
base.set_name(name);
base
},
proc: proc.clone(),
ext: Box::new(ext),
inner: Mutex::new(ThreadInner::default()),
});
proc.add_thread(thread.clone());
Ok(thread)
}
/// Get the process.
pub fn proc(&self) -> &Arc<Process> {
&self.proc
}
/// Get the extension.
pub fn ext(&self) -> &Box<dyn Any + Send + Sync> {
&self.ext
}
/// Start execution on the thread.
pub fn | (
self: &Arc<Self>,
entry: usize,
stack: usize,
arg1: usize,
arg2: usize,
) -> ZxResult<()> {
let regs = GeneralRegs::new_fn(entry, stack, arg1, arg2);
self.start_with_regs(regs)
}
/// Start execution with given registers.
pub fn start_with_regs(self: &Arc<Self>, regs: GeneralRegs) -> ZxResult<()> {
let mut inner = self.inner.lock();
if inner.hal_thread.is_some() {
return Err(ZxError::BAD_STATE);
}
let hal_thread =
kernel_hal::Thread::spawn(self.clone(), regs, self.proc.vmar().table_phys());
inner.hal_thread = Some(hal_thread);
self.base.signal_set(Signal::THREAD_RUNNING);
Ok(())
}
/// Terminate the current running thread.
/// TODO: move to CurrentThread
pub fn exit(&self) {
self.proc().remove_thread(self.base.id);
self.base.signal_set(Signal::THREAD_TERMINATED);
}
/// Read one aspect of thread state.
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
let inner = self.inner.lock();
let state = inner.state.as_ref().ok_or(ZxError::BAD_STATE)?;
let len = state.read(kind, buf)?;
Ok(len)
}
#[allow(unsafe_code)]
/// Write one aspect of thread state.
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult<()> {
let mut inner = self.inner.lock();
//let state = inner.state.as_mut().ok_or(ZxError::BAD_STATE)?;
let state = inner.state.get_or_insert({
unsafe {
static mut STATE: ThreadState = ThreadState {
general: GeneralRegs::zero(),
};
&mut STATE
}
});
state.write(kind, buf)?;
Ok(())
}
pub fn suspend(&self) {
let mut inner = self.inner.lock();
inner.suspend_count += 1;
self.base.signal_set(Signal::THREAD_SUSPENDED);
info!(
"thread {} suspend_count {}",
self.base.get_name(),
inner.suspend_count
);
}
pub fn check_runnable(self: &Arc<Thread>) -> impl Future<Output = ()> {
struct RunnableChecker {
thread: Arc<Thread>,
}
impl Future for RunnableChecker {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let count = self.thread.inner.lock().suspend_count;
if count == 0 {
Poll::Ready(())
} else {
// Save the waker for later (e.g. something like self.thread.get_waker) so resume() can wake this future.
let mut inner = self.thread.inner.lock();
inner.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
RunnableChecker {
thread: self.clone(),
}
}
pub fn resume(&self) {
let mut inner = self.inner.lock();
assert_ne!(inner.suspend_count, 0);
inner.suspend_count -= 1;
if inner.suspend_count == 0 {
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
}
}
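// Illustrative sketch (added for exposition, not part of the original API):
// how a debugger-style caller might pair `suspend()` with `resume()`. While
// the suspend count is non-zero, `check_runnable()` stays pending; the final
// matching `resume()` wakes the stored waker. The helper name is made up here.
#[allow(dead_code)]
async fn with_thread_paused(thread: &Arc<Thread>, inspect: impl FnOnce(&Arc<Thread>)) {
    thread.suspend();
    inspect(thread);
    thread.resume();
    // After the matching resume the thread is considered runnable again.
    thread.check_runnable().await;
}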
#[cfg(test)]
mod tests {
use super::job::Job;
use super::*;
use std::sync::atomic::*;
use std::vec;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let _thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
}
#[test]
fn start() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
let thread1 = Thread::create(&proc, "thread1", 0).expect("failed to create thread");
// allocate stack for new thread
let mut stack = vec![0u8; 0x1000];
let stack_top = stack.as_mut_ptr() as usize + 0x1000;
// global variable for validation
static ARG1: AtomicUsize = AtomicUsize::new(0);
static ARG2: AtomicUsize = AtomicUsize::new(0);
// function for new thread
#[allow(unsafe_code)]
unsafe extern "C" fn entry(arg1: usize, arg2: usize) -> ! {
ARG1.store(arg1, Ordering::SeqCst);
ARG2.store(arg2, Ordering::SeqCst);
kernel_hal_unix::syscall_entry();
unreachable!();
}
// start a new thread
let thread_ref_count = Arc::strong_count(&thread);
let handle = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
proc.start(&thread, entry as usize, stack_top, handle.clone(), 2)
.expect("failed to start thread");
// wait 100ms for the new thread to exit
std::thread::sleep(core::time::Duration::from_millis(100));
// validate that the thread has started and received the correct arguments
assert_eq!(ARG1.load(Ordering::SeqCst), 0);
assert_eq!(ARG2.load(Ordering::SeqCst), 2);
// no other references to `Thread`
assert_eq!(Arc::strong_count(&thread), thread_ref_count);
// start again should fail
assert_eq!(
proc.start(&thread, entry as usize, stack_top, handle.clone(), 2),
Err(ZxError::BAD_STATE)
);
// start another thread should fail
assert_eq!(
proc.start(&thread1, entry as usize, stack_top, handle.clone(), 2),
Err(ZxError::BAD_STATE)
);
}
}
| start | identifier_name |
thread.rs | use {
super::process::Process,
super::*,
crate::object::*,
alloc::{boxed::Box, sync::Arc},
core::{
any::Any,
future::Future,
pin::Pin,
task::{Context, Poll, Waker},
},
spin::Mutex,
};
pub use self::thread_state::*;
mod thread_state;
/// Runnable / computation entity
///
/// ## SYNOPSIS
///
/// TODO
///
/// ## DESCRIPTION
///
/// The thread object is the construct that represents a time-shared CPU execution
/// context. Thread objects live associated to a particular
/// [Process Object](crate::task::Process) which provides the memory and the handles to other
/// objects necessary for I/O and computation.
///
/// ### Lifetime
/// Threads are created by calling [`Thread::create()`], but only start executing
/// when either [`Thread::start()`] or [`Process::start()`] are called. Both syscalls
/// take as an argument the entrypoint of the initial routine to execute.
///
/// The thread passed to [`Process::start()`] should be the first thread to start execution
/// on a process.
///
/// A thread terminates execution:
/// - by calling [`Thread::exit()`]
/// - when the parent process terminates
/// - by calling [`Task::kill()`]
/// - after generating an exception for which there is no handler or the handler
/// decides to terminate the thread.
///
/// Returning from the entrypoint routine does not terminate execution. The last
/// action of the entrypoint should be to call [`Thread::exit()`].
///
/// Closing the last handle to a thread does not terminate execution. In order to
/// forcefully kill a thread for which there is no available handle, use
/// `KernelObject::get_child()` to obtain a handle to the thread. This method is strongly
/// discouraged. Killing a thread that is executing might leave the process in a
/// corrupt state.
///
/// Fuchsia native threads are always *detached*. That is, there is no *join()* operation
/// needed to do a clean termination. However, some runtimes above the kernel, such as
/// C11 or POSIX might require threads to be joined.
///
/// ### Signals
/// Threads provide the following signals:
/// - [`THREAD_TERMINATED`]
/// - [`THREAD_SUSPENDED`]
/// - [`THREAD_RUNNING`]
///
/// When a thread is started [`THREAD_RUNNING`] is asserted. When it is suspended
/// [`THREAD_RUNNING`] is deasserted, and [`THREAD_SUSPENDED`] is asserted. When
/// the thread is resumed [`THREAD_SUSPENDED`] is deasserted and
/// [`THREAD_RUNNING`] is asserted. When a thread terminates both
/// [`THREAD_RUNNING`] and [`THREAD_SUSPENDED`] are deasserted and
/// [`THREAD_TERMINATED`] is asserted.
///
/// Note that signals are OR'd into the state maintained by the
/// `KernelObject::wait_signal_async()` family of functions thus
/// you may see any combination of requested signals when they return.
///
/// [`Thread::create()`]: Thread::create
/// [`Thread::exit()`]: Thread::exit
/// [`Process::exit()`]: crate::task::Process::exit
/// [`THREAD_TERMINATED`]: crate::object::Signal::THREAD_TERMINATED
/// [`THREAD_SUSPENDED`]: crate::object::Signal::THREAD_SUSPENDED
/// [`THREAD_RUNNING`]: crate::object::Signal::THREAD_RUNNING
pub struct Thread {
base: KObjectBase,
proc: Arc<Process>,
ext: Box<dyn Any + Send + Sync>,
inner: Mutex<ThreadInner>,
}
impl_kobject!(Thread);
#[no_mangle]
extern "C" fn thread_check_runnable(
thread: &'static Arc<Thread>,
) -> Pin<Box<dyn Future<Output = ()>>> {
Box::pin(check_runnable_async(thread))
}
/// Check whether a thread is runnable
async fn check_runnable_async(thread: &Arc<Thread>) {
thread.check_runnable().await
}
#[export_name = "thread_set_state"]
pub fn thread_set_state(thread: &'static Arc<Thread>, state: &'static mut ThreadState) {
let mut inner = thread.inner.lock();
if let Some(old_state) = inner.state.take() |
inner.state = Some(state);
}
#[derive(Default)]
struct ThreadInner {
/// HAL thread handle
///
/// Should be `None` before start or after terminated.
hal_thread: Option<kernel_hal::Thread>,
/// Thread state
///
/// Only be `Some` on suspended.
state: Option<&'static mut ThreadState>,
suspend_count: usize,
waker: Option<Waker>,
}
impl Thread {
/// Create a new thread.
pub fn create(proc: &Arc<Process>, name: &str, _options: u32) -> ZxResult<Arc<Self>> {
Self::create_with_ext(proc, name, ())
}
/// Create a new thread with extension info.
pub fn create_with_ext(
proc: &Arc<Process>,
name: &str,
ext: impl Any + Send + Sync,
) -> ZxResult<Arc<Self>> {
// TODO: options
let thread = Arc::new(Thread {
base: {
let base = KObjectBase::new();
base.set_name(name);
base
},
proc: proc.clone(),
ext: Box::new(ext),
inner: Mutex::new(ThreadInner::default()),
});
proc.add_thread(thread.clone());
Ok(thread)
}
/// Get the process.
pub fn proc(&self) -> &Arc<Process> {
&self.proc
}
/// Get the extension.
pub fn ext(&self) -> &Box<dyn Any + Send + Sync> {
&self.ext
}
/// Start execution on the thread.
pub fn start(
self: &Arc<Self>,
entry: usize,
stack: usize,
arg1: usize,
arg2: usize,
) -> ZxResult<()> {
let regs = GeneralRegs::new_fn(entry, stack, arg1, arg2);
self.start_with_regs(regs)
}
/// Start execution with given registers.
pub fn start_with_regs(self: &Arc<Self>, regs: GeneralRegs) -> ZxResult<()> {
let mut inner = self.inner.lock();
if inner.hal_thread.is_some() {
return Err(ZxError::BAD_STATE);
}
let hal_thread =
kernel_hal::Thread::spawn(self.clone(), regs, self.proc.vmar().table_phys());
inner.hal_thread = Some(hal_thread);
self.base.signal_set(Signal::THREAD_RUNNING);
Ok(())
}
/// Terminate the current running thread.
/// TODO: move to CurrentThread
pub fn exit(&self) {
self.proc().remove_thread(self.base.id);
self.base.signal_set(Signal::THREAD_TERMINATED);
}
/// Read one aspect of thread state.
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
let inner = self.inner.lock();
let state = inner.state.as_ref().ok_or(ZxError::BAD_STATE)?;
let len = state.read(kind, buf)?;
Ok(len)
}
#[allow(unsafe_code)]
/// Write one aspect of thread state.
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult<()> {
let mut inner = self.inner.lock();
//let state = inner.state.as_mut().ok_or(ZxError::BAD_STATE)?;
let state = inner.state.get_or_insert({
unsafe {
static mut STATE: ThreadState = ThreadState {
general: GeneralRegs::zero(),
};
&mut STATE
}
});
state.write(kind, buf)?;
Ok(())
}
pub fn suspend(&self) {
let mut inner = self.inner.lock();
inner.suspend_count += 1;
self.base.signal_set(Signal::THREAD_SUSPENDED);
info!(
"thread {} suspend_count {}",
self.base.get_name(),
inner.suspend_count
);
}
pub fn check_runnable(self: &Arc<Thread>) -> impl Future<Output = ()> {
struct RunnableChecker {
thread: Arc<Thread>,
}
impl Future for RunnableChecker {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let count = self.thread.inner.lock().suspend_count;
if count == 0 {
Poll::Ready(())
} else {
// Save the waker for later (e.g. something like self.thread.get_waker) so resume() can wake this future.
let mut inner = self.thread.inner.lock();
inner.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
RunnableChecker {
thread: self.clone(),
}
}
pub fn resume(&self) {
let mut inner = self.inner.lock();
assert_ne!(inner.suspend_count, 0);
inner.suspend_count -= 1;
if inner.suspend_count == 0 {
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
}
}
#[cfg(test)]
mod tests {
use super::job::Job;
use super::*;
use std::sync::atomic::*;
use std::vec;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let _thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
}
#[test]
fn start() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
let thread1 = Thread::create(&proc, "thread1", 0).expect("failed to create thread");
// allocate stack for new thread
let mut stack = vec![0u8; 0x1000];
let stack_top = stack.as_mut_ptr() as usize + 0x1000;
// global variable for validation
static ARG1: AtomicUsize = AtomicUsize::new(0);
static ARG2: AtomicUsize = AtomicUsize::new(0);
// function for new thread
#[allow(unsafe_code)]
unsafe extern "C" fn entry(arg1: usize, arg2: usize) ->! {
ARG1.store(arg1, Ordering::SeqCst);
ARG2.store(arg2, Ordering::SeqCst);
kernel_hal_unix::syscall_entry();
unreachable!();
}
// start a new thread
let thread_ref_count = Arc::strong_count(&thread);
let handle = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
proc.start(&thread, entry as usize, stack_top, handle.clone(), 2)
.expect("failed to start thread");
// wait 100ms for the new thread to exit
std::thread::sleep(core::time::Duration::from_millis(100));
// validate the thread has started and received correct arguments
assert_eq!(ARG1.load(Ordering::SeqCst), 0);
assert_eq!(ARG2.load(Ordering::SeqCst), 2);
// no other references to `Thread`
assert_eq!(Arc::strong_count(&thread), thread_ref_count);
// start again should fail
assert_eq!(
proc.start(&thread, entry as usize, stack_top, handle.clone(), 2),
Err(ZxError::BAD_STATE)
);
// start another thread should fail
assert_eq!(
proc.start(&thread1, entry as usize, stack_top, handle.clone(), 2),
Err(ZxError::BAD_STATE)
);
}
}
| {
state.general = old_state.general;
} | conditional_block |
thread.rs | use {
super::process::Process,
super::*,
crate::object::*,
alloc::{boxed::Box, sync::Arc},
core::{
any::Any,
future::Future,
pin::Pin,
task::{Context, Poll, Waker},
},
spin::Mutex,
};
pub use self::thread_state::*;
mod thread_state;
/// Runnable / computation entity
///
/// ## SYNOPSIS
///
/// TODO
///
/// ## DESCRIPTION
///
/// The thread object is the construct that represents a time-shared CPU execution
/// context. Thread objects are associated with a particular
/// [Process Object](crate::task::Process) which provides the memory and the handles to other
/// objects necessary for I/O and computation.
///
/// ### Lifetime
/// Threads are created by calling [`Thread::create()`], but only start executing
/// when either [`Thread::start()`] or [`Process::start()`] are called. Both syscalls
/// take as an argument the entrypoint of the initial routine to execute.
///
/// The thread passed to [`Process::start()`] should be the first thread to start execution
/// on a process.
///
/// A thread terminates execution:
/// - by calling [`Thread::exit()`]
/// - when the parent process terminates
/// - by calling [`Task::kill()`]
/// - after generating an exception for which there is no handler or the handler
/// decides to terminate the thread.
///
/// Returning from the entrypoint routine does not terminate execution. The last
/// action of the entrypoint should be to call [`Thread::exit()`].
///
/// Closing the last handle to a thread does not terminate execution. In order to
/// forcefully kill a thread for which there is no available handle, use
/// `KernelObject::get_child()` to obtain a handle to the thread. This method is strongly
/// discouraged. Killing a thread that is executing might leave the process in a
/// corrupt state.
///
/// Fuchsia native threads are always *detached*. That is, there is no *join()* operation
/// needed to do a clean termination. However, some runtimes above the kernel, such as
/// C11 or POSIX, might require threads to be joined.
///
/// ### Signals
/// Threads provide the following signals:
/// - [`THREAD_TERMINATED`]
/// - [`THREAD_SUSPENDED`]
/// - [`THREAD_RUNNING`]
///
/// When a thread is started [`THREAD_RUNNING`] is asserted. When it is suspended
/// [`THREAD_RUNNING`] is deasserted, and [`THREAD_SUSPENDED`] is asserted. When
/// the thread is resumed [`THREAD_SUSPENDED`] is deasserted and
/// [`THREAD_RUNNING`] is asserted. When a thread terminates both
/// [`THREAD_RUNNING`] and [`THREAD_SUSPENDED`] are deasserted and
/// [`THREAD_TERMINATED`] is asserted.
///
/// Note that signals are OR'd into the state maintained by the
/// `KernelObject::wait_signal_async()` family of functions; thus
/// you may see any combination of requested signals when they return.
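///
/// ### Example
///
/// A minimal sketch of the lifecycle described above (the entry point,
/// stack address and argument values are placeholders, and error handling
/// is omitted):
///
/// ```ignore
/// let thread = Thread::create(&proc, "worker", 0)?;
/// thread.start(entry_va, stack_top_va, arg1, arg2)?;
/// // ... the routine is expected to end by calling Thread::exit() itself ...
/// ```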
///
/// [`Thread::create()`]: Thread::create
/// [`Thread::exit()`]: Thread::exit
/// [`Process::exit()`]: crate::task::Process::exit
/// [`THREAD_TERMINATED`]: crate::object::Signal::THREAD_TERMINATED
/// [`THREAD_SUSPENDED`]: crate::object::Signal::THREAD_SUSPENDED
/// [`THREAD_RUNNING`]: crate::object::Signal::THREAD_RUNNING
pub struct Thread {
base: KObjectBase,
proc: Arc<Process>,
ext: Box<dyn Any + Send + Sync>,
inner: Mutex<ThreadInner>,
}
impl_kobject!(Thread);
#[no_mangle]
extern "C" fn thread_check_runnable(
thread: &'static Arc<Thread>,
) -> Pin<Box<dyn Future<Output = ()>>> {
Box::pin(check_runnable_async(thread))
}
/// Check whether a thread is runnable
async fn check_runnable_async(thread: &Arc<Thread>) {
thread.check_runnable().await
}
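/// Install a new thread state, carrying the general registers over from any
/// previously installed state (exported for the HAL to call; the actual call
/// site lives outside this crate).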
#[export_name = "thread_set_state"]
pub fn thread_set_state(thread: &'static Arc<Thread>, state: &'static mut ThreadState) {
let mut inner = thread.inner.lock();
if let Some(old_state) = inner.state.take() {
state.general = old_state.general;
}
inner.state = Some(state);
}
#[derive(Default)]
struct ThreadInner {
/// HAL thread handle
///
/// Should be `None` before the thread starts or after it has terminated.
hal_thread: Option<kernel_hal::Thread>,
/// Thread state
///
/// Only `Some` while the thread is suspended.
state: Option<&'static mut ThreadState>,
suspend_count: usize,
waker: Option<Waker>,
}
impl Thread {
/// Create a new thread.
pub fn create(proc: &Arc<Process>, name: &str, _options: u32) -> ZxResult<Arc<Self>> {
Self::create_with_ext(proc, name, ())
}
/// Create a new thread with extension info.
pub fn create_with_ext(
proc: &Arc<Process>,
name: &str,
ext: impl Any + Send + Sync,
) -> ZxResult<Arc<Self>> {
// TODO: options
let thread = Arc::new(Thread {
base: {
let base = KObjectBase::new();
base.set_name(name);
base
},
proc: proc.clone(),
ext: Box::new(ext),
inner: Mutex::new(ThreadInner::default()),
});
proc.add_thread(thread.clone());
Ok(thread)
}
/// Get the process.
pub fn proc(&self) -> &Arc<Process> {
&self.proc
}
/// Get the extension.
pub fn ext(&self) -> &Box<dyn Any + Send + Sync> {
&self.ext
}
/// Start execution on the thread.
pub fn start(
self: &Arc<Self>,
entry: usize,
stack: usize,
arg1: usize,
arg2: usize,
) -> ZxResult<()> {
let regs = GeneralRegs::new_fn(entry, stack, arg1, arg2);
self.start_with_regs(regs)
}
/// Start execution with given registers.
pub fn start_with_regs(self: &Arc<Self>, regs: GeneralRegs) -> ZxResult<()> {
let mut inner = self.inner.lock();
if inner.hal_thread.is_some() {
return Err(ZxError::BAD_STATE);
}
let hal_thread =
kernel_hal::Thread::spawn(self.clone(), regs, self.proc.vmar().table_phys());
inner.hal_thread = Some(hal_thread);
self.base.signal_set(Signal::THREAD_RUNNING);
Ok(())
}
/// Terminate the currently running thread.
/// TODO: move to CurrentThread
pub fn exit(&self) {
self.proc().remove_thread(self.base.id);
self.base.signal_set(Signal::THREAD_TERMINATED);
}
/// Read one aspect of thread state.
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
let inner = self.inner.lock();
let state = inner.state.as_ref().ok_or(ZxError::BAD_STATE)?;
let len = state.read(kind, buf)?;
Ok(len)
}
#[allow(unsafe_code)]
/// Write one aspect of thread state.
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult<()> {
let mut inner = self.inner.lock();
//let state = inner.state.as_mut().ok_or(ZxError::BAD_STATE)?;
let state = inner.state.get_or_insert({
unsafe {
static mut STATE: ThreadState = ThreadState {
general: GeneralRegs::zero(),
};
&mut STATE
}
});
state.write(kind, buf)?;
Ok(())
}
pub fn suspend(&self) {
let mut inner = self.inner.lock();
inner.suspend_count += 1;
self.base.signal_set(Signal::THREAD_SUSPENDED);
info!(
"thread {} suspend_count {}",
self.base.get_name(),
inner.suspend_count
);
}
pub fn check_runnable(self: &Arc<Thread>) -> impl Future<Output = ()> {
struct RunnableChecker {
thread: Arc<Thread>,
}
impl Future for RunnableChecker {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let count = self.thread.inner.lock().suspend_count;
if count == 0 {
Poll::Ready(())
} else {
// Store the waker here so `resume()` can wake this task later (e.g. self.thread.get_waker)
let mut inner = self.thread.inner.lock();
inner.waker = Some(cx.waker().clone());
Poll::Pending | thread: self.clone(),
}
}
pub fn resume(&self) {
let mut inner = self.inner.lock();
assert_ne!(inner.suspend_count, 0);
inner.suspend_count -= 1;
if inner.suspend_count == 0 {
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
}
}
#[cfg(test)]
mod tests {
use super::job::Job;
use super::*;
use std::sync::atomic::*;
use std::vec;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let _thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
}
#[test]
fn start() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
let thread1 = Thread::create(&proc, "thread1", 0).expect("failed to create thread");
// allocate stack for new thread
let mut stack = vec![0u8; 0x1000];
let stack_top = stack.as_mut_ptr() as usize + 0x1000;
// global variable for validation
static ARG1: AtomicUsize = AtomicUsize::new(0);
static ARG2: AtomicUsize = AtomicUsize::new(0);
// function for new thread
#[allow(unsafe_code)]
unsafe extern "C" fn entry(arg1: usize, arg2: usize) ->! {
ARG1.store(arg1, Ordering::SeqCst);
ARG2.store(arg2, Ordering::SeqCst);
kernel_hal_unix::syscall_entry();
unreachable!();
}
// start a new thread
let thread_ref_count = Arc::strong_count(&thread);
let handle = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
proc.start(&thread, entry as usize, stack_top, handle.clone(), 2)
.expect("failed to start thread");
// wait 100ms for the new thread to exit
std::thread::sleep(core::time::Duration::from_millis(100));
// validate the thread has started and received correct arguments
assert_eq!(ARG1.load(Ordering::SeqCst), 0);
assert_eq!(ARG2.load(Ordering::SeqCst), 2);
// no other references to `Thread`
assert_eq!(Arc::strong_count(&thread), thread_ref_count);
// start again should fail
assert_eq!(
proc.start(&thread, entry as usize, stack_top, handle.clone(), 2),
Err(ZxError::BAD_STATE)
);
// start another thread should fail
assert_eq!(
proc.start(&thread1, entry as usize, stack_top, handle.clone(), 2),
Err(ZxError::BAD_STATE)
);
}
} | }
}
}
RunnableChecker { | random_line_split |
thread.rs | use {
super::process::Process,
super::*,
crate::object::*,
alloc::{boxed::Box, sync::Arc},
core::{
any::Any,
future::Future,
pin::Pin,
task::{Context, Poll, Waker},
},
spin::Mutex,
};
pub use self::thread_state::*;
mod thread_state;
/// Runnable / computation entity
///
/// ## SYNOPSIS
///
/// TODO
///
/// ## DESCRIPTION
///
/// The thread object is the construct that represents a time-shared CPU execution
/// context. Thread objects are associated with a particular
/// [Process Object](crate::task::Process) which provides the memory and the handles to other
/// objects necessary for I/O and computation.
///
/// ### Lifetime
/// Threads are created by calling [`Thread::create()`], but only start executing
/// when either [`Thread::start()`] or [`Process::start()`] are called. Both syscalls
/// take as an argument the entrypoint of the initial routine to execute.
///
/// The thread passed to [`Process::start()`] should be the first thread to start execution
/// on a process.
///
/// A thread terminates execution:
/// - by calling [`Thread::exit()`]
/// - when the parent process terminates
/// - by calling [`Task::kill()`]
/// - after generating an exception for which there is no handler or the handler
/// decides to terminate the thread.
///
/// Returning from the entrypoint routine does not terminate execution. The last
/// action of the entrypoint should be to call [`Thread::exit()`].
///
/// Closing the last handle to a thread does not terminate execution. In order to
/// forcefully kill a thread for which there is no available handle, use
/// `KernelObject::get_child()` to obtain a handle to the thread. This method is strongly
/// discouraged. Killing a thread that is executing might leave the process in a
/// corrupt state.
///
/// Fuchsia native threads are always *detached*. That is, there is no *join()* operation
/// needed to do a clean termination. However, some runtimes above the kernel, such as
/// C11 or POSIX, might require threads to be joined.
///
/// ### Signals
/// Threads provide the following signals:
/// - [`THREAD_TERMINATED`]
/// - [`THREAD_SUSPENDED`]
/// - [`THREAD_RUNNING`]
///
/// When a thread is started [`THREAD_RUNNING`] is asserted. When it is suspended
/// [`THREAD_RUNNING`] is deasserted, and [`THREAD_SUSPENDED`] is asserted. When
/// the thread is resumed [`THREAD_SUSPENDED`] is deasserted and
/// [`THREAD_RUNNING`] is asserted. When a thread terminates both
/// [`THREAD_RUNNING`] and [`THREAD_SUSPENDED`] are deasserted and
/// [`THREAD_TERMINATED`] is asserted.
///
/// Note that signals are OR'd into the state maintained by the
/// `KernelObject::wait_signal_async()` family of functions; thus
/// you may see any combination of requested signals when they return.
///
/// [`Thread::create()`]: Thread::create
/// [`Thread::exit()`]: Thread::exit
/// [`Process::exit()`]: crate::task::Process::exit
/// [`THREAD_TERMINATED`]: crate::object::Signal::THREAD_TERMINATED
/// [`THREAD_SUSPENDED`]: crate::object::Signal::THREAD_SUSPENDED
/// [`THREAD_RUNNING`]: crate::object::Signal::THREAD_RUNNING
pub struct Thread {
base: KObjectBase,
proc: Arc<Process>,
ext: Box<dyn Any + Send + Sync>,
inner: Mutex<ThreadInner>,
}
impl_kobject!(Thread);
#[no_mangle]
extern "C" fn thread_check_runnable(
thread: &'static Arc<Thread>,
) -> Pin<Box<dyn Future<Output = ()>>> {
Box::pin(check_runnable_async(thread))
}
/// Check whether a thread is runnable
async fn check_runnable_async(thread: &Arc<Thread>) {
thread.check_runnable().await
}
#[export_name = "thread_set_state"]
pub fn thread_set_state(thread: &'static Arc<Thread>, state: &'static mut ThreadState) {
let mut inner = thread.inner.lock();
if let Some(old_state) = inner.state.take() {
state.general = old_state.general;
}
inner.state = Some(state);
}
#[derive(Default)]
struct ThreadInner {
/// HAL thread handle
///
/// Should be `None` before the thread starts or after it has terminated.
hal_thread: Option<kernel_hal::Thread>,
/// Thread state
///
/// Only `Some` while the thread is suspended.
state: Option<&'static mut ThreadState>,
suspend_count: usize,
waker: Option<Waker>,
}
impl Thread {
/// Create a new thread.
pub fn create(proc: &Arc<Process>, name: &str, _options: u32) -> ZxResult<Arc<Self>> {
Self::create_with_ext(proc, name, ())
}
/// Create a new thread with extension info.
pub fn create_with_ext(
proc: &Arc<Process>,
name: &str,
ext: impl Any + Send + Sync,
) -> ZxResult<Arc<Self>> {
// TODO: options
let thread = Arc::new(Thread {
base: {
let base = KObjectBase::new();
base.set_name(name);
base
},
proc: proc.clone(),
ext: Box::new(ext),
inner: Mutex::new(ThreadInner::default()),
});
proc.add_thread(thread.clone());
Ok(thread)
}
/// Get the process.
pub fn proc(&self) -> &Arc<Process> {
&self.proc
}
/// Get the extension.
pub fn ext(&self) -> &Box<dyn Any + Send + Sync> {
&self.ext
}
/// Start execution on the thread.
pub fn start(
self: &Arc<Self>,
entry: usize,
stack: usize,
arg1: usize,
arg2: usize,
) -> ZxResult<()> {
let regs = GeneralRegs::new_fn(entry, stack, arg1, arg2);
self.start_with_regs(regs)
}
/// Start execution with given registers.
pub fn start_with_regs(self: &Arc<Self>, regs: GeneralRegs) -> ZxResult<()> {
let mut inner = self.inner.lock();
if inner.hal_thread.is_some() {
return Err(ZxError::BAD_STATE);
}
let hal_thread =
kernel_hal::Thread::spawn(self.clone(), regs, self.proc.vmar().table_phys());
inner.hal_thread = Some(hal_thread);
self.base.signal_set(Signal::THREAD_RUNNING);
Ok(())
}
/// Terminate the currently running thread.
/// TODO: move to CurrentThread
pub fn exit(&self) {
self.proc().remove_thread(self.base.id);
self.base.signal_set(Signal::THREAD_TERMINATED);
}
/// Read one aspect of thread state.
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
let inner = self.inner.lock();
let state = inner.state.as_ref().ok_or(ZxError::BAD_STATE)?;
let len = state.read(kind, buf)?;
Ok(len)
}
#[allow(unsafe_code)]
/// Write one aspect of thread state.
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult<()> {
let mut inner = self.inner.lock();
//let state = inner.state.as_mut().ok_or(ZxError::BAD_STATE)?;
let state = inner.state.get_or_insert({
unsafe {
static mut STATE: ThreadState = ThreadState {
general: GeneralRegs::zero(),
};
&mut STATE
}
});
state.write(kind, buf)?;
Ok(())
}
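/// Suspend the thread: increment the suspend count and assert
/// `THREAD_SUSPENDED`. Suspended threads are parked by the future
/// returned from [`Thread::check_runnable`].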
pub fn suspend(&self) {
let mut inner = self.inner.lock();
inner.suspend_count += 1;
self.base.signal_set(Signal::THREAD_SUSPENDED);
info!(
"thread {} suspend_count {}",
self.base.get_name(),
inner.suspend_count
);
}
pub fn check_runnable(self: &Arc<Thread>) -> impl Future<Output = ()> {
struct RunnableChecker {
thread: Arc<Thread>,
}
impl Future for RunnableChecker {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let count = self.thread.inner.lock().suspend_count;
if count == 0 {
Poll::Ready(())
} else {
// Store the waker here so `resume()` can wake this task later (e.g. self.thread.get_waker)
let mut inner = self.thread.inner.lock();
inner.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
RunnableChecker {
thread: self.clone(),
}
}
pub fn resume(&self) {
let mut inner = self.inner.lock();
assert_ne!(inner.suspend_count, 0);
inner.suspend_count -= 1;
if inner.suspend_count == 0 {
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
}
}
#[cfg(test)]
mod tests {
use super::job::Job;
use super::*;
use std::sync::atomic::*;
use std::vec;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let _thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
}
#[test]
fn start() {
let | }
// start a new thread
let thread_ref_count = Arc::strong_count(&thread);
let handle = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
proc.start(&thread, entry as usize, stack_top, handle.clone(), 2)
.expect("failed to start thread");
// wait 100ms for the new thread to exit
std::thread::sleep(core::time::Duration::from_millis(100));
// validate the thread has started and received correct arguments
assert_eq!(ARG1.load(Ordering::SeqCst), 0);
assert_eq!(ARG2.load(Ordering::SeqCst), 2);
// no other references to `Thread`
assert_eq!(Arc::strong_count(&thread), thread_ref_count);
// start again should fail
assert_eq!(
proc.start(&thread, entry as usize, stack_top, handle.clone(), 2),
Err(ZxError::BAD_STATE)
);
// start another thread should fail
assert_eq!(
proc.start(&thread1, entry as usize, stack_top, handle.clone(), 2),
Err(ZxError::BAD_STATE)
);
}
}
| root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
let thread1 = Thread::create(&proc, "thread1", 0).expect("failed to create thread");
// allocate stack for new thread
let mut stack = vec![0u8; 0x1000];
let stack_top = stack.as_mut_ptr() as usize + 0x1000;
// global variable for validation
static ARG1: AtomicUsize = AtomicUsize::new(0);
static ARG2: AtomicUsize = AtomicUsize::new(0);
// function for new thread
#[allow(unsafe_code)]
unsafe extern "C" fn entry(arg1: usize, arg2: usize) -> ! {
ARG1.store(arg1, Ordering::SeqCst);
ARG2.store(arg2, Ordering::SeqCst);
kernel_hal_unix::syscall_entry();
unreachable!(); | identifier_body |
lib.rs | //! [License: MIT](./LICENSE-MIT)
//! [License: Apache-2.0](./LICENSE-APACHE)
//! [docs.rs](https://docs.rs/der-parser)
//! [crates.io](https://crates.io/crates/der-parser)
//! [Downloads](https://crates.io/crates/der-parser)
//! [Dependency status](https://deps.rs/crate/der-parser/8.2.0)
//! [Github CI](https://github.com/rusticata/der-parser/actions)
//! [Minimum rustc version](#rust-version-requirements)
//!
//! # BER/DER Parser
//!
//! A parser for Basic Encoding Rules (BER [[X.690]]) and Distinguished Encoding Rules (DER
//! [[X.690]]), implemented with the [nom](https://github.com/Geal/nom) parser combinator
//! framework.
//!
//! It is written in pure Rust, fast, and makes extensive use of zero-copy. A lot of care is taken
//! to ensure security and safety of this crate, including design (recursion limit, defensive
//! programming), tests, and fuzzing. It also aims to be panic-free.
//!
//! Historically, this parser was intended for DER only, and BER support was added later. This may
//! still be reflected in some naming schemes, but has no other consequence: the `BerObject` and
//! `DerObject` used in this crate are type aliases, so all functions are compatible.
//!
//! DER parsing functions, however, perform additional constraint verification.
//!
//! Serialization has also been added (see [Serialization](#serialization) )
//!
//! The code is available on [Github](https://github.com/rusticata/der-parser)
//! and is part of the [Rusticata](https://github.com/rusticata) project.
//!
//! # BER/DER parsers
//!
//! BER stands for Basic Encoding Rules, and is defined in [X.690]. It defines a set of rules to
//! encode and decode ASN.1 objects in binary.
//!
//! [X.690] also defines Distinguished Encoding Rules (DER), which is BER with added rules to
//! ensure canonical and unequivocal binary representation of objects.
//!
//! The choice of which one to use is usually guided by the specification of the data format based
//! on BER or DER: for example, X.509 uses DER as encoding representation.
//!
//! See the related modules for object definitions, functions, and example:
//! - [`ber`]: Basic Encoding Rules
//! - [`der`]: Distinguished Encoding Rules
//!
//! ## Examples
//!
//! Parse two BER integers (see [BER/DER Integers](#berder-integers)):
//!
//! ```rust
//! use der_parser::ber::parse_ber_integer;
//!
//! let bytes = [ 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//!
//! let (rem, obj1) = parse_ber_integer(&bytes).expect("parsing failed");
//! let (rem, obj2) = parse_ber_integer(&rem).expect("parsing failed");
//! ```
//!
//! Parse a DER sequence of integers:
//!
//! ```rust
//! use der_parser::der::{parse_der_integer, parse_der_sequence_of};
//!
//! let bytes = [ 0x30, 0x0a,
//! 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//!
//! let (rem, seq) = parse_der_sequence_of(parse_der_integer)(&bytes)
//! .expect("parsing failed");
//! ```
//!
//! Note: all parsing functions return the remaining (unparsed) bytes and the parsed object, or an
//! error.
//!
//! # DER parser design
//!
//! Parsing functions are inspired from `nom`, and follow the same interface. The most common
//! return type is [`BerResult`](error/type.BerResult.html), that stores the remaining bytes and
//! parsed [`BerObject`](ber/struct.BerObject.html), or an error. Reading the nom documentation may
//! help you understand how to write parsers and use the output.
//!
//! There are two different approaches for parsing DER objects: reading the objects recursively as
//! long as the tags are known, or specifying a description of the expected objects (generally from
//! the [ASN.1][X.680] description).
//!
//! The first parsing method can be done using the [`parse_ber`](ber/fn.parse_ber.html) and
//! [`parse_der`](der/fn.parse_der.html) methods.
//! It is useful when decoding an arbitrary DER object.
//! However, it cannot fully parse all objects, especially those containing IMPLICIT, OPTIONAL, or
//! DEFINED BY items.
//!
//! ```rust
//! use der_parser::parse_der;
//!
//! let bytes = [ 0x30, 0x0a,
//! 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//!
//! let parsed = parse_der(&bytes);
//! ```
//!
//! The second (and preferred) parsing method is to specify the expected objects recursively. The
//! following functions can be used:
//! - [`parse_ber_sequence_defined`](ber/fn.parse_ber_sequence_defined.html) and similar functions
//! for sequences and sets variants
//! - [`parse_ber_tagged_explicit`](ber/fn.parse_ber_tagged_explicit.html) for tagged explicit
//! - [`parse_ber_tagged_implicit`](ber/fn.parse_ber_tagged_implicit.html) for tagged implicit
//! - [`parse_ber_container`](ber/fn.parse_ber_container.html) for generic parsing, etc.
//! - DER objects use the `_der_` variants
//!
//! For example, to read a BER sequence containing two integers:
//!
//! ```rust
//! use der_parser::ber::*;
//! use der_parser::error::BerResult;
//!
//! fn localparse_seq(i:&[u8]) -> BerResult {
//! parse_ber_sequence_defined(|data| {
//! let (rem, a) = parse_ber_integer(data)?;
//! let (rem, b) = parse_ber_integer(rem)?;
//! Ok((rem, vec![a, b]))
//! })(i)
//! }
//!
//! let bytes = [ 0x30, 0x0a,
//! 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//!
//! let (_, parsed) = localparse_seq(&bytes).expect("parsing failed");
//!
//! assert_eq!(parsed[0].as_u64(), Ok(65537));
//! assert_eq!(parsed[1].as_u64(), Ok(65536));
//! ```
//!
//! All functions return a [`BerResult`](error/type.BerResult.html) object: the parsed
//! [`BerObject`](ber/struct.BerObject.html), an `Incomplete` value, or an error.
//!
//! Note that this type is also a `Result`, so usual functions (`map`, `unwrap` etc.) are available.
//!
//! # Notes
//!
//! ## BER/DER Integers
//!
//! DER integers can be of any size, so it is not possible to store them as simple integers (they
//! are stored as raw bytes).
//!
//! Note that, by default, BER/DER integers are signed. Functions are provided to request reading
//! unsigned values, but they will fail if the integer value is negative.
//!
//! To get the integer value for all possible integer sign and size, use
//! [`BerObject::as_bigint`](ber/struct.BerObject.html#method.as_bigint)) (requires the `bigint` feature).
//!
//! To get a simple value expected to be in a known range, use methods like
//! [`BerObject::as_i32`](ber/struct.BerObject.html#method.as_i32)) and
//! [`BerObject::as_i64`](ber/struct.BerObject.html#method.as_i64) (or the unsigned versions
//! [`BerObject::as_u32`](ber/struct.BerObject.html#method.as_u32) and
//! [`BerObject::as_u64`](ber/struct.BerObject.html#method.as_u64)
//!),
//! which will return the value, or an error if the integer is too large (or is negative).
//!
//! ```rust
//! use der_parser::ber::*;
//!
//! let data = &[0x02, 0x03, 0x01, 0x00, 0x01];
//!
//! let (_, object) = parse_ber_integer(data).expect("parsing failed");
//! assert_eq!(object.as_u64(), Ok(65537));
//!
//! #[cfg(feature = "bigint")]
//! assert_eq!(object.as_bigint(), Ok(65537.into()))
//! ```
//!
//! Access to the raw value is possible using the `as_slice` method.
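//!
//! For example (a sketch; `as_slice` is assumed here to return the raw content
//! octets wrapped in a `Result`):
//!
//! ```rust,ignore
//! use der_parser::ber::*;
//!
//! let data = &[0x02, 0x03, 0x01, 0x00, 0x01];
//! let (_, object) = parse_ber_integer(data).expect("parsing failed");
//! assert_eq!(object.as_slice(), Ok(&data[2..]));
//! ```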
//!
//! ## Parsers, combinators, macros
//!
//! Some parsing tools (for example, for tagged objects) are available in different forms:
//! - parsers: (regular) functions that take input and create an object
//! - combinators: functions that take parsers (or combinators) as input, and return a function
//! (usually, the parser). They are used (combined) as building blocks to create more complex
//! parsers. | //!
//! - The DER constraints are verified if using `parse_der`.
//! - `BerObject` and `DerObject` are the same objects (type alias). The only difference is the
//! verification of constraints *during parsing*.
//!
//! ## Rust version requirements
//!
//! The 7.0 series of `der-parser` requires **Rustc version 1.53 or greater**, based on `asn1-rs`
//! and `nom` 7 dependencies.
//!
//! # Serialization
//!
//! Support for encoding BER/DER objects is currently being tested and can be used by activating the `serialize` feature.
//! Note that current status is **experimental**.
//!
//! See the `ber_encode_*` functions in the [`ber`](ber/index.html) module, and
//! [`BerObject::to_vec`](ber/struct.BerObject.html#method.to_vec)
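//!
//! A sketch of round-tripping a parsed object (this assumes the `serialize`
//! feature is enabled and that `to_vec` returns a `Result<Vec<u8>, _>`):
//!
//! ```rust,ignore
//! use der_parser::parse_der;
//!
//! let (_, obj) = parse_der(&bytes).expect("parsing failed");
//! let reencoded = obj.to_vec().expect("serialization failed");
//! ```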
//!
//! # References
//!
//! - [[X.680]] Abstract Syntax Notation One (ASN.1): Specification of basic notation.
//! - [[X.690]] ASN.1 encoding rules: Specification of Basic Encoding Rules (BER), Canonical
//! Encoding Rules (CER) and Distinguished Encoding Rules (DER).
//!
//! [X.680]: http://www.itu.int/rec/T-REC-X.680/en "Abstract Syntax Notation One (ASN.1):
//! Specification of basic notation."
//! [X.690]: https://www.itu.int/rec/T-REC-X.690/en "ASN.1 encoding rules: Specification of
//! Basic Encoding Rules (BER), Canonical Encoding Rules (CER) and Distinguished Encoding Rules
//! (DER)."
#![deny(/*missing_docs,*/
unstable_features,
unused_import_braces,
unused_qualifications,
unreachable_pub)]
#![forbid(unsafe_code)]
#![warn(
/* missing_docs,
rust_2018_idioms,*/
missing_debug_implementations,
)]
// pragmas for doc
#![deny(broken_intra_doc_links)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(test(
no_crate_inject,
attr(deny(warnings/*, rust_2018_idioms*/), allow(dead_code, unused_variables))
))]
#![no_std]
#[cfg(any(test, feature = "std"))]
#[macro_use]
extern crate std;
extern crate alloc;
#[allow(clippy::module_inception)]
pub mod ber;
pub mod der;
pub mod error;
pub mod oid;
// compatibility: re-export at crate root
pub use ber::parse_ber;
pub use der::parse_der;
pub use asn1_rs;
pub use nom;
#[cfg(feature = "bigint")]
#[cfg_attr(docsrs, doc(cfg(feature = "bigint")))]
pub use num_bigint;
pub use rusticata_macros;
// re-exports nom macros, so this crate's macros can be used without importing nom
pub use nom::IResult;
pub(crate) use asn1_rs::der_constraint_fail_if;
pub use asn1_rs::Oid;
/// Procedural macro to get encoded oids, see the [oid module](oid/index.html).
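///
/// A usage sketch (the OID value below is arbitrary):
///
/// ```ignore
/// let oid = oid!(1.2.840.113549.1.1.5);
/// ```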
#[macro_export]
macro_rules! oid {
($($args:tt)*) => {{
$crate::asn1_rs::oid!($($args)*)
}};
} | //! - macros: these are generally previous (historic) versions of parsers, kept for compatibility.
//!   They can sometimes reduce the amount of code to write, but are hard to debug.
//! Parsers should be preferred when possible.
//!
//! ## Misc Notes | random_line_split |
lib.rs | //! Shared code for EZO sensor chips. These chips are used for sensing aquatic
//! media.
//!
//! > Currently, only __I2C Mode__ is supported.
#[macro_use]
extern crate failure;
extern crate i2cdev;
#[macro_use]
mod macros;
pub mod command;
pub mod errors;
pub mod response;
use std::ffi::{CStr, CString};
use std::thread;
use std::time::Duration;
use errors::*;
use failure::ResultExt;
use i2cdev::{core::I2CDevice, linux::LinuxI2CDevice};
/// Default buffer size for ASCII data responses.
///
/// Implement your own version of MAX_DATA wherever you are implementing
/// the `define_command!` macro, to override.
pub const MAX_DATA: usize = 42;
/// I2C command for the EZO chip.
pub trait Command {
type Error;
type Response;
fn get_command_string(&self) -> String;
fn get_delay(&self) -> u64;
fn write<D: I2CDevice>(&self, _device: &mut D) -> Result<Self::Response, Self::Error> {
unimplemented!(
"WIP: the provided method will disappear when the 'define_command' macro is updated"
);
}
#[deprecated(since="0.1.2", note="please use `Command::write` instead")]
fn run(&self, dev: &mut LinuxI2CDevice) -> Result<Self::Response, Self::Error>;
}
/// Determines the response code sent by the EZO chip.
pub fn | (code_byte: u8) -> ResponseCode {
use self::ResponseCode::*;
match code_byte {
x if x == NoDataExpected as u8 => NoDataExpected,
x if x == Pending as u8 => Pending,
x if x == DeviceError as u8 => DeviceError,
x if x == Success as u8 => Success,
_ => UnknownError,
}
}
/// Allowable baudrates used when changing the chip to UART mode.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BpsRate {
Bps300 = 300,
Bps1200 = 1200,
Bps2400 = 2400,
Bps9600 = 9600,
Bps19200 = 19200,
Bps38400 = 38400,
Bps57600 = 57600,
Bps115200 = 115200,
}
impl BpsRate {
/// Returns the `BpsRate` from a `u32` value.
pub fn parse_u32(bps_rate: u32) -> Result<BpsRate, EzoError> {
let bps = match bps_rate {
x if x == BpsRate::Bps300 as u32 => BpsRate::Bps300,
x if x == BpsRate::Bps1200 as u32 => BpsRate::Bps1200,
x if x == BpsRate::Bps2400 as u32 => BpsRate::Bps2400,
x if x == BpsRate::Bps9600 as u32 => BpsRate::Bps9600,
x if x == BpsRate::Bps19200 as u32 => BpsRate::Bps19200,
x if x == BpsRate::Bps38400 as u32 => BpsRate::Bps38400,
x if x == BpsRate::Bps57600 as u32 => BpsRate::Bps57600,
x if x == BpsRate::Bps115200 as u32 => BpsRate::Bps115200,
_ => return Err(ErrorKind::BpsRateParse)?,
};
Ok(bps)
}
/// Returns the BpsRate as a `u32` value.
pub fn parse(&self) -> u32 {
match *self {
BpsRate::Bps300 => BpsRate::Bps300 as u32,
BpsRate::Bps1200 => BpsRate::Bps1200 as u32,
BpsRate::Bps2400 => BpsRate::Bps2400 as u32,
BpsRate::Bps9600 => BpsRate::Bps9600 as u32,
BpsRate::Bps19200 => BpsRate::Bps19200 as u32,
BpsRate::Bps38400 => BpsRate::Bps38400 as u32,
BpsRate::Bps57600 => BpsRate::Bps57600 as u32,
BpsRate::Bps115200 => BpsRate::Bps115200 as u32,
}
}
}
/// Known response codes from EZO chip interactions.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ResponseCode {
NoDataExpected = 0xFF,
Pending = 0xFE,
DeviceError = 0x02,
Success = 0x01,
UnknownError = 0x00, // This code is NOT implemented by the EZO chips
}
/// Writes the ASCII command to the EZO chip, with one retry.
pub fn write_to_ezo(dev: &mut LinuxI2CDevice, cmd_str: &str) -> Result<(), EzoError> {
let cmd = CString::new(cmd_str).context(ErrorKind::UnreadableCommand)?;
if let Err(_) = dev.write(cmd.as_bytes_with_nul()) {
thread::sleep(Duration::from_millis(100));
dev.write(cmd.as_bytes_with_nul())
.context(ErrorKind::UnwritableCommand)?;
};
Ok(())
}
/// Turns off the high bit in each of the bytes of `v`. Raspberry Pi
/// for some reason outputs i2c buffers with some of the high bits
/// turned on.
fn turn_off_high_bits(v: &mut [u8]) {
for b in v.iter_mut() {
*b &= 0x7f;
}
}
/// Converts a slice of bytes, as they come raw from the i2c buffer,
/// into an owned String. Due to a hardware glitch in the Broadcom
/// I2C module, we need to strip off the high bit of each byte in the
/// response strings.
///
/// This function ensures that the response is a nul-terminated string
/// and that it is valid UTF-8 (a superset of ASCII).
///
/// After reading your buffer from the i2c device, check the first
/// byte for the response code. Then, pass a slice with the rest of
/// the buffer (without that first byte) to this function to get an
/// UTF-8 string.
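///
/// A minimal sketch of that flow (the buffer contents here are made up):
///
/// ```ignore
/// let buf: [u8; 7] = [0x01, b'h', b'e', b'l', b'l', b'o', 0];
/// if response_code(buf[0]) == ResponseCode::Success {
///     let s = string_from_response_data(&buf[1..])?;
///     assert_eq!(s, "hello");
/// }
/// ```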
pub fn string_from_response_data(response: &[u8]) -> Result<String, EzoError> {
let mut buf = response.to_owned();
turn_off_high_bits(&mut buf);
let terminated = CStr::from_bytes_with_nul(&buf).context(ErrorKind::MalformedResponse)?;
let s = terminated
.to_str()
.context(ErrorKind::MalformedResponse)?
.to_owned();
Ok(s)
}
#[cfg(test)]
mod tests {
use super::*;
use super::response::ResponseStatus;
#[test]
fn converts_baud_rates_to_numbers() {
assert_eq!(BpsRate::Bps300.parse(), 300);
assert_eq!(BpsRate::Bps1200.parse(), 1200);
assert_eq!(BpsRate::Bps2400.parse(), 2400);
assert_eq!(BpsRate::Bps9600.parse(), 9600);
assert_eq!(BpsRate::Bps19200.parse(), 19200);
assert_eq!(BpsRate::Bps38400.parse(), 38400);
assert_eq!(BpsRate::Bps57600.parse(), 57600);
assert_eq!(BpsRate::Bps115200.parse(), 115200);
}
#[test]
fn converts_numbers_to_baud_rates() {
assert_eq!(BpsRate::Bps300, BpsRate::parse_u32(300).unwrap());
assert_eq!(BpsRate::Bps1200, BpsRate::parse_u32(1200).unwrap());
assert_eq!(BpsRate::Bps2400, BpsRate::parse_u32(2400).unwrap());
assert_eq!(BpsRate::Bps9600, BpsRate::parse_u32(9600).unwrap());
assert_eq!(BpsRate::Bps19200, BpsRate::parse_u32(19200).unwrap());
assert_eq!(BpsRate::Bps38400, BpsRate::parse_u32(38400).unwrap());
assert_eq!(BpsRate::Bps57600, BpsRate::parse_u32(57600).unwrap());
assert_eq!(BpsRate::Bps115200, BpsRate::parse_u32(115200).unwrap());
}
#[test]
fn turns_off_high_bits() {
let data: [u8; 11] = [63, 73, 44, 112, 72, 44, 49, 46, 57, 56, 0];
let mut flipped_data: [u8; 11] = [63, 73, 172, 112, 200, 172, 49, 46, 57, 56, 0];
turn_off_high_bits(&mut flipped_data);
assert_eq!(data, flipped_data);
}
#[test]
fn converts_valid_response_to_string() {
// empty nul-terminated string
assert_eq!(string_from_response_data(&b"\0"[..]).unwrap(), "");
// non-empty nul-terminated string
assert_eq!(string_from_response_data(&b"hello\0"[..]).unwrap(), "hello");
// high bit is on in the last character
assert_eq!(
string_from_response_data(&b"hell\xef\0"[..]).unwrap(),
"hello"
);
}
fn assert_converts_to_malformed_response(data: &[u8]) {
let result = string_from_response_data(&data);
match result {
Err(e) => assert_eq!(e.kind(), ErrorKind::MalformedResponse),
_ => unreachable!(),
}
}
#[test]
fn converts_invalid_response_to_error() {
// No nul terminator in either of these
assert_converts_to_malformed_response(&b""[..]);
assert_converts_to_malformed_response(&b"\xff"[..]);
}
#[test]
fn process_no_data_response_code() {
assert_eq!(response_code(255), ResponseCode::NoDataExpected);
}
#[test]
fn process_pending_response_code() {
assert_eq!(response_code(254), ResponseCode::Pending);
}
#[test]
fn process_error_response_code() {
assert_eq!(response_code(2), ResponseCode::DeviceError);
}
#[test]
fn process_success_response_code() {
assert_eq!(response_code(1), ResponseCode::Success);
}
#[test]
fn process_unknown_response_code() {
assert_eq!(response_code(0), ResponseCode::UnknownError);
assert_eq!(response_code(16), ResponseCode::UnknownError);
assert_eq!(response_code(156), ResponseCode::UnknownError);
}
#[test]
fn macro_creates_noack_simple_command_with_docs() {
define_command! {
doc: "docstring here",
ControlCommand, { "cmd".to_string() }, 1000
}
assert_eq!(ControlCommand.get_command_string(), "cmd");
assert_eq!(ControlCommand.get_delay(), 1000);
}
#[test]
fn macro_creates_noack_input_command_with_docs() {
define_command! {
doc: "docstring here",
cmd: InputCommand(f32), { format!("cmd,{:.*}", 2, cmd) }, 0
}
assert_eq!(InputCommand(3.285).get_command_string(), "cmd,3.29");
assert_eq!(InputCommand(3.285).get_delay(), 0);
}
#[test]
fn macro_creates_ack_simple_command_with_docs() {
define_command! {
doc: "docstring here",
ControlCommand, { "cmd".to_string() }, 1000, Ack
}
assert_eq!(ControlCommand.get_command_string(), "cmd");
assert_eq!(ControlCommand.get_delay(), 1000);
}
#[test]
fn macro_creates_ack_input_command_with_docs() {
define_command! {
doc: "docstring here",
cmd: InputCommand(f32), { format!("cmd,{:.*}", 2, cmd) }, 0, Ack
}
assert_eq!(InputCommand(3.285).get_command_string(), "cmd,3.29");
assert_eq!(InputCommand(3.285).get_delay(), 0);
}
#[test]
fn macro_creates_simple_command_with_response_with_docs() {
define_command! {
doc: "docstring here",
ControlCommand, { "cmd".to_string() }, 1000,
_data: u32, { Ok (0u32) }
}
assert_eq!(ControlCommand.get_command_string(), "cmd");
assert_eq!(ControlCommand.get_delay(), 1000);
}
#[test]
fn macro_creates_input_command_with_response_with_docs() {
define_command! {
doc: "docstring here",
cmd: InputCommand(u8), { format!("cmd,{}\0", cmd) }, 140,
_data: (), { Ok (()) }
}
assert_eq!(InputCommand(0x7F).get_command_string(), "cmd,127\0");
assert_eq!(InputCommand(0x7F).get_delay(), 140);
}
}
| response_code | identifier_name |
lib.rs | //! Shared code for EZO sensor chips. These chips are used for sensing aquatic
//! media.
//!
//! > Currently, only __I2C Mode__ is supported.
#[macro_use]
extern crate failure;
extern crate i2cdev;
#[macro_use]
mod macros;
pub mod command;
pub mod errors;
pub mod response;
use std::ffi::{CStr, CString};
use std::thread;
use std::time::Duration;
use errors::*;
use failure::ResultExt;
use i2cdev::{core::I2CDevice, linux::LinuxI2CDevice};
/// Default buffer size for ASCII data responses.
///
/// Implement your own version of MAX_DATA wherever you are implementing
/// the `define_command!` macro, to override.
pub const MAX_DATA: usize = 42;
/// I2C command for the EZO chip.
pub trait Command {
type Error;
type Response;
fn get_command_string(&self) -> String;
fn get_delay(&self) -> u64;
fn write<D: I2CDevice>(&self, _device: &mut D) -> Result<Self::Response, Self::Error> {
unimplemented!(
"WIP: the provided method will disappear when the 'define_command' macro is updated"
);
}
#[deprecated(since="0.1.2", note="please use `Command::write` instead")]
fn run(&self, dev: &mut LinuxI2CDevice) -> Result<Self::Response, Self::Error>;
}
/// Determines the response code sent by the EZO chip.
pub fn response_code(code_byte: u8) -> ResponseCode {
use self::ResponseCode::*;
match code_byte {
x if x == NoDataExpected as u8 => NoDataExpected,
x if x == Pending as u8 => Pending,
x if x == DeviceError as u8 => DeviceError,
x if x == Success as u8 => Success,
_ => UnknownError,
}
}
/// Allowable baudrates used when changing the chip to UART mode.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BpsRate {
Bps300 = 300,
Bps1200 = 1200,
Bps2400 = 2400,
Bps9600 = 9600,
Bps19200 = 19200,
Bps38400 = 38400,
Bps57600 = 57600,
Bps115200 = 115200,
}
impl BpsRate {
/// Returns the `BpsRate` from a `u32` value.
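///
/// A sketch of the round-trip with [`BpsRate::parse`] (values mirror the
/// tests at the bottom of this file):
///
/// ```ignore
/// let rate = BpsRate::parse_u32(9600)?;
/// assert_eq!(rate, BpsRate::Bps9600);
/// assert_eq!(rate.parse(), 9600);
/// ```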
pub fn parse_u32(bps_rate: u32) -> Result<BpsRate, EzoError> {
let bps = match bps_rate {
x if x == BpsRate::Bps300 as u32 => BpsRate::Bps300,
x if x == BpsRate::Bps1200 as u32 => BpsRate::Bps1200,
x if x == BpsRate::Bps2400 as u32 => BpsRate::Bps2400,
x if x == BpsRate::Bps9600 as u32 => BpsRate::Bps9600,
x if x == BpsRate::Bps19200 as u32 => BpsRate::Bps19200,
x if x == BpsRate::Bps38400 as u32 => BpsRate::Bps38400,
x if x == BpsRate::Bps57600 as u32 => BpsRate::Bps57600,
x if x == BpsRate::Bps115200 as u32 => BpsRate::Bps115200,
_ => return Err(ErrorKind::BpsRateParse)?,
};
Ok(bps)
}
/// Returns the BpsRate as a `u32` value.
pub fn parse(&self) -> u32 {
match *self {
BpsRate::Bps300 => BpsRate::Bps300 as u32,
BpsRate::Bps1200 => BpsRate::Bps1200 as u32,
BpsRate::Bps2400 => BpsRate::Bps2400 as u32,
BpsRate::Bps9600 => BpsRate::Bps9600 as u32,
BpsRate::Bps19200 => BpsRate::Bps19200 as u32,
BpsRate::Bps38400 => BpsRate::Bps38400 as u32,
BpsRate::Bps57600 => BpsRate::Bps57600 as u32,
BpsRate::Bps115200 => BpsRate::Bps115200 as u32,
}
}
}
/// Known response codes from EZO chip interactions.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ResponseCode {
NoDataExpected = 0xFF,
Pending = 0xFE,
DeviceError = 0x02,
Success = 0x01,
UnknownError = 0x00, // This code is NOT implemented by the EZO chips
}
/// Writes the ASCII command to the EZO chip, with one retry.
pub fn write_to_ezo(dev: &mut LinuxI2CDevice, cmd_str: &str) -> Result<(), EzoError> {
let cmd = CString::new(cmd_str).context(ErrorKind::UnreadableCommand)?;
if let Err(_) = dev.write(cmd.as_bytes_with_nul()) {
thread::sleep(Duration::from_millis(100));
dev.write(cmd.as_bytes_with_nul())
.context(ErrorKind::UnwritableCommand)?;
};
Ok(())
}
/// Turns off the high bit in each of the bytes of `v`. Raspberry Pi
/// for some reason outputs i2c buffers with some of the high bits
/// turned on.
fn turn_off_high_bits(v: &mut [u8]) {
for b in v.iter_mut() {
*b &= 0x7f;
}
}
/// Converts a slice of bytes, as they come raw from the i2c buffer,
/// into an owned String. Due to a hardware glitch in the Broadcom
/// I2C module, we need to strip off the high bit of each byte in the
/// response strings.
///
/// This function ensures that the response is a nul-terminated string
/// and that it is valid UTF-8 (a superset of ASCII).
///
/// After reading your buffer from the i2c device, check the first
/// byte for the response code. Then, pass a slice with the rest of
/// the buffer (without that first byte) to this function to get an
/// UTF-8 string.
pub fn string_from_response_data(response: &[u8]) -> Result<String, EzoError> {
let mut buf = response.to_owned();
turn_off_high_bits(&mut buf);
let terminated = CStr::from_bytes_with_nul(&buf).context(ErrorKind::MalformedResponse)?;
let s = terminated
.to_str()
.context(ErrorKind::MalformedResponse)?
.to_owned();
Ok(s)
}
#[cfg(test)]
mod tests {
use super::*;
use super::response::ResponseStatus;
#[test]
fn converts_baud_rates_to_numbers() {
assert_eq!(BpsRate::Bps300.parse(), 300);
assert_eq!(BpsRate::Bps1200.parse(), 1200);
assert_eq!(BpsRate::Bps2400.parse(), 2400);
assert_eq!(BpsRate::Bps9600.parse(), 9600);
assert_eq!(BpsRate::Bps19200.parse(), 19200);
assert_eq!(BpsRate::Bps38400.parse(), 38400);
assert_eq!(BpsRate::Bps57600.parse(), 57600);
assert_eq!(BpsRate::Bps115200.parse(), 115200);
}
#[test]
fn converts_numbers_to_baud_rates() {
assert_eq!(BpsRate::Bps300, BpsRate::parse_u32(300).unwrap());
assert_eq!(BpsRate::Bps1200, BpsRate::parse_u32(1200).unwrap());
assert_eq!(BpsRate::Bps2400, BpsRate::parse_u32(2400).unwrap());
assert_eq!(BpsRate::Bps9600, BpsRate::parse_u32(9600).unwrap());
assert_eq!(BpsRate::Bps19200, BpsRate::parse_u32(19200).unwrap());
assert_eq!(BpsRate::Bps38400, BpsRate::parse_u32(38400).unwrap());
assert_eq!(BpsRate::Bps57600, BpsRate::parse_u32(57600).unwrap());
assert_eq!(BpsRate::Bps115200, BpsRate::parse_u32(115200).unwrap());
}
#[test]
fn turns_off_high_bits() {
let data: [u8; 11] = [63, 73, 44, 112, 72, 44, 49, 46, 57, 56, 0];
let mut flipped_data: [u8; 11] = [63, 73, 172, 112, 200, 172, 49, 46, 57, 56, 0];
turn_off_high_bits(&mut flipped_data);
assert_eq!(data, flipped_data);
}
#[test]
fn converts_valid_response_to_string() {
// empty nul-terminated string
assert_eq!(string_from_response_data(&b"\0"[..]).unwrap(), "");
// non-empty nul-terminated string
assert_eq!(string_from_response_data(&b"hello\0"[..]).unwrap(), "hello");
// high bit is on in the last character
assert_eq!(
string_from_response_data(&b"hell\xef\0"[..]).unwrap(),
"hello"
);
}
fn assert_converts_to_malformed_response(data: &[u8]) {
let result = string_from_response_data(&data);
match result {
Err(e) => assert_eq!(e.kind(), ErrorKind::MalformedResponse),
_ => unreachable!(),
}
}
#[test]
fn converts_invalid_response_to_error() {
// No nul terminator in either of these
assert_converts_to_malformed_response(&b""[..]);
assert_converts_to_malformed_response(&b"\xff"[..]);
}
#[test]
fn process_no_data_response_code() |
#[test]
fn process_pending_response_code() {
assert_eq!(response_code(254), ResponseCode::Pending);
}
#[test]
fn process_error_response_code() {
assert_eq!(response_code(2), ResponseCode::DeviceError);
}
#[test]
fn process_success_response_code() {
assert_eq!(response_code(1), ResponseCode::Success);
}
#[test]
fn process_unknown_response_code() {
assert_eq!(response_code(0), ResponseCode::UnknownError);
assert_eq!(response_code(16), ResponseCode::UnknownError);
assert_eq!(response_code(156), ResponseCode::UnknownError);
}
#[test]
fn macro_creates_noack_simple_command_with_docs() {
define_command! {
doc: "docstring here",
ControlCommand, { "cmd".to_string() }, 1000
}
assert_eq!(ControlCommand.get_command_string(), "cmd");
assert_eq!(ControlCommand.get_delay(), 1000);
}
#[test]
fn macro_creates_noack_input_command_with_docs() {
define_command! {
doc: "docstring here",
cmd: InputCommand(f32), { format!("cmd,{:.*}", 2, cmd) }, 0
}
assert_eq!(InputCommand(3.285).get_command_string(), "cmd,3.29");
assert_eq!(InputCommand(3.285).get_delay(), 0);
}
#[test]
fn macro_creates_ack_simple_command_with_docs() {
define_command! {
doc: "docstring here",
ControlCommand, { "cmd".to_string() }, 1000, Ack
}
assert_eq!(ControlCommand.get_command_string(), "cmd");
assert_eq!(ControlCommand.get_delay(), 1000);
}
#[test]
fn macro_creates_ack_input_command_with_docs() {
define_command! {
doc: "docstring here",
cmd: InputCommand(f32), { format!("cmd,{:.*}", 2, cmd) }, 0, Ack
}
assert_eq!(InputCommand(3.285).get_command_string(), "cmd,3.29");
assert_eq!(InputCommand(3.285).get_delay(), 0);
}
#[test]
fn macro_creates_simple_command_with_response_with_docs() {
define_command! {
doc: "docstring here",
ControlCommand, { "cmd".to_string() }, 1000,
_data: u32, { Ok (0u32) }
}
assert_eq!(ControlCommand.get_command_string(), "cmd");
assert_eq!(ControlCommand.get_delay(), 1000);
}
#[test]
fn macro_creates_input_command_with_response_with_docs() {
define_command! {
doc: "docstring here",
cmd: InputCommand(u8), { format!("cmd,{}\0", cmd) }, 140,
_data: (), { Ok (()) }
}
assert_eq!(InputCommand(0x7F).get_command_string(), "cmd,127\0");
assert_eq!(InputCommand(0x7F).get_delay(), 140);
}
}
| {
assert_eq!(response_code(255), ResponseCode::NoDataExpected);
} | identifier_body |
lib.rs | //! Shared code for EZO sensor chips. These chips are used for sensing aquatic
//! media.
//!
//! > Currently, only __I2C Mode__ is supported.
#[macro_use]
extern crate failure;
extern crate i2cdev;
#[macro_use]
mod macros;
pub mod command;
pub mod errors;
pub mod response;
use std::ffi::{CStr, CString};
use std::thread;
use std::time::Duration;
use errors::*;
use failure::ResultExt;
use i2cdev::{core::I2CDevice, linux::LinuxI2CDevice};
/// Default buffer size for ASCII data responses.
///
/// Implement your own version of MAX_DATA wherever you are implementing
/// the `define_command!` macro, to override.
pub const MAX_DATA: usize = 42;
/// I2C command for the EZO chip.
pub trait Command {
type Error;
type Response;
fn get_command_string(&self) -> String;
fn get_delay(&self) -> u64;
fn write<D: I2CDevice>(&self, _device: &mut D) -> Result<Self::Response, Self::Error> {
unimplemented!(
"WIP: the provided method will disappear when the 'define_command' macro is updated"
);
}
#[deprecated(since="0.1.2", note="please use `Command::write` instead")]
fn run(&self, dev: &mut LinuxI2CDevice) -> Result<Self::Response, Self::Error>;
}
/// Determines the response code sent by the EZO chip.
pub fn response_code(code_byte: u8) -> ResponseCode {
use self::ResponseCode::*;
match code_byte {
x if x == NoDataExpected as u8 => NoDataExpected,
x if x == Pending as u8 => Pending,
x if x == DeviceError as u8 => DeviceError,
x if x == Success as u8 => Success,
_ => UnknownError,
}
}
/// Allowable baudrates used when changing the chip to UART mode.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BpsRate {
Bps300 = 300,
Bps1200 = 1200,
Bps2400 = 2400,
Bps9600 = 9600,
Bps19200 = 19200,
Bps38400 = 38400,
Bps57600 = 57600,
Bps115200 = 115200,
}
impl BpsRate {
/// Returns the `BpsRate` from a `u32` value.
pub fn parse_u32(bps_rate: u32) -> Result<BpsRate, EzoError> {
let bps = match bps_rate {
x if x == BpsRate::Bps300 as u32 => BpsRate::Bps300,
x if x == BpsRate::Bps1200 as u32 => BpsRate::Bps1200,
x if x == BpsRate::Bps2400 as u32 => BpsRate::Bps2400,
x if x == BpsRate::Bps9600 as u32 => BpsRate::Bps9600,
x if x == BpsRate::Bps19200 as u32 => BpsRate::Bps19200,
x if x == BpsRate::Bps38400 as u32 => BpsRate::Bps38400,
x if x == BpsRate::Bps57600 as u32 => BpsRate::Bps57600,
x if x == BpsRate::Bps115200 as u32 => BpsRate::Bps115200,
_ => return Err(ErrorKind::BpsRateParse)?,
};
Ok(bps)
}
/// Returns the BpsRate as a `u32` value.
pub fn parse(&self) -> u32 {
match *self {
BpsRate::Bps300 => BpsRate::Bps300 as u32,
BpsRate::Bps1200 => BpsRate::Bps1200 as u32,
BpsRate::Bps2400 => BpsRate::Bps2400 as u32,
BpsRate::Bps9600 => BpsRate::Bps9600 as u32,
BpsRate::Bps19200 => BpsRate::Bps19200 as u32,
BpsRate::Bps38400 => BpsRate::Bps38400 as u32,
BpsRate::Bps57600 => BpsRate::Bps57600 as u32,
BpsRate::Bps115200 => BpsRate::Bps115200 as u32,
}
}
}
/// Known response codes from EZO chip interactions.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ResponseCode {
NoDataExpected = 0xFF,
Pending = 0xFE,
DeviceError = 0x02,
Success = 0x01,
UnknownError = 0x00, // This code is NOT implemented by the EZO chips
}
/// Writes the ASCII command to the EZO chip, with one retry.
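///
/// A usage sketch (the device path, I2C address, and command string are
/// illustrative only):
///
/// ```ignore
/// let mut dev = LinuxI2CDevice::new("/dev/i2c-1", 0x63)?;
/// write_to_ezo(&mut dev, "R")?;
/// ```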
pub fn write_to_ezo(dev: &mut LinuxI2CDevice, cmd_str: &str) -> Result<(), EzoError> {
let cmd = CString::new(cmd_str).context(ErrorKind::UnreadableCommand)?;
if let Err(_) = dev.write(cmd.as_bytes_with_nul()) {
thread::sleep(Duration::from_millis(100));
dev.write(cmd.as_bytes_with_nul())
.context(ErrorKind::UnwritableCommand)?;
};
Ok(())
}
/// Turns off the high bit in each of the bytes of `v`. Raspberry Pi
/// for some reason outputs i2c buffers with some of the high bits
/// turned on.
fn turn_off_high_bits(v: &mut [u8]) {
for b in v.iter_mut() {
*b &= 0x7f;
}
}
/// Converts a slice of bytes, as they come raw from the i2c buffer,
/// into an owned String. Due to a hardware glitch in the Broadcom
/// I2C module, we need to strip off the high bit of each byte in the
/// response strings.
///
/// This function ensures that the response is a nul-terminated string
/// and that it is valid UTF-8 (a superset of ASCII).
///
/// After reading your buffer from the i2c device, check the first
/// byte for the response code. Then, pass a slice with the rest of
/// the buffer (without that first byte) to this function to get an
/// UTF-8 string.
pub fn string_from_response_data(response: &[u8]) -> Result<String, EzoError> {
let mut buf = response.to_owned();
turn_off_high_bits(&mut buf);
let terminated = CStr::from_bytes_with_nul(&buf).context(ErrorKind::MalformedResponse)?;
let s = terminated
.to_str()
.context(ErrorKind::MalformedResponse)?
.to_owned();
Ok(s)
}
#[cfg(test)]
mod tests {
use super::*;
use super::response::ResponseStatus;
#[test]
fn converts_baud_rates_to_numbers() {
assert_eq!(BpsRate::Bps300.parse(), 300);
assert_eq!(BpsRate::Bps1200.parse(), 1200);
assert_eq!(BpsRate::Bps2400.parse(), 2400);
assert_eq!(BpsRate::Bps9600.parse(), 9600);
assert_eq!(BpsRate::Bps19200.parse(), 19200);
assert_eq!(BpsRate::Bps38400.parse(), 38400);
assert_eq!(BpsRate::Bps57600.parse(), 57600);
assert_eq!(BpsRate::Bps115200.parse(), 115200);
}
#[test]
fn converts_numbers_to_baud_rates() {
assert_eq!(BpsRate::Bps300, BpsRate::parse_u32(300).unwrap());
assert_eq!(BpsRate::Bps1200, BpsRate::parse_u32(1200).unwrap());
assert_eq!(BpsRate::Bps2400, BpsRate::parse_u32(2400).unwrap());
assert_eq!(BpsRate::Bps9600, BpsRate::parse_u32(9600).unwrap());
assert_eq!(BpsRate::Bps19200, BpsRate::parse_u32(19200).unwrap());
assert_eq!(BpsRate::Bps38400, BpsRate::parse_u32(38400).unwrap());
assert_eq!(BpsRate::Bps57600, BpsRate::parse_u32(57600).unwrap());
assert_eq!(BpsRate::Bps115200, BpsRate::parse_u32(115200).unwrap());
}
| assert_eq!(data, flipped_data);
}
#[test]
fn converts_valid_response_to_string() {
// empty nul-terminated string
assert_eq!(string_from_response_data(&b"\0"[..]).unwrap(), "");
// non-empty nul-terminated string
assert_eq!(string_from_response_data(&b"hello\0"[..]).unwrap(), "hello");
// high bit is on in the last character
assert_eq!(
string_from_response_data(&b"hell\xef\0"[..]).unwrap(),
"hello"
);
}
fn assert_converts_to_malformed_response(data: &[u8]) {
let result = string_from_response_data(&data);
match result {
Err(e) => assert_eq!(e.kind(), ErrorKind::MalformedResponse),
_ => unreachable!(),
}
}
#[test]
fn converts_invalid_response_to_error() {
// No nul terminator in either of these
assert_converts_to_malformed_response(&b""[..]);
assert_converts_to_malformed_response(&b"\xff"[..]);
}
#[test]
fn process_no_data_response_code() {
assert_eq!(response_code(255), ResponseCode::NoDataExpected);
}
#[test]
fn process_pending_response_code() {
assert_eq!(response_code(254), ResponseCode::Pending);
}
#[test]
fn process_error_response_code() {
assert_eq!(response_code(2), ResponseCode::DeviceError);
}
#[test]
fn process_success_response_code() {
assert_eq!(response_code(1), ResponseCode::Success);
}
#[test]
fn process_unknown_response_code() {
assert_eq!(response_code(0), ResponseCode::UnknownError);
assert_eq!(response_code(16), ResponseCode::UnknownError);
assert_eq!(response_code(156), ResponseCode::UnknownError);
}
#[test]
fn macro_creates_noack_simple_command_with_docs() {
define_command! {
doc: "docstring here",
ControlCommand, { "cmd".to_string() }, 1000
}
assert_eq!(ControlCommand.get_command_string(), "cmd");
assert_eq!(ControlCommand.get_delay(), 1000);
}
#[test]
fn macro_creates_noack_input_command_with_docs() {
define_command! {
doc: "docstring here",
cmd: InputCommand(f32), { format!("cmd,{:.*}", 2, cmd) }, 0
}
assert_eq!(InputCommand(3.285).get_command_string(), "cmd,3.29");
assert_eq!(InputCommand(3.285).get_delay(), 0);
}
#[test]
fn macro_creates_ack_simple_command_with_docs() {
define_command! {
doc: "docstring here",
ControlCommand, { "cmd".to_string() }, 1000, Ack
}
assert_eq!(ControlCommand.get_command_string(), "cmd");
assert_eq!(ControlCommand.get_delay(), 1000);
}
#[test]
fn macro_creates_ack_input_command_with_docs() {
define_command! {
doc: "docstring here",
cmd: InputCommand(f32), { format!("cmd,{:.*}", 2, cmd) }, 0, Ack
}
assert_eq!(InputCommand(3.285).get_command_string(), "cmd,3.29");
assert_eq!(InputCommand(3.285).get_delay(), 0);
}
#[test]
fn macro_creates_simple_command_with_response_with_docs() {
define_command! {
doc: "docstring here",
ControlCommand, { "cmd".to_string() }, 1000,
_data: u32, { Ok (0u32) }
}
assert_eq!(ControlCommand.get_command_string(), "cmd");
assert_eq!(ControlCommand.get_delay(), 1000);
}
#[test]
fn macro_creates_input_command_with_response_with_docs() {
define_command! {
doc: "docstring here",
cmd: InputCommand(u8), { format!("cmd,{}\0", cmd) }, 140,
_data: (), { Ok (()) }
}
assert_eq!(InputCommand(0x7F).get_command_string(), "cmd,127\0");
assert_eq!(InputCommand(0x7F).get_delay(), 140);
}
} | #[test]
fn turns_off_high_bits() {
let data: [u8; 11] = [63, 73, 44, 112, 72, 44, 49, 46, 57, 56, 0];
let mut flipped_data: [u8; 11] = [63, 73, 172, 112, 200, 172, 49, 46, 57, 56, 0];
turn_off_high_bits(&mut flipped_data); | random_line_split |
mod.rs | // Copyright 2018 ETH Zurich. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The publisher logic and the interfaces used to control it.
use std::io;
use std::thread;
use std::sync::{Arc, Mutex, Condvar};
use slab::Slab;
use serde::Serialize;
use timely::ExchangeData;
use timely::progress::timestamp::Timestamp;
use timely::dataflow::operators::capture::event::{Event as TimelyEvent, EventPusher};
use tokio_core::reactor::{Core, Handle};
use strymon_communication::Network;
use strymon_communication::transport::{Listener, Sender, Receiver};
use strymon_communication::message::MessageBuf;
use futures::future::Future;
use futures::stream::{self, Stream};
use futures::unsync::mpsc;
use protocol::{Message, InitialSnapshot, RemoteTimestamp};
use self::progress::{LowerFrontier, UpperFrontier};
use self::sink::{EventSink, EventStream};
pub mod sink;
pub mod progress;
type SubscriberId = usize;
enum Event<T, D> {
Timely(TimelyEvent<T, D>),
Accepted((Sender, Receiver)),
Disconnected(SubscriberId),
Error(SubscriberId, io::Error),
ShutdownRequested,
}
/// State and logic of the publisher.
///
/// Maintains the upper and lower frontier of a Timely stream and broadcasts
/// their updated versions and any incoming data tuples to subscribed clients.
struct PublisherServer<T: Timestamp, D> {
// progress tracking state
lower: LowerFrontier<T>,
upper: UpperFrontier<T>,
// connected subscribers
subscribers: Slab<Sender>,
count: AtomicCounter,
// tokio event loop
events: Box<Stream<Item = Event<T, D>, Error = io::Error>>,
notificator: mpsc::UnboundedSender<Event<T, D>>,
core: Core,
handle: Handle,
}
impl<T: RemoteTimestamp, D: ExchangeData + Serialize> PublisherServer<T, D> {
/// Creates a new publisher, accepting subscribers on `socket`, publishing
/// the Timely events observed on `stream`.
fn new(socket: Listener, stream: EventStream<T, D>, count: AtomicCounter) -> io::Result<Self> {
let core = Core::new()?;
let handle = core.handle();
// queue for disconnection events from subscribers
let (notificator, subscribers) = mpsc::unbounded();
// we have three event sources:
let listener = socket.map(Event::Accepted);
let timely = stream
.map(Event::Timely)
.map_err(|_| unreachable!())
.chain(stream::once(Ok(Event::ShutdownRequested)));
let subscribers = subscribers.map_err(|_| unreachable!());
// all of which we merge into a single stream
let events = listener.select(subscribers).select(timely);
Ok(PublisherServer {
lower: LowerFrontier::default(),
upper: UpperFrontier::empty(),
subscribers: Slab::new(),
count: count,
events: Box::new(events),
notificator: notificator,
core: core,
handle: handle,
})
}
fn next_event(&mut self) -> io::Result<Event<T, D>> {
// run tokio reactor until we get the next event
let next_msg = self.events.by_ref().into_future();
match self.core.run(next_msg) {
Ok((msg, _)) => Ok(msg.unwrap()),
Err((err, _)) => Err(err),
}
}
/// Starts serving subscribers, blocks until the Timely stream completes
/// (or an error happens).
fn serve(mut self) -> io::Result<()> {
loop {
match self.next_event()? {
// processing incoming timely events
Event::Timely(ev) => self.timely_event(ev)?,
// handle networking events
Event::Accepted(sub) => self.add_subscriber(sub)?,
Event::Disconnected(id) => self.remove_subscriber(id),
Event::Error(id, err) => {
// subscriber errors should not be fatal. we just log
// them and forget about it.
error!("Subscriber {}: {}", id, err);
}
Event::ShutdownRequested => {
// this drops self, and thus drains the queues of | return Ok(());
}
}
}
}
/// Sends `msg` to all connected subscribers.
fn broadcast(&self, msg: MessageBuf) -> io::Result<()> {
if self.subscribers.len() == 0 {
// nothing to do here
return Ok(());
}
let last = self.subscribers.len() - 1;
for (id, sub) in self.subscribers.iter() {
if id < last {
sub.send(msg.clone());
} else {
// this case is a hint to the compiler that for the last
// iteration we can move `msg` directly, no need to clone
sub.send(msg);
break;
}
}
Ok(())
}
/// Processes a single Timely event, might cause multiple messages to be
/// sent to connected subscribers.
fn timely_event(&mut self, event: TimelyEvent<T, D>) -> io::Result<()> {
match event {
TimelyEvent::Progress(mut updates) => {
self.lower.update(&mut updates);
if !updates.is_empty() {
self.broadcast(Message::<T, D>::frontier_update(updates)?)?;
}
}
TimelyEvent::Messages(time, data) => {
self.upper.insert(time.clone());
self.broadcast(Message::<T, D>::data_message(time, data)?)?;
}
};
Ok(())
}
/// Registers a new subscriber.
///
/// Installs a "monitor" for the subscriber, making sure we get notified
/// when it disconnects.
fn add_subscriber(&mut self, (tx, rx): (Sender, Receiver)) -> io::Result<()> {
// inform new subscriber about current state of progress
let snapshot = InitialSnapshot::encode(self.lower.elements(), self.upper.elements())?;
tx.send(snapshot);
// add it to the list of listening subscribers
self.count.increment();
let id = self.subscribers.insert(tx);
// register event handler for disconnection
let notificator = self.notificator.clone();
let subscriber = rx.for_each(|_| {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"unexpected message",
))
}).then(move |res| {
let event = match res {
Ok(()) => Event::Disconnected(id),
Err(err) => Event::Error(id, err),
};
notificator.unbounded_send(event).map_err(|_| ())
});
self.handle.spawn(subscriber);
Ok(())
}
/// Removes a subscriber from the broadcasting list.
///
/// This does not cancel the subscriber monitor registered above, so if the
/// subscriber is still alive, it will still emit events on errors or
/// when it disconnects.
fn remove_subscriber(&mut self, id: SubscriberId) {
self.count.decrement();
self.subscribers.remove(id);
}
}
impl<T: Timestamp, D> Drop for PublisherServer<T, D> {
fn drop(&mut self) {
self.subscribers.clear();
self.count.invalidate();
}
}
/// The host and port on which the publisher is accepting subscribers.
pub type Addr = (String, u16);
/// A handle for a spawned publisher.
///
/// This implements `EventPusher`, so it can be used with Timely's `capture`.
/// When dropped, will block and drain any subscriber queues.
pub struct Publisher<T, D> {
/// Handle for events to be published by this instance.
sink: Option<EventSink<T, D>>,
/// A join handle for the spawned thread.
thread: Thread,
// The current subscriber count (wrapped in a mutex, so we can block on it)
subscribers: AtomicCounter,
}
impl<T, D> Publisher<T, D>
where
T: RemoteTimestamp,
D: ExchangeData + Serialize,
{
/// Spawns a new publisher thread on an ephemeral network port.
///
/// The corresponding address can be obtained from the first member of the
/// tuple. The publisher handle itself is used to send events into the
/// topic.
pub fn new(network: &Network) -> io::Result<(Addr, Self)> {
// the queue between the Timely operator and this publisher thread
let (timely_sink, timely_stream) = sink::pair();
// the network socket on which subscribers are accepted
let listener = network.listen(None)?;
let addr = {
let (host, port) = listener.external_addr();
(String::from(host), port)
};
let subscribers = AtomicCounter::new();
let count = subscribers.clone();
// main event loop of the publisher thread
let handle = thread::spawn(move || {
PublisherServer::new(listener, timely_stream, count)
.and_then(|publisher| publisher.serve())
});
let publisher = Publisher {
sink: Some(timely_sink),
thread: Thread::new(handle),
subscribers: subscribers,
};
Ok((addr, publisher))
}
/// Blocks the current thread until some subscribers have connected.
///
/// Returns the number of currently connected subscribers. Note that this
/// does not actually guarantee that the subscribers are still connected,
/// only that there was some recent point in time when there were some
/// connected subscribers. This is mostly intended for testing purposes.
#[allow(dead_code)]
pub fn subscriber_barrier(&self) -> io::Result<usize> {
// important: this must unblock when the thread dies, so we make
// sure to call `count.invalidate()` in the publisher thread when it drops
let count = self.subscribers.wait_nonzero();
if count == COUNTER_INVALID {
Err(io::Error::new(io::ErrorKind::Other, "publisher terminated"))
} else {
Ok(count)
}
}
}
impl<T, D> EventPusher<T, D> for Publisher<T, D>
where
T: RemoteTimestamp,
D: ExchangeData + Serialize,
{
fn push(&mut self, event: TimelyEvent<T, D>) {
self.sink.as_mut().unwrap().push(event)
}
}
impl<T, D> Drop for Publisher<T, D> {
fn drop(&mut self) {
// Note that the drop order is important here: the `EventSink` must be
// dropped before `Thread` in order to avoid a deadlock: Dropping `EventSink`
// indicates to the publisher thread that it has to shut down, which will block
// the join operation until the shutdown is complete.
drop(self.sink.take());
if let Err(err) = self.thread.join() {
error!("failed to drain subscriber queues: {}", err);
}
}
}
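// A self-contained sketch of the same shutdown idiom using only std types:
// dropping the channel sender first unblocks the worker loop, so the
// subsequent join cannot deadlock. `Worker` and its fields are illustrative
// names, not part of this crate.
struct Worker {
    tx: Option<::std::sync::mpsc::Sender<()>>,
    handle: Option<::std::thread::JoinHandle<()>>,
}
impl Worker {
    fn spawn_worker() -> Self {
        let (tx, rx) = ::std::sync::mpsc::channel::<()>();
        // the worker runs until the sending half is dropped
        let handle = ::std::thread::spawn(move || while rx.recv().is_ok() {});
        Worker {
            tx: Some(tx),
            handle: Some(handle),
        }
    }
}
impl Drop for Worker {
    fn drop(&mut self) {
        // drop order matters: release the sender first so `recv` returns an
        // error and the worker exits, only then block on `join`
        drop(self.tx.take());
        if let Some(handle) = self.handle.take() {
            let _ = handle.join();
        }
    }
}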
type ThreadHandle = thread::JoinHandle<io::Result<()>>;
/// A join handle for the publisher thread.
///
/// This can be used to ensure all subscriber queues are drained properly.
struct Thread(Option<ThreadHandle>);
impl Thread {
fn new(handle: ThreadHandle) -> Self {
Thread(Some(handle))
}
fn join(&mut self) -> io::Result<()> {
match self.0.take().map(|t| t.join()) {
Some(Ok(res)) => res,
Some(Err(_)) => Err(io::Error::new(io::ErrorKind::Other, "thread panicked")),
None => Err(io::Error::new(io::ErrorKind::Other, "already joined")),
}
}
}
/// A counter which can block readers when it reaches zero.
#[derive(Debug, Clone)]
struct AtomicCounter(Arc<(Mutex<usize>, Condvar)>);
const COUNTER_INVALID: usize = ::std::usize::MAX;
impl AtomicCounter {
fn new() -> Self {
AtomicCounter(Default::default())
}
fn lock<'a>(&'a self) -> (::std::sync::MutexGuard<'a, usize>, &'a Condvar) {
let AtomicCounter(ref inner) = *self;
(
inner.0.lock().expect("publisher thread poisoned counter"),
&inner.1,
)
}
fn increment(&self) {
let (mut count, nonzero) = self.lock();
*count += 1;
nonzero.notify_all();
}
fn decrement(&self) {
let (mut count, _) = self.lock();
debug_assert!(*count > 0);
*count -= 1;
}
fn invalidate(&self) {
let (mut count, nonzero) = self.lock();
*count = COUNTER_INVALID;
nonzero.notify_all();
}
fn wait_nonzero(&self) -> usize {
let (mut count, nonzero) = self.lock();
while *count == 0 {
count = nonzero.wait(count).unwrap();
}
*count
}
} | // all still connected subscribers | random_line_split |
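// A minimal, std-only sketch of the blocking-counter pattern that
// `AtomicCounter` above wraps: a Mutex-guarded count paired with a Condvar,
// where readers wait in a loop while the count is zero and writers wake them
// with `notify_all`. The names below are illustrative, not part of the crate.
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

#[derive(Clone, Default)]
struct BlockingCounter(Arc<(Mutex<usize>, Condvar)>);

impl BlockingCounter {
    // bump the count and wake any blocked readers
    fn increment(&self) {
        let (count, nonzero) = &*self.0;
        *count.lock().unwrap() += 1;
        nonzero.notify_all();
    }

    // block the calling thread until the count becomes non-zero
    fn wait_nonzero(&self) -> usize {
        let (count, nonzero) = &*self.0;
        let mut guard = count.lock().unwrap();
        while *guard == 0 {
            guard = nonzero.wait(guard).unwrap();
        }
        *guard
    }
}

fn main() {
    let counter = BlockingCounter::default();
    let writer = counter.clone();
    // a "subscriber" arrives on another thread and bumps the count
    let t = thread::spawn(move || writer.increment());
    // blocks here until the increment has been observed
    assert!(counter.wait_nonzero() >= 1);
    t.join().unwrap();
}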
mod.rs | // Copyright 2018 ETH Zurich. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The publisher logic and the interfaces used to control it.
use std::io;
use std::thread;
use std::sync::{Arc, Mutex, Condvar};
use slab::Slab;
use serde::Serialize;
use timely::ExchangeData;
use timely::progress::timestamp::Timestamp;
use timely::dataflow::operators::capture::event::{Event as TimelyEvent, EventPusher};
use tokio_core::reactor::{Core, Handle};
use strymon_communication::Network;
use strymon_communication::transport::{Listener, Sender, Receiver};
use strymon_communication::message::MessageBuf;
use futures::future::Future;
use futures::stream::{self, Stream};
use futures::unsync::mpsc;
use protocol::{Message, InitialSnapshot, RemoteTimestamp};
use self::progress::{LowerFrontier, UpperFrontier};
use self::sink::{EventSink, EventStream};
pub mod sink;
pub mod progress;
type SubscriberId = usize;
enum Event<T, D> {
Timely(TimelyEvent<T, D>),
Accepted((Sender, Receiver)),
Disconnected(SubscriberId),
Error(SubscriberId, io::Error),
ShutdownRequested,
}
/// State and logic of the publisher.
///
/// Maintains the upper and lower frontier of a Timely stream and broadcasts
/// their updated versions and any incoming data tuples to subscribed clients.
struct PublisherServer<T: Timestamp, D> {
// progress tracking state
lower: LowerFrontier<T>,
upper: UpperFrontier<T>,
// connected subscribers
subscribers: Slab<Sender>,
count: AtomicCounter,
// tokio event loop
events: Box<Stream<Item = Event<T, D>, Error = io::Error>>,
notificator: mpsc::UnboundedSender<Event<T, D>>,
core: Core,
handle: Handle,
}
impl<T: RemoteTimestamp, D: ExchangeData + Serialize> PublisherServer<T, D> {
/// Creates a new publisher, accepting subscribers on `socket`, publishing
/// the Timely events observed on `stream`.
fn new(socket: Listener, stream: EventStream<T, D>, count: AtomicCounter) -> io::Result<Self> {
let core = Core::new()?;
let handle = core.handle();
// queue for disconnection events from subscribers
let (notificator, subscribers) = mpsc::unbounded();
// we have three event sources:
let listener = socket.map(Event::Accepted);
let timely = stream
.map(Event::Timely)
.map_err(|_| unreachable!())
.chain(stream::once(Ok(Event::ShutdownRequested)));
let subscribers = subscribers.map_err(|_| unreachable!());
// all of which we merge into a single stream
let events = listener.select(subscribers).select(timely);
Ok(PublisherServer {
lower: LowerFrontier::default(),
upper: UpperFrontier::empty(),
subscribers: Slab::new(),
count: count,
events: Box::new(events),
notificator: notificator,
core: core,
handle: handle,
})
}
fn next_event(&mut self) -> io::Result<Event<T, D>> {
// run tokio reactor until we get the next event
let next_msg = self.events.by_ref().into_future();
match self.core.run(next_msg) {
Ok((msg, _)) => Ok(msg.unwrap()),
Err((err, _)) => Err(err),
}
}
/// Starts serving subscribers, blocks until the Timely stream completes
/// (or an error happens).
fn serve(mut self) -> io::Result<()> {
loop {
match self.next_event()? {
// processing incoming timely events
Event::Timely(ev) => self.timely_event(ev)?,
// handle networking events
Event::Accepted(sub) => self.add_subscriber(sub)?,
Event::Disconnected(id) => self.remove_subscriber(id),
Event::Error(id, err) => {
// subscriber errors should not be fatal. we just log
// them and forget about it.
error!("Subscriber {}: {}", id, err);
}
Event::ShutdownRequested => {
// this drops self, and thus drains the queues of
// all still connected subscribers
return Ok(());
}
}
}
}
/// Sends `msg` to all connected subscribers.
fn broadcast(&self, msg: MessageBuf) -> io::Result<()> {
if self.subscribers.len() == 0 {
// nothing to do here
return Ok(());
}
let last = self.subscribers.len() - 1;
for (id, sub) in self.subscribers.iter() {
if id < last {
sub.send(msg.clone());
} else {
// this case is a hint to the compiler that for the last
// iteration we can move `msg` directly, no need to clone
sub.send(msg);
break;
}
}
Ok(())
}
/// Processes a single Timely event, might cause multiple messages to be
/// sent to connected subscribers.
fn timely_event(&mut self, event: TimelyEvent<T, D>) -> io::Result<()> {
match event {
TimelyEvent::Progress(mut updates) => {
self.lower.update(&mut updates);
if !updates.is_empty() {
self.broadcast(Message::<T, D>::frontier_update(updates)?)?;
}
}
TimelyEvent::Messages(time, data) => {
self.upper.insert(time.clone());
self.broadcast(Message::<T, D>::data_message(time, data)?)?;
}
};
Ok(())
}
/// Registers a new subscriber.
///
/// Installs a "monitor" for the subscriber, making sure we get notified
/// when it disconnects.
fn add_subscriber(&mut self, (tx, rx): (Sender, Receiver)) -> io::Result<()> {
// inform new subscriber about current state of progress
let snapshot = InitialSnapshot::encode(self.lower.elements(), self.upper.elements())?;
tx.send(snapshot);
// add it to the list of listening subscribers
self.count.increment();
let id = self.subscribers.insert(tx);
// register event handler for disconnection
let notificator = self.notificator.clone();
let subscriber = rx.for_each(|_| {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"unexpected message",
))
}).then(move |res| {
let event = match res {
Ok(()) => Event::Disconnected(id),
Err(err) => Event::Error(id, err),
};
notificator.unbounded_send(event).map_err(|_| ())
});
self.handle.spawn(subscriber);
Ok(())
}
/// Removes a subscriber from the broadcasting list.
///
/// This does not cancel the subscriber monitor registered above, so if the
/// subscriber is still alive, it will still emit events on errors or
/// when it disconnects.
fn remove_subscriber(&mut self, id: SubscriberId) {
self.count.decrement();
self.subscribers.remove(id);
}
}
impl<T: Timestamp, D> Drop for PublisherServer<T, D> {
fn drop(&mut self) {
self.subscribers.clear();
self.count.invalidate();
}
}
/// The host and port on which the publisher is accepting subscribers.
pub type Addr = (String, u16);
/// A handle for a spawned publisher.
///
/// This implements `EventPusher`, so it can be used with Timely's `capture`.
/// When dropped, will block and drain any subscriber queues.
pub struct Publisher<T, D> {
/// Handle for events to be published by this instance.
sink: Option<EventSink<T, D>>,
/// A join handle for the spawned thread.
thread: Thread,
// The current subscriber count (wrapped in a mutex, so we can block on it)
subscribers: AtomicCounter,
}
impl<T, D> Publisher<T, D>
where
T: RemoteTimestamp,
D: ExchangeData + Serialize,
{
/// Spawns a new publisher thread on an ephemeral network port.
///
/// The corresponding address can be obtained from the first member of the
/// tuple. The publisher handle itself is used to send events into the
/// topic.
pub fn new(network: &Network) -> io::Result<(Addr, Self)> {
// the queue between the Timely operator and this publisher thread
let (timely_sink, timely_stream) = sink::pair();
// the network socket on which subscribers are accepted
let listener = network.listen(None)?;
let addr = {
let (host, port) = listener.external_addr();
(String::from(host), port)
};
let subscribers = AtomicCounter::new();
let count = subscribers.clone();
// main event loop of the publisher thread
let handle = thread::spawn(move || {
PublisherServer::new(listener, timely_stream, count)
.and_then(|publisher| publisher.serve())
});
let publisher = Publisher {
sink: Some(timely_sink),
thread: Thread::new(handle),
subscribers: subscribers,
};
Ok((addr, publisher))
}
/// Blocks the current thread until some subscribers have connected.
///
/// Returns the number of currently connected subscribers. Note that this
/// does not actually guarantee that the subscribers are still connected,
/// only that there was some recent point in time when there were some
/// connected subscribers. This is mostly intended for testing purposes.
#[allow(dead_code)]
pub fn subscriber_barrier(&self) -> io::Result<usize> {
// important: this must unblock when the thread dies, so we make
// sure to call `count.invalidate()` in the publisher thread when it drops
let count = self.subscribers.wait_nonzero();
if count == COUNTER_INVALID {
Err(io::Error::new(io::ErrorKind::Other, "publisher terminated"))
} else {
Ok(count)
}
}
}
impl<T, D> EventPusher<T, D> for Publisher<T, D>
where
T: RemoteTimestamp,
D: ExchangeData + Serialize,
{
fn push(&mut self, event: TimelyEvent<T, D>) {
self.sink.as_mut().unwrap().push(event)
}
}
impl<T, D> Drop for Publisher<T, D> {
fn drop(&mut self) {
// Note that the drop order is important here: the `EventSink` must be
// dropped before `Thread` in order to avoid a deadlock: Dropping `EventSink`
// indicates to the publisher thread that it has to shut down, which will block
// the join operation until the shutdown is complete.
drop(self.sink.take());
if let Err(err) = self.thread.join() {
error!("failed to drain subscriber queues: {}", err);
}
}
}
type ThreadHandle = thread::JoinHandle<io::Result<()>>;
/// A join handle for the publisher thread.
///
/// This can be used to ensure all subscriber queues are drained properly.
struct Thread(Option<ThreadHandle>);
impl Thread {
fn new(handle: ThreadHandle) -> Self {
Thread(Some(handle))
}
fn join(&mut self) -> io::Result<()> {
match self.0.take().map(|t| t.join()) {
Some(Ok(res)) => res,
Some(Err(_)) => Err(io::Error::new(io::ErrorKind::Other, "thread panicked")),
None => Err(io::Error::new(io::ErrorKind::Other, "already joined")),
}
}
}
/// A counter which can block readers when it reaches zero.
#[derive(Debug, Clone)]
struct AtomicCounter(Arc<(Mutex<usize>, Condvar)>);
const COUNTER_INVALID: usize = ::std::usize::MAX;
impl AtomicCounter {
fn new() -> Self {
AtomicCounter(Default::default())
}
fn | <'a>(&'a self) -> (::std::sync::MutexGuard<'a, usize>, &'a Condvar) {
let AtomicCounter(ref inner) = *self;
(
inner.0.lock().expect("publisher thread poisoned counter"),
&inner.1,
)
}
fn increment(&self) {
let (mut count, nonzero) = self.lock();
*count += 1;
nonzero.notify_all();
}
fn decrement(&self) {
let (mut count, _) = self.lock();
debug_assert!(*count > 0);
*count -= 1;
}
fn invalidate(&self) {
let (mut count, nonzero) = self.lock();
*count = COUNTER_INVALID;
nonzero.notify_all();
}
fn wait_nonzero(&self) -> usize {
let (mut count, nonzero) = self.lock();
while *count == 0 {
count = nonzero.wait(count).unwrap();
}
*count
}
}
| lock | identifier_name |
lib.rs | ::api) with the generic Kubernetes [`Api`](crate::Api)
//! - [`derive`](kube_derive) with the [`CustomResource`](crate::CustomResource) derive for building controller types
//! - [`runtime`](crate::runtime) with a [`Controller`](crate::runtime::Controller) / [`watcher`](crate::runtime::watcher()) / [`reflector`](crate::runtime::reflector::reflector) / [`Store`](crate::runtime::reflector::Store)
//! - [`core`](crate::core) with generics from `apimachinery`
//!
//! You can use each of these as you need with the help of the [exported features](https://github.com/kube-rs/kube/blob/main/kube/Cargo.toml#L18).
//!
//! # Using the Client
//! ```no_run
//! use futures::{StreamExt, TryStreamExt};
//! use kube::{Client, api::{Api, ResourceExt, ListParams, PostParams}};
//! use k8s_openapi::api::core::v1::Pod;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Infer the runtime environment and try to create a Kubernetes Client
//! let client = Client::try_default().await?;
//!
//! // Read pods in the configured namespace into the typed interface from k8s-openapi
//! let pods: Api<Pod> = Api::default_namespaced(client);
//! for p in pods.list(&ListParams::default()).await? {
//! println!("found pod {}", p.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`Client`](crate::client) for the extensible Kubernetes client
//! - [`Api`](crate::Api) for the generic api methods available on Kubernetes resources
//! - [k8s-openapi](https://docs.rs/k8s-openapi/*/k8s_openapi/) for documentation about the generated Kubernetes types
//!
//! # Using the Runtime with the Derive macro
//!
//! ```no_run
//! use schemars::JsonSchema;
//! use serde::{Deserialize, Serialize};
//! use serde_json::json;
//! use futures::{StreamExt, TryStreamExt};
//! use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
//! use kube::{
//! api::{Api, DeleteParams, PatchParams, Patch, ResourceExt},
//! core::CustomResourceExt,
//! Client, CustomResource,
//! runtime::{watcher, WatchStreamExt, wait::{conditions, await_condition}},
//! };
//!
//! // Our custom resource
//! #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
//! #[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
//! pub struct FooSpec {
//! info: String,
//! #[schemars(length(min = 3))]
//! name: String,
//! replicas: i32,
//! }
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let client = Client::try_default().await?;
//! let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
//!
//! // Apply the CRD so users can create Foo instances in Kubernetes
//! crds.patch("foos.clux.dev",
//! &PatchParams::apply("my_manager"),
//! &Patch::Apply(Foo::crd())
//! ).await?;
//!
//! // Wait for the CRD to be ready
//! tokio::time::timeout(
//! std::time::Duration::from_secs(10),
//! await_condition(crds, "foos.clux.dev", conditions::is_crd_established())
//! ).await?;
//!
//! // Watch for changes to foos in the configured namespace
//! let foos: Api<Foo> = Api::default_namespaced(client.clone());
//! let wc = watcher::Config::default();
//! let mut apply_stream = watcher(foos, wc).applied_objects().boxed();
//! while let Some(f) = apply_stream.try_next().await? {
//! println!("saw apply to {}", f.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`CustomResource`](crate::CustomResource) for documentation on how to configure custom resources
//! - [`runtime::watcher`](crate::runtime::watcher()) for how long-running watches work and why you want to use this over [`Api::watch`](crate::Api::watch)
//! - [`runtime`](crate::runtime) for abstractions that help with more complicated Kubernetes applications
//!
//! # Examples
//! A large list of complete, runnable examples with explanations is available in the [examples folder](https://github.com/kube-rs/kube/tree/main/examples).
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
#![forbid(unsafe_code)]
macro_rules! cfg_client {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "client")))]
#[cfg(feature = "client")]
$item
)*
}
}
macro_rules! cfg_config {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
#[cfg(feature = "config")]
$item
)*
}
}
macro_rules! cfg_error {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(any(feature = "config", feature = "client"))))]
#[cfg(any(feature = "config", feature = "client"))]
$item
)*
}
}
cfg_client! {
pub use kube_client::api;
pub use kube_client::discovery;
pub use kube_client::client;
#[doc(inline)]
pub use api::Api;
#[doc(inline)]
pub use client::Client;
#[doc(inline)]
pub use discovery::Discovery;
}
cfg_config! {
pub use kube_client::config;
#[doc(inline)]
pub use config::Config;
}
cfg_error! {
pub use kube_client::error;
#[doc(inline)] pub use error::Error;
/// Convenient alias for `Result<T, Error>`
pub type Result<T, E = Error> = std::result::Result<T, E>;
}
/// Re-exports from [`kube-derive`](kube_derive)
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use kube_derive::CustomResource;
/// Re-exports from `kube-runtime`
#[cfg(feature = "runtime")]
#[cfg_attr(docsrs, doc(cfg(feature = "runtime")))]
#[doc(inline)]
pub use kube_runtime as runtime;
pub use crate::core::{CustomResourceExt, Resource, ResourceExt};
/// Re-exports from `kube_core`
#[doc(inline)]
pub use kube_core as core;
// Tests that require a cluster and the complete feature set
// Can be run with `cargo test -p kube --lib --features=runtime,derive -- --ignored`
#[cfg(test)]
#[cfg(all(feature = "derive", feature = "client"))]
mod test {
use crate::{
api::{DeleteParams, Patch, PatchParams},
Api, Client, CustomResourceExt, Resource, ResourceExt,
};
use kube_derive::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
#[kube(status = "FooStatus")]
#[kube(scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
pub struct FooSpec {
name: String,
info: Option<String>,
replicas: isize,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
pub struct FooStatus {
is_bad: bool,
replicas: isize,
}
#[tokio::test]
#[ignore = "needs kubeconfig"]
async fn custom_resource_generates_correct_core_structs() {
use crate::core::{ApiResource, DynamicObject, GroupVersionKind};
let client = Client::try_default().await.unwrap();
let gvk = GroupVersionKind::gvk("clux.dev", "v1", "Foo");
let api_resource = ApiResource::from_gvk(&gvk);
let a1: Api<DynamicObject> = Api::namespaced_with(client.clone(), "myns", &api_resource);
let a2: Api<Foo> = Api::namespaced(client, "myns");
// make sure they return the same url_path through their impls
assert_eq!(a1.resource_url(), a2.resource_url());
}
use k8s_openapi::{
api::core::v1::ConfigMap,
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
};
#[tokio::test]
#[ignore = "needs cluster (creates + patches foo crd)"]
#[cfg(all(feature = "derive", feature = "runtime"))]
async fn derived_resource_queriable_and_has_subresources() -> Result<(), Box<dyn std::error::Error>> {
use crate::runtime::wait::{await_condition, conditions};
use serde_json::json;
let client = Client::try_default().await?;
let ssapply = PatchParams::apply("kube").force();
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
// Server-side apply CRD and wait for it to get ready
crds.patch("foos.clux.dev", &ssapply, &Patch::Apply(Foo::crd()))
.await?;
let establish = await_condition(crds.clone(), "foos.clux.dev", conditions::is_crd_established());
let _ = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await?;
// Use it
let foos: Api<Foo> = Api::default_namespaced(client.clone());
// Apply from generated struct
{
let foo = Foo::new("baz", FooSpec {
name: "baz".into(),
info: Some("old baz".into()),
replicas: 1,
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(&foo)).await?;
assert_eq!(o.spec.name, "baz");
let oref = o.object_ref(&());
assert_eq!(oref.name.unwrap(), "baz");
assert_eq!(oref.uid, o.uid());
}
// Apply from partial json!
{
let patch = json!({
"apiVersion": "clux.dev/v1",
"kind": "Foo",
"spec": {
"name": "foo",
"replicas": 2
}
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(patch)).await?;
assert_eq!(o.spec.replicas, 2, "patching spec updated spec.replicas");
}
// check subresource
{
let scale = foos.get_scale("baz").await?;
assert_eq!(scale.spec.unwrap().replicas, Some(2));
let status = foos.get_status("baz").await?;
assert!(status.status.is_none(), "nothing has set status");
}
// set status subresource
{
let fs = serde_json::json!({"status": FooStatus { is_bad: false, replicas: 1 }});
let o = foos
.patch_status("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert!(o.status.is_some(), "status set after patch_status");
}
// set scale subresource
{
let fs = serde_json::json!({"spec": { "replicas": 3 }});
let o = foos
.patch_scale("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert_eq!(o.status.unwrap().replicas, 1, "scale replicas got patched");
let linked_replicas = o.spec.unwrap().replicas.unwrap();
assert_eq!(linked_replicas, 3, "patch_scale updates linked spec.replicas");
}
// cleanup
foos.delete_collection(&DeleteParams::default(), &Default::default())
.await?;
crds.delete("foos.clux.dev", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
async fn custom_serialized_objects_are_queryable_and_iterable() -> Result<(), Box<dyn std::error::Error>>
{
use crate::core::{
object::{HasSpec, HasStatus, NotUsed, Object},
ApiResource,
};
use k8s_openapi::api::core::v1::Pod;
#[derive(Clone, Deserialize, Debug)]
struct PodSpecSimple {
containers: Vec<ContainerSimple>,
}
#[derive(Clone, Deserialize, Debug)]
struct ContainerSimple {
#[allow(dead_code)]
image: String,
}
type PodSimple = Object<PodSpecSimple, NotUsed>;
// use known type information from pod (can also use discovery for this)
let ar = ApiResource::erase::<Pod>(&());
let client = Client::try_default().await?;
let api: Api<PodSimple> = Api::default_namespaced_with(client, &ar);
let mut list = api.list(&Default::default()).await?;
// check we can mutably iterate over ObjectList
for pod in &mut list {
pod.spec_mut().containers = vec![];
*pod.status_mut() = None;
pod.annotations_mut()
.entry("kube-seen".to_string())
.or_insert_with(|| "yes".to_string());
pod.labels_mut()
.entry("kube.rs".to_string())
.or_insert_with(|| "hello".to_string());
pod.finalizers_mut().push("kube-finalizer".to_string());
pod.managed_fields_mut().clear();
// NB: we are **not** pushing these back upstream - (Api::apply or Api::replace needed for it)
}
// check we can iterate over ObjectList normally - and check the mutations worked
for pod in list {
assert!(pod.annotations().get("kube-seen").is_some());
assert!(pod.labels().get("kube.rs").is_some());
assert!(pod.finalizers().contains(&"kube-finalizer".to_string()));
assert!(pod.spec().containers.is_empty());
assert!(pod.managed_fields().is_empty());
}
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (fetches api resources, and lists all)"]
#[cfg(feature = "derive")]
async fn derived_resources_discoverable() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
core::{DynamicObject, GroupVersion, GroupVersionKind},
discovery::{self, verbs, ApiGroup, Discovery, Scope},
runtime::wait::{await_condition, conditions, Condition},
};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "kube.rs", version = "v1", kind = "TestCr", namespaced)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
struct TestCrSpec {}
let client = Client::try_default().await?;
// install the crd and wait for it to be established
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
let ssapply = PatchParams::apply("kube").force();
crds.patch("testcrs.kube.rs", &ssapply, &Patch::Apply(TestCr::crd()))
.await?;
let establish = await_condition(crds.clone(), "testcrs.kube.rs", conditions::is_crd_established());
let crd = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await??;
assert!(conditions::is_crd_established().matches_object(crd.as_ref()));
tokio::time::sleep(std::time::Duration::from_secs(2)).await; // Established condition is actually not enough for api discovery :(
// create partial information for it to discover
let gvk = GroupVersionKind::gvk("kube.rs", "v1", "TestCr");
let gv = GroupVersion::gv("kube.rs", "v1");
// discover by both (recommended kind on groupversion) and (pinned gvk) and they should equal
let apigroup = discovery::oneshot::pinned_group(&client, &gv).await?;
let (ar1, caps1) = apigroup.recommended_kind("TestCr").unwrap();
let (ar2, caps2) = discovery::pinned_kind(&client, &gvk).await?;
assert_eq!(caps1.operations.len(), caps2.operations.len(), "unequal caps");
assert_eq!(ar1, ar2, "unequal apiresource");
assert_eq!(DynamicObject::api_version(&ar2), "kube.rs/v1", "unequal dynver");
// run (almost) full discovery
let discovery = Discovery::new(client.clone())
// skip something in discovery (clux.dev crd being mutated in other tests)
.exclude(&["rbac.authorization.k8s.io", "clux.dev"])
.run()
.await?;
// check our custom resource first by resolving within groups
assert!(discovery.has_group("kube.rs"), "missing group kube.rs");
let (ar, _caps) = discovery.resolve_gvk(&gvk).unwrap();
assert_eq!(ar.group, gvk.group, "unexpected discovered group");
assert_eq!(ar.version, gvk.version, "unexpected discovered version");
assert_eq!(ar.kind, gvk.kind, "unexpected discovered kind");
// check all non-excluded groups that are iterable
let mut groups = discovery.groups_alphabetical().into_iter();
let firstgroup = groups.next().unwrap();
assert_eq!(firstgroup.name(), ApiGroup::CORE_GROUP, "core not first");
for group in groups {
for (ar, caps) in group.recommended_resources() {
if !caps.supports_operation(verbs::LIST) {
continue;
}
let api: Api<DynamicObject> = if caps.scope == Scope::Namespaced | else {
Api::all_with(client.clone(), &ar)
};
api.list(&Default::default()).await?;
}
}
// cleanup
crds.delete("testcrs.kube.rs", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create await a pod)"]
#[cfg(feature = "runtime")]
async fn pod_can_await_conditions() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
api::{DeleteParams, PostParams},
runtime::wait::{await_condition, conditions, delete::delete_and_finalize, Condition},
Api, Client,
};
use k8s_openapi::api::core::v1::Pod;
use std::time::Duration;
use tokio::time::timeout;
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 20s
let data: Pod = serde_json::from_value(serde_json::json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube4",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 20"],
}],
}
}))?;
let pp = PostParams::default();
assert_eq!(
data.name_unchecked(),
pods.create(&pp, &data).await?.name_unchecked()
);
// Watch its phase for a few seconds
let is_running = await_condition(pods.clone(), "busybox-kube4", conditions::is_pod_running());
let _ = timeout(Duration::from_secs(15), is_running).await?;
// Verify we can get it
let pod = pods.get("busybox-kube4").await?;
assert_eq!(pod.spec.as_ref().unwrap().containers[0].name, "busybox");
// Wait for a more complicated condition: ContainersReady AND Initialized
// TODO: remove these once we can write these functions generically
fn is_each_container_ready() -> impl Condition<Pod> {
|obj: Option<&Pod>| {
if let Some(o) = obj {
if let Some(s) = &o.status {
if let Some(conds) = &s.conditions {
if let Some(pcond) = conds.iter().find(|c| c.type_ == "ContainersReady") {
return pcond.status == "True";
}
}
}
}
false
}
}
let is_fully_ready = await_condition(
pods.clone(),
"busybox-kube4",
conditions::is_pod_running().and(is_each_container_ready()),
);
let _ = timeout(Duration::from_secs(10), is_fully_ready).await?;
// Delete it - and wait for deletion to complete
| {
Api::default_namespaced_with(client.clone(), &ar)
} | conditional_block |
lib.rs | crate::api) with the generic Kubernetes [`Api`](crate::Api)
//! - [`derive`](kube_derive) with the [`CustomResource`](crate::CustomResource) derive for building controller types
//! - [`runtime`](crate::runtime) with a [`Controller`](crate::runtime::Controller) / [`watcher`](crate::runtime::watcher()) / [`reflector`](crate::runtime::reflector::reflector) / [`Store`](crate::runtime::reflector::Store)
//! - [`core`](crate::core) with generics from `apimachinery`
//!
//! You can use each of these as you need with the help of the [exported features](https://github.com/kube-rs/kube/blob/main/kube/Cargo.toml#L18).
//!
//! # Using the Client
//! ```no_run
//! use futures::{StreamExt, TryStreamExt};
//! use kube::{Client, api::{Api, ResourceExt, ListParams, PostParams}};
//! use k8s_openapi::api::core::v1::Pod;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Infer the runtime environment and try to create a Kubernetes Client
//! let client = Client::try_default().await?;
//!
//! // Read pods in the configured namespace into the typed interface from k8s-openapi
//! let pods: Api<Pod> = Api::default_namespaced(client);
//! for p in pods.list(&ListParams::default()).await? {
//! println!("found pod {}", p.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`Client`](crate::client) for the extensible Kubernetes client
//! - [`Api`](crate::Api) for the generic api methods available on Kubernetes resources
//! - [k8s-openapi](https://docs.rs/k8s-openapi/*/k8s_openapi/) for documentation about the generated Kubernetes types
//!
//! # Using the Runtime with the Derive macro
//!
//! ```no_run
//! use schemars::JsonSchema;
//! use serde::{Deserialize, Serialize};
//! use serde_json::json;
//! use futures::{StreamExt, TryStreamExt};
//! use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
//! use kube::{
//! api::{Api, DeleteParams, PatchParams, Patch, ResourceExt},
//! core::CustomResourceExt,
//! Client, CustomResource,
//! runtime::{watcher, WatchStreamExt, wait::{conditions, await_condition}},
//! };
//!
//! // Our custom resource
//! #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
//! #[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
//! pub struct FooSpec {
//! info: String,
//! #[schemars(length(min = 3))]
//! name: String,
//! replicas: i32,
//! }
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let client = Client::try_default().await?;
//! let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
//!
//! // Apply the CRD so users can create Foo instances in Kubernetes
//! crds.patch("foos.clux.dev",
//! &PatchParams::apply("my_manager"),
//! &Patch::Apply(Foo::crd())
//! ).await?;
//!
//! // Wait for the CRD to be ready
//! tokio::time::timeout(
//! std::time::Duration::from_secs(10),
//! await_condition(crds, "foos.clux.dev", conditions::is_crd_established())
//! ).await?;
//!
//! // Watch for changes to foos in the configured namespace
//! let foos: Api<Foo> = Api::default_namespaced(client.clone());
//! let wc = watcher::Config::default();
//! let mut apply_stream = watcher(foos, wc).applied_objects().boxed();
//! while let Some(f) = apply_stream.try_next().await? {
//! println!("saw apply to {}", f.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`CustomResource`](crate::CustomResource) for documentation on how to configure custom resources
//! - [`runtime::watcher`](crate::runtime::watcher()) for how long-running watches work and why you want to use this over [`Api::watch`](crate::Api::watch)
//! - [`runtime`](crate::runtime) for abstractions that help with more complicated Kubernetes applications
//!
//! # Examples
//! A large list of complete, runnable examples with explanations is available in the [examples folder](https://github.com/kube-rs/kube/tree/main/examples).
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
#![forbid(unsafe_code)]
macro_rules! cfg_client {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "client")))]
#[cfg(feature = "client")]
$item
)*
}
}
macro_rules! cfg_config {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
#[cfg(feature = "config")]
$item
)*
}
}
macro_rules! cfg_error {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(any(feature = "config", feature = "client"))))]
#[cfg(any(feature = "config", feature = "client"))]
$item
)*
}
}
cfg_client! {
pub use kube_client::api;
pub use kube_client::discovery;
pub use kube_client::client;
#[doc(inline)]
pub use api::Api;
#[doc(inline)]
pub use client::Client;
#[doc(inline)]
pub use discovery::Discovery;
}
cfg_config! {
pub use kube_client::config;
#[doc(inline)]
pub use config::Config;
}
cfg_error! {
pub use kube_client::error;
#[doc(inline)] pub use error::Error;
/// Convenient alias for `Result<T, Error>`
pub type Result<T, E = Error> = std::result::Result<T, E>;
}
/// Re-exports from [`kube-derive`](kube_derive)
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use kube_derive::CustomResource;
/// Re-exports from `kube-runtime`
#[cfg(feature = "runtime")]
#[cfg_attr(docsrs, doc(cfg(feature = "runtime")))]
#[doc(inline)]
pub use kube_runtime as runtime;
pub use crate::core::{CustomResourceExt, Resource, ResourceExt};
/// Re-exports from `kube_core`
#[doc(inline)] | #[cfg(test)]
#[cfg(all(feature = "derive", feature = "client"))]
mod test {
use crate::{
api::{DeleteParams, Patch, PatchParams},
Api, Client, CustomResourceExt, Resource, ResourceExt,
};
use kube_derive::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
#[kube(status = "FooStatus")]
#[kube(scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
pub struct FooSpec {
name: String,
info: Option<String>,
replicas: isize,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
pub struct FooStatus {
is_bad: bool,
replicas: isize,
}
#[tokio::test]
#[ignore = "needs kubeconfig"]
async fn custom_resource_generates_correct_core_structs() {
use crate::core::{ApiResource, DynamicObject, GroupVersionKind};
let client = Client::try_default().await.unwrap();
let gvk = GroupVersionKind::gvk("clux.dev", "v1", "Foo");
let api_resource = ApiResource::from_gvk(&gvk);
let a1: Api<DynamicObject> = Api::namespaced_with(client.clone(), "myns", &api_resource);
let a2: Api<Foo> = Api::namespaced(client, "myns");
// make sure they return the same url_path through their impls
assert_eq!(a1.resource_url(), a2.resource_url());
}
use k8s_openapi::{
api::core::v1::ConfigMap,
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
};
#[tokio::test]
#[ignore = "needs cluster (creates + patches foo crd)"]
#[cfg(all(feature = "derive", feature = "runtime"))]
async fn derived_resource_queriable_and_has_subresources() -> Result<(), Box<dyn std::error::Error>> {
use crate::runtime::wait::{await_condition, conditions};
use serde_json::json;
let client = Client::try_default().await?;
let ssapply = PatchParams::apply("kube").force();
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
// Server-side apply CRD and wait for it to get ready
crds.patch("foos.clux.dev", &ssapply, &Patch::Apply(Foo::crd()))
.await?;
let establish = await_condition(crds.clone(), "foos.clux.dev", conditions::is_crd_established());
let _ = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await?;
// Use it
let foos: Api<Foo> = Api::default_namespaced(client.clone());
// Apply from generated struct
{
let foo = Foo::new("baz", FooSpec {
name: "baz".into(),
info: Some("old baz".into()),
replicas: 1,
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(&foo)).await?;
assert_eq!(o.spec.name, "baz");
let oref = o.object_ref(&());
assert_eq!(oref.name.unwrap(), "baz");
assert_eq!(oref.uid, o.uid());
}
// Apply from partial json!
{
let patch = json!({
"apiVersion": "clux.dev/v1",
"kind": "Foo",
"spec": {
"name": "foo",
"replicas": 2
}
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(patch)).await?;
assert_eq!(o.spec.replicas, 2, "patching spec updated spec.replicas");
}
// check subresource
{
let scale = foos.get_scale("baz").await?;
assert_eq!(scale.spec.unwrap().replicas, Some(2));
let status = foos.get_status("baz").await?;
assert!(status.status.is_none(), "nothing has set status");
}
// set status subresource
{
let fs = serde_json::json!({"status": FooStatus { is_bad: false, replicas: 1 }});
let o = foos
.patch_status("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert!(o.status.is_some(), "status set after patch_status");
}
// set scale subresource
{
let fs = serde_json::json!({"spec": { "replicas": 3 }});
let o = foos
.patch_scale("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert_eq!(o.status.unwrap().replicas, 1, "scale replicas got patched");
let linked_replicas = o.spec.unwrap().replicas.unwrap();
assert_eq!(linked_replicas, 3, "patch_scale updates linked spec.replicas");
}
// cleanup
foos.delete_collection(&DeleteParams::default(), &Default::default())
.await?;
crds.delete("foos.clux.dev", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
async fn custom_serialized_objects_are_queryable_and_iterable() -> Result<(), Box<dyn std::error::Error>>
{
use crate::core::{
object::{HasSpec, HasStatus, NotUsed, Object},
ApiResource,
};
use k8s_openapi::api::core::v1::Pod;
#[derive(Clone, Deserialize, Debug)]
struct PodSpecSimple {
containers: Vec<ContainerSimple>,
}
#[derive(Clone, Deserialize, Debug)]
struct ContainerSimple {
#[allow(dead_code)]
image: String,
}
type PodSimple = Object<PodSpecSimple, NotUsed>;
// use known type information from pod (can also use discovery for this)
let ar = ApiResource::erase::<Pod>(&());
let client = Client::try_default().await?;
let api: Api<PodSimple> = Api::default_namespaced_with(client, &ar);
let mut list = api.list(&Default::default()).await?;
// check we can mutably iterate over ObjectList
for pod in &mut list {
pod.spec_mut().containers = vec![];
*pod.status_mut() = None;
pod.annotations_mut()
.entry("kube-seen".to_string())
.or_insert_with(|| "yes".to_string());
pod.labels_mut()
.entry("kube.rs".to_string())
.or_insert_with(|| "hello".to_string());
pod.finalizers_mut().push("kube-finalizer".to_string());
pod.managed_fields_mut().clear();
// NB: we are **not** pushing these back upstream - (Api::apply or Api::replace needed for it)
}
// check we can iterate over ObjectList normally - and check the mutations worked
for pod in list {
assert!(pod.annotations().get("kube-seen").is_some());
assert!(pod.labels().get("kube.rs").is_some());
assert!(pod.finalizers().contains(&"kube-finalizer".to_string()));
assert!(pod.spec().containers.is_empty());
assert!(pod.managed_fields().is_empty());
}
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (fetches api resources, and lists all)"]
#[cfg(feature = "derive")]
async fn derived_resources_discoverable() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
core::{DynamicObject, GroupVersion, GroupVersionKind},
discovery::{self, verbs, ApiGroup, Discovery, Scope},
runtime::wait::{await_condition, conditions, Condition},
};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "kube.rs", version = "v1", kind = "TestCr", namespaced)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
struct TestCrSpec {}
let client = Client::try_default().await?;
// install the crd and wait for it to be established
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
let ssapply = PatchParams::apply("kube").force();
crds.patch("testcrs.kube.rs", &ssapply, &Patch::Apply(TestCr::crd()))
.await?;
let establish = await_condition(crds.clone(), "testcrs.kube.rs", conditions::is_crd_established());
let crd = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await??;
assert!(conditions::is_crd_established().matches_object(crd.as_ref()));
tokio::time::sleep(std::time::Duration::from_secs(2)).await; // Established condition is actually not enough for api discovery :(
// create partial information for it to discover
let gvk = GroupVersionKind::gvk("kube.rs", "v1", "TestCr");
let gv = GroupVersion::gv("kube.rs", "v1");
// discover by both (recommended kind on groupversion) and (pinned gvk) and they should equal
let apigroup = discovery::oneshot::pinned_group(&client, &gv).await?;
let (ar1, caps1) = apigroup.recommended_kind("TestCr").unwrap();
let (ar2, caps2) = discovery::pinned_kind(&client, &gvk).await?;
assert_eq!(caps1.operations.len(), caps2.operations.len(), "unequal caps");
assert_eq!(ar1, ar2, "unequal apiresource");
assert_eq!(DynamicObject::api_version(&ar2), "kube.rs/v1", "unequal dynver");
// run (almost) full discovery
let discovery = Discovery::new(client.clone())
// skip something in discovery (clux.dev crd being mutated in other tests)
.exclude(&["rbac.authorization.k8s.io", "clux.dev"])
.run()
.await?;
// check our custom resource first by resolving within groups
assert!(discovery.has_group("kube.rs"), "missing group kube.rs");
let (ar, _caps) = discovery.resolve_gvk(&gvk).unwrap();
assert_eq!(ar.group, gvk.group, "unexpected discovered group");
assert_eq!(ar.version, gvk.version, "unexpected discovered version");
assert_eq!(ar.kind, gvk.kind, "unexpected discovered kind");
// check all non-excluded groups that are iterable
let mut groups = discovery.groups_alphabetical().into_iter();
let firstgroup = groups.next().unwrap();
assert_eq!(firstgroup.name(), ApiGroup::CORE_GROUP, "core not first");
for group in groups {
for (ar, caps) in group.recommended_resources() {
if !caps.supports_operation(verbs::LIST) {
continue;
}
let api: Api<DynamicObject> = if caps.scope == Scope::Namespaced {
Api::default_namespaced_with(client.clone(), &ar)
} else {
Api::all_with(client.clone(), &ar)
};
api.list(&Default::default()).await?;
}
}
// cleanup
crds.delete("testcrs.kube.rs", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create await a pod)"]
#[cfg(feature = "runtime")]
async fn pod_can_await_conditions() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
api::{DeleteParams, PostParams},
runtime::wait::{await_condition, conditions, delete::delete_and_finalize, Condition},
Api, Client,
};
use k8s_openapi::api::core::v1::Pod;
use std::time::Duration;
use tokio::time::timeout;
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 20s
let data: Pod = serde_json::from_value(serde_json::json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube4",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 20"],
}],
}
}))?;
let pp = PostParams::default();
assert_eq!(
data.name_unchecked(),
pods.create(&pp, &data).await?.name_unchecked()
);
// Watch its phase for a few seconds
let is_running = await_condition(pods.clone(), "busybox-kube4", conditions::is_pod_running());
let _ = timeout(Duration::from_secs(15), is_running).await?;
// Verify we can get it
let pod = pods.get("busybox-kube4").await?;
assert_eq!(pod.spec.as_ref().unwrap().containers[0].name, "busybox");
// Wait for a more complicated condition: ContainersReady AND Initialized
// TODO: remove these once we can write these functions generically
fn is_each_container_ready() -> impl Condition<Pod> {
|obj: Option<&Pod>| {
if let Some(o) = obj {
if let Some(s) = &o.status {
if let Some(conds) = &s.conditions {
if let Some(pcond) = conds.iter().find(|c| c.type_ == "ContainersReady") {
return pcond.status == "True";
}
}
}
}
false
}
}
let is_fully_ready = await_condition(
pods.clone(),
"busybox-kube4",
conditions::is_pod_running().and(is_each_container_ready()),
);
let _ = timeout(Duration::from_secs(10), is_fully_ready).await?;
// Delete it - and wait for deletion to complete
| pub use kube_core as core;
// Tests that require a cluster and the complete feature set
// Can be run with `cargo test -p kube --lib --features=runtime,derive -- --ignored` | random_line_split |
lib.rs | ::api) with the generic Kubernetes [`Api`](crate::Api)
//! - [`derive`](kube_derive) with the [`CustomResource`](crate::CustomResource) derive for building controller types
//! - [`runtime`](crate::runtime) with a [`Controller`](crate::runtime::Controller) / [`watcher`](crate::runtime::watcher()) / [`reflector`](crate::runtime::reflector::reflector) / [`Store`](crate::runtime::reflector::Store)
//! - [`core`](crate::core) with generics from `apimachinery`
//!
//! You can use each of these as you need with the help of the [exported features](https://github.com/kube-rs/kube/blob/main/kube/Cargo.toml#L18).
//!
//! # Using the Client
//! ```no_run
//! use futures::{StreamExt, TryStreamExt};
//! use kube::{Client, api::{Api, ResourceExt, ListParams, PostParams}};
//! use k8s_openapi::api::core::v1::Pod;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Infer the runtime environment and try to create a Kubernetes Client
//! let client = Client::try_default().await?;
//!
//! // Read pods in the configured namespace into the typed interface from k8s-openapi
//! let pods: Api<Pod> = Api::default_namespaced(client);
//! for p in pods.list(&ListParams::default()).await? {
//! println!("found pod {}", p.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`Client`](crate::client) for the extensible Kubernetes client
//! - [`Api`](crate::Api) for the generic api methods available on Kubernetes resources
//! - [k8s-openapi](https://docs.rs/k8s-openapi/*/k8s_openapi/) for documentation about the generated Kubernetes types
//!
//! # Using the Runtime with the Derive macro
//!
//! ```no_run
//! use schemars::JsonSchema;
//! use serde::{Deserialize, Serialize};
//! use serde_json::json;
//! use futures::{StreamExt, TryStreamExt};
//! use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
//! use kube::{
//! api::{Api, DeleteParams, PatchParams, Patch, ResourceExt},
//! core::CustomResourceExt,
//! Client, CustomResource,
//! runtime::{watcher, WatchStreamExt, wait::{conditions, await_condition}},
//! };
//!
//! // Our custom resource
//! #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
//! #[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
//! pub struct FooSpec {
//! info: String,
//! #[schemars(length(min = 3))]
//! name: String,
//! replicas: i32,
//! }
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let client = Client::try_default().await?;
//! let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
//!
//! // Apply the CRD so users can create Foo instances in Kubernetes
//! crds.patch("foos.clux.dev",
//! &PatchParams::apply("my_manager"),
//! &Patch::Apply(Foo::crd())
//! ).await?;
//!
//! // Wait for the CRD to be ready
//! tokio::time::timeout(
//! std::time::Duration::from_secs(10),
//! await_condition(crds, "foos.clux.dev", conditions::is_crd_established())
//! ).await?;
//!
//! // Watch for changes to foos in the configured namespace
//! let foos: Api<Foo> = Api::default_namespaced(client.clone());
//! let wc = watcher::Config::default();
//! let mut apply_stream = watcher(foos, wc).applied_objects().boxed();
//! while let Some(f) = apply_stream.try_next().await? {
//! println!("saw apply to {}", f.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`CustomResource`](crate::CustomResource) for documentation on how to configure custom resources
//! - [`runtime::watcher`](crate::runtime::watcher()) for how long-running watches work and why you want to use this over [`Api::watch`](crate::Api::watch)
//! - [`runtime`](crate::runtime) for abstractions that help with more complicated Kubernetes applications
//!
//! # Examples
//! A large list of complete, runnable examples with explanations is available in the [examples folder](https://github.com/kube-rs/kube/tree/main/examples).
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
#![forbid(unsafe_code)]
macro_rules! cfg_client {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "client")))]
#[cfg(feature = "client")]
$item
)*
}
}
macro_rules! cfg_config {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
#[cfg(feature = "config")]
$item
)*
}
}
macro_rules! cfg_error {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(any(feature = "config", feature = "client"))))]
#[cfg(any(feature = "config", feature = "client"))]
$item
)*
}
}
cfg_client! {
pub use kube_client::api;
pub use kube_client::discovery;
pub use kube_client::client;
#[doc(inline)]
pub use api::Api;
#[doc(inline)]
pub use client::Client;
#[doc(inline)]
pub use discovery::Discovery;
}
cfg_config! {
pub use kube_client::config;
#[doc(inline)]
pub use config::Config;
}
cfg_error! {
pub use kube_client::error;
#[doc(inline)] pub use error::Error;
/// Convenient alias for `Result<T, Error>`
pub type Result<T, E = Error> = std::result::Result<T, E>;
}
/// Re-exports from [`kube-derive`](kube_derive)
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use kube_derive::CustomResource;
/// Re-exports from `kube-runtime`
#[cfg(feature = "runtime")]
#[cfg_attr(docsrs, doc(cfg(feature = "runtime")))]
#[doc(inline)]
pub use kube_runtime as runtime;
pub use crate::core::{CustomResourceExt, Resource, ResourceExt};
/// Re-exports from `kube_core`
#[doc(inline)]
pub use kube_core as core;
// Tests that require a cluster and the complete feature set
// Can be run with `cargo test -p kube --lib --features=runtime,derive -- --ignored`
#[cfg(test)]
#[cfg(all(feature = "derive", feature = "client"))]
mod test {
use crate::{
api::{DeleteParams, Patch, PatchParams},
Api, Client, CustomResourceExt, Resource, ResourceExt,
};
use kube_derive::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
#[kube(status = "FooStatus")]
#[kube(scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
pub struct FooSpec {
name: String,
info: Option<String>,
replicas: isize,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
pub struct FooStatus {
is_bad: bool,
replicas: isize,
}
#[tokio::test]
#[ignore = "needs kubeconfig"]
async fn custom_resource_generates_correct_core_structs() {
use crate::core::{ApiResource, DynamicObject, GroupVersionKind};
let client = Client::try_default().await.unwrap();
let gvk = GroupVersionKind::gvk("clux.dev", "v1", "Foo");
let api_resource = ApiResource::from_gvk(&gvk);
let a1: Api<DynamicObject> = Api::namespaced_with(client.clone(), "myns", &api_resource);
let a2: Api<Foo> = Api::namespaced(client, "myns");
// make sure they return the same url_path through their impls
assert_eq!(a1.resource_url(), a2.resource_url());
}
use k8s_openapi::{
api::core::v1::ConfigMap,
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
};
#[tokio::test]
#[ignore = "needs cluster (creates + patches foo crd)"]
#[cfg(all(feature = "derive", feature = "runtime"))]
async fn derived_resource_queriable_and_has_subresources() -> Result<(), Box<dyn std::error::Error>> {
use crate::runtime::wait::{await_condition, conditions};
use serde_json::json;
let client = Client::try_default().await?;
let ssapply = PatchParams::apply("kube").force();
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
// Server-side apply CRD and wait for it to get ready
crds.patch("foos.clux.dev", &ssapply, &Patch::Apply(Foo::crd()))
.await?;
let establish = await_condition(crds.clone(), "foos.clux.dev", conditions::is_crd_established());
let _ = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await?;
// Use it
let foos: Api<Foo> = Api::default_namespaced(client.clone());
// Apply from generated struct
{
let foo = Foo::new("baz", FooSpec {
name: "baz".into(),
info: Some("old baz".into()),
replicas: 1,
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(&foo)).await?;
assert_eq!(o.spec.name, "baz");
let oref = o.object_ref(&());
assert_eq!(oref.name.unwrap(), "baz");
assert_eq!(oref.uid, o.uid());
}
// Apply from partial json!
{
let patch = json!({
"apiVersion": "clux.dev/v1",
"kind": "Foo",
"spec": {
"name": "foo",
"replicas": 2
}
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(patch)).await?;
assert_eq!(o.spec.replicas, 2, "patching spec updated spec.replicas");
}
// check subresource
{
let scale = foos.get_scale("baz").await?;
assert_eq!(scale.spec.unwrap().replicas, Some(2));
let status = foos.get_status("baz").await?;
assert!(status.status.is_none(), "nothing has set status");
}
// set status subresource
{
let fs = serde_json::json!({"status": FooStatus { is_bad: false, replicas: 1 }});
let o = foos
.patch_status("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert!(o.status.is_some(), "status set after patch_status");
}
// set scale subresource
{
let fs = serde_json::json!({"spec": { "replicas": 3 }});
let o = foos
.patch_scale("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert_eq!(o.status.unwrap().replicas, 1, "scale replicas got patched");
let linked_replicas = o.spec.unwrap().replicas.unwrap();
assert_eq!(linked_replicas, 3, "patch_scale updates linked spec.replicas");
}
// cleanup
foos.delete_collection(&DeleteParams::default(), &Default::default())
.await?;
crds.delete("foos.clux.dev", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
async fn custom_serialized_objects_are_queryable_and_iterable() -> Result<(), Box<dyn std::error::Error>>
{
use crate::core::{
object::{HasSpec, HasStatus, NotUsed, Object},
ApiResource,
};
use k8s_openapi::api::core::v1::Pod;
#[derive(Clone, Deserialize, Debug)]
struct PodSpecSimple {
containers: Vec<ContainerSimple>,
}
#[derive(Clone, Deserialize, Debug)]
struct | {
#[allow(dead_code)]
image: String,
}
type PodSimple = Object<PodSpecSimple, NotUsed>;
// use known type information from pod (can also use discovery for this)
let ar = ApiResource::erase::<Pod>(&());
let client = Client::try_default().await?;
let api: Api<PodSimple> = Api::default_namespaced_with(client, &ar);
let mut list = api.list(&Default::default()).await?;
// check we can mutably iterate over ObjectList
for pod in &mut list {
pod.spec_mut().containers = vec![];
*pod.status_mut() = None;
pod.annotations_mut()
.entry("kube-seen".to_string())
.or_insert_with(|| "yes".to_string());
pod.labels_mut()
.entry("kube.rs".to_string())
.or_insert_with(|| "hello".to_string());
pod.finalizers_mut().push("kube-finalizer".to_string());
pod.managed_fields_mut().clear();
// NB: we are **not** pushing these back upstream - (Api::apply or Api::replace needed for it)
}
// check we can iterate over ObjectList normally - and check the mutations worked
for pod in list {
assert!(pod.annotations().get("kube-seen").is_some());
assert!(pod.labels().get("kube.rs").is_some());
assert!(pod.finalizers().contains(&"kube-finalizer".to_string()));
assert!(pod.spec().containers.is_empty());
assert!(pod.managed_fields().is_empty());
}
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (fetches api resources, and lists all)"]
#[cfg(feature = "derive")]
async fn derived_resources_discoverable() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
core::{DynamicObject, GroupVersion, GroupVersionKind},
discovery::{self, verbs, ApiGroup, Discovery, Scope},
runtime::wait::{await_condition, conditions, Condition},
};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "kube.rs", version = "v1", kind = "TestCr", namespaced)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
struct TestCrSpec {}
let client = Client::try_default().await?;
// ensure the crd is installed
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
let ssapply = PatchParams::apply("kube").force();
crds.patch("testcrs.kube.rs", &ssapply, &Patch::Apply(TestCr::crd()))
.await?;
let establish = await_condition(crds.clone(), "testcrs.kube.rs", conditions::is_crd_established());
let crd = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await??;
assert!(conditions::is_crd_established().matches_object(crd.as_ref()));
tokio::time::sleep(std::time::Duration::from_secs(2)).await; // Established condition is actually not enough for api discovery :(
// create partial information for it to discover
let gvk = GroupVersionKind::gvk("kube.rs", "v1", "TestCr");
let gv = GroupVersion::gv("kube.rs", "v1");
// discover by both (recommended kind on groupversion) and (pinned gvk); they should be equal
let apigroup = discovery::oneshot::pinned_group(&client, &gv).await?;
let (ar1, caps1) = apigroup.recommended_kind("TestCr").unwrap();
let (ar2, caps2) = discovery::pinned_kind(&client, &gvk).await?;
assert_eq!(caps1.operations.len(), caps2.operations.len(), "unequal caps");
assert_eq!(ar1, ar2, "unequal apiresource");
assert_eq!(DynamicObject::api_version(&ar2), "kube.rs/v1", "unequal dynver");
// run (almost) full discovery
let discovery = Discovery::new(client.clone())
// skip something in discovery (clux.dev crd being mutated in other tests)
.exclude(&["rbac.authorization.k8s.io", "clux.dev"])
.run()
.await?;
// check our custom resource first by resolving within groups
assert!(discovery.has_group("kube.rs"), "missing group kube.rs");
let (ar, _caps) = discovery.resolve_gvk(&gvk).unwrap();
assert_eq!(ar.group, gvk.group, "unexpected discovered group");
assert_eq!(ar.version, gvk.version, "unexpected discovered version");
assert_eq!(ar.kind, gvk.kind, "unexpected discovered kind");
// check all non-excluded groups that are iterable
let mut groups = discovery.groups_alphabetical().into_iter();
let firstgroup = groups.next().unwrap();
assert_eq!(firstgroup.name(), ApiGroup::CORE_GROUP, "core not first");
for group in groups {
for (ar, caps) in group.recommended_resources() {
if !caps.supports_operation(verbs::LIST) {
continue;
}
let api: Api<DynamicObject> = if caps.scope == Scope::Namespaced {
Api::default_namespaced_with(client.clone(), &ar)
} else {
Api::all_with(client.clone(), &ar)
};
api.list(&Default::default()).await?;
}
}
// cleanup
crds.delete("testcrs.kube.rs", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create await a pod)"]
#[cfg(feature = "runtime")]
async fn pod_can_await_conditions() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
api::{DeleteParams, PostParams},
runtime::wait::{await_condition, conditions, delete::delete_and_finalize, Condition},
Api, Client,
};
use k8s_openapi::api::core::v1::Pod;
use std::time::Duration;
use tokio::time::timeout;
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 20s
let data: Pod = serde_json::from_value(serde_json::json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube4",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 20"],
}],
}
}))?;
let pp = PostParams::default();
assert_eq!(
data.name_unchecked(),
pods.create(&pp, &data).await?.name_unchecked()
);
// Watch it phase for a few seconds
let is_running = await_condition(pods.clone(), "busybox-kube4", conditions::is_pod_running());
let _ = timeout(Duration::from_secs(15), is_running).await?;
// Verify we can get it
let pod = pods.get("busybox-kube4").await?;
assert_eq!(pod.spec.as_ref().unwrap().containers[0].name, "busybox");
// Wait for a more complicated condition: Running AND ContainersReady
// TODO: remove these once we can write these functions generically
fn is_each_container_ready() -> impl Condition<Pod> {
|obj: Option<&Pod>| {
if let Some(o) = obj {
if let Some(s) = &o.status {
if let Some(conds) = &s.conditions {
if let Some(pcond) = conds.iter().find(|c| c.type_ == "ContainersReady") {
return pcond.status == "True";
}
}
}
}
false
}
}
let is_fully_ready = await_condition(
pods.clone(),
"busybox-kube4",
conditions::is_pod_running().and(is_each_container_ready()),
);
let _ = timeout(Duration::from_secs(10), is_fully_ready).await?;
// Delete it - and wait for deletion to complete
| ContainerSimple | identifier_name |
lib.rs | ::api) with the generic Kubernetes [`Api`](crate::Api)
//! - [`derive`](kube_derive) with the [`CustomResource`](crate::CustomResource) derive for building controllers types
//! - [`runtime`](crate::runtime) with a [`Controller`](crate::runtime::Controller) / [`watcher`](crate::runtime::watcher()) / [`reflector`](crate::runtime::reflector::reflector) / [`Store`](crate::runtime::reflector::Store)
//! - [`core`](crate::core) with generics from `apimachinery`
//!
//! You can use each of these as you need with the help of the [exported features](https://github.com/kube-rs/kube/blob/main/kube/Cargo.toml#L18).
//!
//! # Using the Client
//! ```no_run
//! use futures::{StreamExt, TryStreamExt};
//! use kube::{Client, api::{Api, ResourceExt, ListParams, PostParams}};
//! use k8s_openapi::api::core::v1::Pod;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Infer the runtime environment and try to create a Kubernetes Client
//! let client = Client::try_default().await?;
//!
//! // Read pods in the configured namespace into the typed interface from k8s-openapi
//! let pods: Api<Pod> = Api::default_namespaced(client);
//! for p in pods.list(&ListParams::default()).await? {
//! println!("found pod {}", p.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`Client`](crate::client) for the extensible Kubernetes client
//! - [`Api`](crate::Api) for the generic api methods available on Kubernetes resources
//! - [k8s-openapi](https://docs.rs/k8s-openapi/*/k8s_openapi/) for documentation about the generated Kubernetes types
//!
//! # Using the Runtime with the Derive macro
//!
//! ```no_run
//! use schemars::JsonSchema;
//! use serde::{Deserialize, Serialize};
//! use serde_json::json;
//! use futures::{StreamExt, TryStreamExt};
//! use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
//! use kube::{
//! api::{Api, DeleteParams, PatchParams, Patch, ResourceExt},
//! core::CustomResourceExt,
//! Client, CustomResource,
//! runtime::{watcher, WatchStreamExt, wait::{conditions, await_condition}},
//! };
//!
//! // Our custom resource
//! #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
//! #[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
//! pub struct FooSpec {
//! info: String,
//! #[schemars(length(min = 3))]
//! name: String,
//! replicas: i32,
//! }
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let client = Client::try_default().await?;
//! let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
//!
//! // Apply the CRD so users can create Foo instances in Kubernetes
//! crds.patch("foos.clux.dev",
//! &PatchParams::apply("my_manager"),
//! &Patch::Apply(Foo::crd())
//! ).await?;
//!
//! // Wait for the CRD to be ready
//! tokio::time::timeout(
//! std::time::Duration::from_secs(10),
//! await_condition(crds, "foos.clux.dev", conditions::is_crd_established())
//! ).await?;
//!
//! // Watch for changes to foos in the configured namespace
//! let foos: Api<Foo> = Api::default_namespaced(client.clone());
//! let wc = watcher::Config::default();
//! let mut apply_stream = watcher(foos, wc).applied_objects().boxed();
//! while let Some(f) = apply_stream.try_next().await? {
//! println!("saw apply to {}", f.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`CustomResource`](crate::CustomResource) for documentation on how to configure custom resources
//! - [`runtime::watcher`](crate::runtime::watcher()) for how long-running watches work and why you want to use this over [`Api::watch`](crate::Api::watch)
//! - [`runtime`](crate::runtime) for abstractions that help with more complicated Kubernetes applications
//!
//! # Examples
//! A large list of complete, runnable examples with explanations is available in the [examples folder](https://github.com/kube-rs/kube/tree/main/examples).
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
#![forbid(unsafe_code)]
macro_rules! cfg_client {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "client")))]
#[cfg(feature = "client")]
$item
)*
}
}
macro_rules! cfg_config {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
#[cfg(feature = "config")]
$item
)*
}
}
macro_rules! cfg_error {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(any(feature = "config", feature = "client"))))]
#[cfg(any(feature = "config", feature = "client"))]
$item
)*
}
}
cfg_client! {
pub use kube_client::api;
pub use kube_client::discovery;
pub use kube_client::client;
#[doc(inline)]
pub use api::Api;
#[doc(inline)]
pub use client::Client;
#[doc(inline)]
pub use discovery::Discovery;
}
cfg_config! {
pub use kube_client::config;
#[doc(inline)]
pub use config::Config;
}
cfg_error! {
pub use kube_client::error;
#[doc(inline)] pub use error::Error;
/// Convenient alias for `Result<T, Error>`
pub type Result<T, E = Error> = std::result::Result<T, E>;
}
/// Re-exports from [`kube-derive`](kube_derive)
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use kube_derive::CustomResource;
/// Re-exports from `kube-runtime`
#[cfg(feature = "runtime")]
#[cfg_attr(docsrs, doc(cfg(feature = "runtime")))]
#[doc(inline)]
pub use kube_runtime as runtime;
pub use crate::core::{CustomResourceExt, Resource, ResourceExt};
/// Re-exports from `kube_core`
#[doc(inline)]
pub use kube_core as core;
// Tests that require a cluster and the complete feature set
// Can be run with `cargo test -p kube --lib --features=runtime,derive -- --ignored`
#[cfg(test)]
#[cfg(all(feature = "derive", feature = "client"))]
mod test {
use crate::{
api::{DeleteParams, Patch, PatchParams},
Api, Client, CustomResourceExt, Resource, ResourceExt,
};
use kube_derive::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
#[kube(status = "FooStatus")]
#[kube(scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
pub struct FooSpec {
name: String,
info: Option<String>,
replicas: isize,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
pub struct FooStatus {
is_bad: bool,
replicas: isize,
}
#[tokio::test]
#[ignore = "needs kubeconfig"]
async fn custom_resource_generates_correct_core_structs() {
use crate::core::{ApiResource, DynamicObject, GroupVersionKind};
let client = Client::try_default().await.unwrap();
let gvk = GroupVersionKind::gvk("clux.dev", "v1", "Foo");
let api_resource = ApiResource::from_gvk(&gvk);
let a1: Api<DynamicObject> = Api::namespaced_with(client.clone(), "myns", &api_resource);
let a2: Api<Foo> = Api::namespaced(client, "myns");
// make sure they return the same url_path through their impls
assert_eq!(a1.resource_url(), a2.resource_url());
}
use k8s_openapi::{
api::core::v1::ConfigMap,
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
};
#[tokio::test]
#[ignore = "needs cluster (creates + patches foo crd)"]
#[cfg(all(feature = "derive", feature = "runtime"))]
async fn derived_resource_queriable_and_has_subresources() -> Result<(), Box<dyn std::error::Error>> {
use crate::runtime::wait::{await_condition, conditions};
use serde_json::json;
let client = Client::try_default().await?;
let ssapply = PatchParams::apply("kube").force();
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
// Server-side apply CRD and wait for it to get ready
crds.patch("foos.clux.dev", &ssapply, &Patch::Apply(Foo::crd()))
.await?;
let establish = await_condition(crds.clone(), "foos.clux.dev", conditions::is_crd_established());
let _ = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await?;
// Use it
let foos: Api<Foo> = Api::default_namespaced(client.clone());
// Apply from generated struct
{
let foo = Foo::new("baz", FooSpec {
name: "baz".into(),
info: Some("old baz".into()),
replicas: 1,
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(&foo)).await?;
assert_eq!(o.spec.name, "baz");
let oref = o.object_ref(&());
assert_eq!(oref.name.unwrap(), "baz");
assert_eq!(oref.uid, o.uid());
}
// Apply from partial json!
{
let patch = json!({
"apiVersion": "clux.dev/v1",
"kind": "Foo",
"spec": {
"name": "foo",
"replicas": 2
}
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(patch)).await?;
assert_eq!(o.spec.replicas, 2, "patching spec updated spec.replicas");
}
// check subresource
{
let scale = foos.get_scale("baz").await?;
assert_eq!(scale.spec.unwrap().replicas, Some(2));
let status = foos.get_status("baz").await?;
assert!(status.status.is_none(), "nothing has set status");
}
// set status subresource
{
let fs = serde_json::json!({"status": FooStatus { is_bad: false, replicas: 1 }});
let o = foos
.patch_status("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert!(o.status.is_some(), "status set after patch_status");
}
// set scale subresource
{
let fs = serde_json::json!({"spec": { "replicas": 3 }});
let o = foos
.patch_scale("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert_eq!(o.status.unwrap().replicas, 1, "scale replicas got patched");
let linked_replicas = o.spec.unwrap().replicas.unwrap();
assert_eq!(linked_replicas, 3, "patch_scale updates linked spec.replicas");
}
// cleanup
foos.delete_collection(&DeleteParams::default(), &Default::default())
.await?;
crds.delete("foos.clux.dev", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
async fn custom_serialized_objects_are_queryable_and_iterable() -> Result<(), Box<dyn std::error::Error>>
{
use crate::core::{
object::{HasSpec, HasStatus, NotUsed, Object},
ApiResource,
};
use k8s_openapi::api::core::v1::Pod;
#[derive(Clone, Deserialize, Debug)]
struct PodSpecSimple {
containers: Vec<ContainerSimple>,
}
#[derive(Clone, Deserialize, Debug)]
struct ContainerSimple {
#[allow(dead_code)]
image: String,
}
type PodSimple = Object<PodSpecSimple, NotUsed>;
// use known type information from pod (can also use discovery for this)
let ar = ApiResource::erase::<Pod>(&());
let client = Client::try_default().await?;
let api: Api<PodSimple> = Api::default_namespaced_with(client, &ar);
let mut list = api.list(&Default::default()).await?;
// check we can mutably iterate over ObjectList
for pod in &mut list {
pod.spec_mut().containers = vec![];
*pod.status_mut() = None;
pod.annotations_mut()
.entry("kube-seen".to_string())
.or_insert_with(|| "yes".to_string());
pod.labels_mut()
.entry("kube.rs".to_string())
.or_insert_with(|| "hello".to_string());
pod.finalizers_mut().push("kube-finalizer".to_string());
pod.managed_fields_mut().clear();
// NB: we are **not** pushing these back upstream - (Api::apply or Api::replace needed for it)
}
// check we can iterate over ObjectList normally - and check the mutations worked
for pod in list {
assert!(pod.annotations().get("kube-seen").is_some());
assert!(pod.labels().get("kube.rs").is_some());
assert!(pod.finalizers().contains(&"kube-finalizer".to_string()));
assert!(pod.spec().containers.is_empty());
assert!(pod.managed_fields().is_empty());
}
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (fetches api resources, and lists all)"]
#[cfg(feature = "derive")]
async fn derived_resources_discoverable() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
core::{DynamicObject, GroupVersion, GroupVersionKind},
discovery::{self, verbs, ApiGroup, Discovery, Scope},
runtime::wait::{await_condition, conditions, Condition},
};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "kube.rs", version = "v1", kind = "TestCr", namespaced)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
struct TestCrSpec {}
let client = Client::try_default().await?;
// ensure the crd is installed
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
let ssapply = PatchParams::apply("kube").force();
crds.patch("testcrs.kube.rs", &ssapply, &Patch::Apply(TestCr::crd()))
.await?;
let establish = await_condition(crds.clone(), "testcrs.kube.rs", conditions::is_crd_established());
let crd = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await??;
assert!(conditions::is_crd_established().matches_object(crd.as_ref()));
tokio::time::sleep(std::time::Duration::from_secs(2)).await; // Established condition is actually not enough for api discovery :(
// create partial information for it to discover
let gvk = GroupVersionKind::gvk("kube.rs", "v1", "TestCr");
let gv = GroupVersion::gv("kube.rs", "v1");
// discover by both (recommended kind on groupversion) and (pinned gvk); they should be equal
let apigroup = discovery::oneshot::pinned_group(&client, &gv).await?;
let (ar1, caps1) = apigroup.recommended_kind("TestCr").unwrap();
let (ar2, caps2) = discovery::pinned_kind(&client, &gvk).await?;
assert_eq!(caps1.operations.len(), caps2.operations.len(), "unequal caps");
assert_eq!(ar1, ar2, "unequal apiresource");
assert_eq!(DynamicObject::api_version(&ar2), "kube.rs/v1", "unequal dynver");
// run (almost) full discovery
let discovery = Discovery::new(client.clone())
// skip something in discovery (clux.dev crd being mutated in other tests)
.exclude(&["rbac.authorization.k8s.io", "clux.dev"])
.run()
.await?;
// check our custom resource first by resolving within groups
assert!(discovery.has_group("kube.rs"), "missing group kube.rs");
let (ar, _caps) = discovery.resolve_gvk(&gvk).unwrap();
assert_eq!(ar.group, gvk.group, "unexpected discovered group");
assert_eq!(ar.version, gvk.version, "unexpected discovered version");
assert_eq!(ar.kind, gvk.kind, "unexpected discovered kind");
// check all non-excluded groups that are iterable
let mut groups = discovery.groups_alphabetical().into_iter();
let firstgroup = groups.next().unwrap();
assert_eq!(firstgroup.name(), ApiGroup::CORE_GROUP, "core not first");
for group in groups {
for (ar, caps) in group.recommended_resources() {
if !caps.supports_operation(verbs::LIST) {
continue;
}
let api: Api<DynamicObject> = if caps.scope == Scope::Namespaced {
Api::default_namespaced_with(client.clone(), &ar)
} else {
Api::all_with(client.clone(), &ar)
};
api.list(&Default::default()).await?;
}
}
// cleanup
crds.delete("testcrs.kube.rs", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create await a pod)"]
#[cfg(feature = "runtime")]
async fn pod_can_await_conditions() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
api::{DeleteParams, PostParams},
runtime::wait::{await_condition, conditions, delete::delete_and_finalize, Condition},
Api, Client,
};
use k8s_openapi::api::core::v1::Pod;
use std::time::Duration;
use tokio::time::timeout;
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 20s
let data: Pod = serde_json::from_value(serde_json::json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube4",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 20"],
}],
}
}))?;
let pp = PostParams::default();
assert_eq!(
data.name_unchecked(),
pods.create(&pp, &data).await?.name_unchecked()
);
// Watch it phase for a few seconds
let is_running = await_condition(pods.clone(), "busybox-kube4", conditions::is_pod_running());
let _ = timeout(Duration::from_secs(15), is_running).await?;
// Verify we can get it
let pod = pods.get("busybox-kube4").await?;
assert_eq!(pod.spec.as_ref().unwrap().containers[0].name, "busybox");
// Wait for a more complicated condition: Running AND ContainersReady
// TODO: remove these once we can write these functions generically
fn is_each_container_ready() -> impl Condition<Pod> |
let is_fully_ready = await_condition(
pods.clone(),
"busybox-kube4",
conditions::is_pod_running().and(is_each_container_ready()),
);
let _ = timeout(Duration::from_secs(10), is_fully_ready).await?;
// Delete it - and wait for deletion to complete
| {
|obj: Option<&Pod>| {
if let Some(o) = obj {
if let Some(s) = &o.status {
if let Some(conds) = &s.conditions {
if let Some(pcond) = conds.iter().find(|c| c.type_ == "ContainersReady") {
return pcond.status == "True";
}
}
}
}
false
}
} | identifier_body |
cartesian.rs | // Copyright 2017 Nico Madysa.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//! Provides the function `cartesian::product()`.
//!
//! The module name has been chosen entirely so that the call reads as `cartesian::product()`.
/// Iterates over the Cartesian product of a list of containers.
///
/// This essentially does the same as the macro
/// `itertools::iproduct!()`, but the number of arguments may be
/// decided at run-time. In return, this function requires that all
/// passed iterators yield items of the same type, whereas the
/// iterators passed to `itertools::iproduct!()` may be heterogeneous.
/// Furthermore, the freedom of choosing the number of arguments at
/// run-time means that the product iterator iterates over vectors
/// instead of slices. This requires a heap allocation for every item.
///
/// The argument to this function is a slice of containers `C` with
/// items `T`. *Immutable references* to these containers must be
/// convertible to iterators over `&T`. This is necessary because we
/// need to pass over each container multiple times.
///
/// # Example
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let slices = [[1, 2], [11, 22]];
/// let mut combinations = cartesian::product(&slices);
/// assert_eq!(combinations.next(), Some(vec![&1, &11]));
/// assert_eq!(combinations.next(), Some(vec![&1, &22]));
/// assert_eq!(combinations.next(), Some(vec![&2, &11]));
/// assert_eq!(combinations.next(), Some(vec![&2, &22]));
/// assert_eq!(combinations.next(), None);
/// ```
///
/// Note that if any one of the passed containers is empty, the product
/// as a whole is empty, too.
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let vectors = [vec![1, 2], vec![11, 22], vec![]];
/// let mut combinations = cartesian::product(&vectors);
/// assert_eq!(combinations.next(), None);
/// ```
///
/// For mathematical correctness, the product of no collections at all
/// is one empty vector.
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let empty: [[u32; 1]; 0] = [];
/// let mut combinations = cartesian::product(&empty);
/// assert_eq!(combinations.next(), Some(Vec::new()));
/// assert_eq!(combinations.next(), None);
/// ```
pub fn product<'a, C: 'a, T: 'a>(collections: &'a [C]) -> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
// We start with fresh iterators and pre-fill `next_item` with each iterator's first element (`None` overall if any collection is empty).
let mut iterators = collections.iter().map(<&C>::into_iter).collect::<Vec<_>>();
let next_item = iterators.iter_mut().map(Iterator::next).collect();
Product {
collections,
iterators,
next_item,
}
}
/// Iterator returned by [`product()`].
///
/// [`product()`]:./fn.product.html
pub struct Product<'a, C: 'a, T: 'a>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// The underlying collections that we iterate over.
collections: &'a [C],
/// Our own set of sub-iterators, taken from `collections`.
iterators: Vec<<&'a C as IntoIterator>::IntoIter>,
/// The next item to yield.
next_item: Option<Vec<&'a T>>,
}
impl<'a, C, T> Iterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
type Item = Vec<&'a T>;
fn next(&mut self) -> Option<Self::Item> {
let result = self.next_item.clone();
self.advance();
result
}
/// Calculate bounds on the number of remaining elements.
///
/// This is calculated the same way as [`Product::len()`], but uses
/// a helper type to deal with the return type of `size_hint()`.
/// See there for information on why the formula used is correct.
///
/// [`Product::len()`]: #method.len
fn size_hint(&self) -> (usize, Option<usize>) {
if self.next_item.is_none() {
return (0, Some(0));
}
let SizeHint(lower, upper) = SizeHint(1, Some(1))
+ self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
SizeHint::from(iterator)
* self.collections[i + 1..]
.iter()
.map(|c| SizeHint::from(&c.into_iter()))
.product()
})
.sum();
(lower, upper)
}
}
impl<'a, C, T> ExactSizeIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{
/// Calculates the exact number of remaining elements.
///
/// The length consists of the following contributions:
///
/// - 1 for the `next_item` to be yielded;
/// - `X` for each currently active iterator, where X is the
/// product of the iterator's length and the sizes of all
/// *collections* to the right of it in the product.
///
/// Example
/// -------
///
/// Assume the Cartesian product `[1, 2, 3]×[1, 2]×[1, 2, 3]`. Upon
/// construction, the `Product` type creates three iterators `A`,
/// `B`, and `C` – one iterator for each array. It also extracts
/// one item from each to form `next_item`. Hence, `next_item`
/// contributes `1` to the total length. The three iterators
/// contribute as follows:
///
/// - A: 2 items left × collection of size 2 × collection of size
/// 3 = 12;
/// - B: 1 item left × collection of size 3 = 3;
/// - C: 2 items left = 2.
///
/// Thus, we end up with a total length of `1+12+3+2=18`. This is
/// the same length we get when multiplying the size of all passed
/// collections. (`3*2*3=18`) However, our (complicated) formula
/// also works when the iterator has already yielded some elements.
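///
/// As a rough sketch (assuming the `scenarios::cartesian` paths used
/// in the `product()` examples above), the reported length shrinks by
/// exactly one for every item drawn:
///
/// ```ignore
/// use scenarios::cartesian;
///
/// let vectors = vec![vec![1, 2, 3], vec![1, 2], vec![1, 2, 3]];
/// let mut p = cartesian::product(&vectors);
/// assert_eq!(p.len(), 18); // 3 * 2 * 3 for a fresh iterator
/// p.next();
/// assert_eq!(p.len(), 17); // 1 + 12 + 3 + 1, per the breakdown above
/// ```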
fn len(&self) -> usize {
if self.next_item.is_none() {
return 0;
}
1 + self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
iterator.len()
* self.collections[i + 1..]
.iter()
.map(|c| c.into_iter().len())
.product::<usize>()
})
.sum::<usize>()
}
}
impl<'a, C, T> ::std::iter::FusedIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{}
impl<'a, C, T> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// Advances the iterators and updates `self.next_item`.
///
/// This loop works like incrementing a number digit by digit. We
/// go over each iterator and its corresponding "digit" in
/// `next_item` in lockstep, starting at the back.
///
/// If we can advance the iterator, we update the "digit" and are
/// done. If the iterator is exhausted, we have to go from "9" to
/// "10": we restart the iterator, grab the first element, and move
/// on to the next digit.
///
/// The `break` expressions are to be understood literally: our
/// scheme can break in two ways.
/// 1. The very first iterator (`i==0`) is exhausted.
/// 2. A freshly restarted iterator is empty. (should never happen!)
/// In both cases, we want to exhaust `self` immediately. We do so
/// by breaking out of the loop, falling through to the very last
/// line, and manually set `self.next_item` to `None`.
///
/// Note that there is a so-called nullary case, when
/// `cartesian::product()` is called with an empty slice. While
/// this use-case is debatable, the mathematically correct way to
/// deal with it is to yield some empty vector once and then
/// nothing.
///
/// Luckily, we already handle this correctly! Because of the way
/// `Iterator::collect()` works when collecting into an
/// `Option<Vec<_>>`, `next_item` is initialized to some empty
/// vector, so this will be the first thing we yield. Then, when
/// `self.advance()` is called, we fall through the `while` loop and
/// immediately exhaust this iterator, yielding nothing more.
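///
/// As a rough sketch of this "digit" behaviour (assuming the
/// `scenarios::cartesian` paths used in the `product()` examples),
/// the right-most iterator rolls over like the last digit of a
/// counter:
///
/// ```ignore
/// use scenarios::cartesian;
///
/// let digits = [[0, 1], [0, 1]];
/// let mut p = cartesian::product(&digits);
/// assert_eq!(p.next(), Some(vec![&0, &0]));
/// assert_eq!(p.next(), Some(vec![&0, &1])); // right "digit" advanced
/// assert_eq!(p.next(), Some(vec![&1, &0])); // right "digit" rolled over
/// assert_eq!(p.next(), Some(vec![&1, &1]));
/// assert_eq!(p.next(), None); // left-most iterator exhausted
/// ```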
fn advance(&mut self) {
if let Some(ref mut next_item) = self.next_item {
let mut i = self.iterators.len();
while i > 0 {
i -= 1;
// Grab the next item from the current sub-iterator.
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// If that works, we're done!
return;
} else if i == 0 {
// Last sub-iterator is exhausted, so we're
// exhausted, too.
break;
}
// The current sub-iterator is empty, start anew.
self.iterators[i] = self.collections[i].into_iter();
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// Roll over to the next sub-iterator.
} else {
// Should never happen: The freshly restarted
// sub-iterator is already empty.
break;
}
}
}
// Exhaust this iterator if the above loop `break`s.
self.next_item = None;
}
}
#[derive(Debug)]
struct SizeHint(usize, Option<usize>);
impl SizeHint {
fn into_inner(self) -> (usize, Option<usize>) {
(self.0, self.1)
}
}
impl<'a, I: Iterator> From<&'a I> for SizeHint {
fn from(iter: &'a I) -> Self {
let (lower, upper) = iter.size_hint();
SizeHint(lower, upper)
}
}
impl ::std::ops::Add for SizeHint {
type Output = Self;
fn add(self, other: Self) -> Self {
let lower = self.0 + other.0;
let upper = match (self.1, other.1) {
(Some(left), Some(right)) => Some(left + right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl ::std::ops::Mul for SizeHint {
type Output = Self;
fn mul(self, other: Self) -> Self {
let lower = self.0 * other.0;
let upper = match (self.1, other.1) {
(Some(left), Some(right)) => Some(left * right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl ::std::iter::Sum for SizeHint {
fn sum<I: I | ator<Item = Self>>(iter: I) -> Self {
iter.fold(SizeHint(0, Some(0)), |acc, x| acc + x)
}
}
impl ::std::iter::Product for SizeHint {
fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(SizeHint(1, Some(1)), |acc, x| acc * x)
}
}
#[cfg(test)]
mod tests {
mod lengths {
use cartesian;
/// Asserts that the `len(V1×V2×...VN) ==
/// len(V1)×len(V2)×...len(VN)`.
fn assert_length<T>(vectors: &Vec<Vec<T>>) {
let expected_len = vectors.iter().map(Vec::len).product::<usize>();
let p = cartesian::product(vectors);
let (lower, upper) = p.size_hint();
let predicted_len = p.len();
let actual_len = p.collect::<Vec<Vec<&T>>>().len();
assert_eq!(expected_len, lower);
assert_eq!(expected_len, upper.unwrap());
assert_eq!(expected_len, predicted_len);
assert_eq!(expected_len, actual_len);
}
#[test]
fn test_length() {
let vectors = vec![vec![1, 1, 1, 1], vec![2, 2, 2, 2], vec![3, 3, 3, 3]];
assert_length(&vectors);
}
#[test]
fn test_unequal_length() {
let vectors = vec![vec![1, 1], vec![2, 2, 2, 2], vec![3]];
assert_length(&vectors);
}
#[test]
fn test_empty_vector() {
let one_is_empty = [vec![0; 3], vec![0; 3], vec![0; 0]];
let empty_product: Vec<_> = cartesian::product(&one_is_empty).collect();
assert_eq!(empty_product.len(), 0);
}
#[test]
fn test_nullary_product() {
let empty: [[u32; 1]; 0] = [];
let mut nullary_product = cartesian::product(&empty);
assert_eq!(nullary_product.next(), Some(Vec::new()));
assert_eq!(nullary_product.next(), None);
}
}
mod types {
use cartesian;
#[test]
fn test_i32() {
let numbers = [[0, 16, 32, 48], [0, 4, 8, 12], [0, 1, 2, 3]];
let expected: Vec<u32> = (0..64).collect();
let actual: Vec<u32> = cartesian::product(&numbers)
.map(Vec::into_iter)
.map(Iterator::sum)
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_string() {
use std::iter::FromIterator;
let letters = [
["A".to_string(), "B".to_string()],
["a".to_string(), "b".to_string()],
];
let expected = vec![
"Aa".to_string(),
"Ab".to_string(),
"Ba".to_string(),
"Bb".to_string(),
];
let actual: Vec<String> = cartesian::product(&letters)
.map(|combo| combo.into_iter().map(String::as_str))
.map(String::from_iter)
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_slices() {
let bits: [[u8; 2]; 4] = [[0, 8], [0, 4], [0, 2], [0, 1]];
let expected: Vec<u8> = (0..16).collect();
let actual: Vec<u8> = cartesian::product(&bits)
.map(Vec::into_iter)
.map(Iterator::sum)
.collect();
assert_eq!(expected, actual);
}
}
}
| ter | identifier_name |
cartesian.rs | // Copyright 2017 Nico Madysa.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//! Provides the function `cartesian::product()`.
//!
//! The module name has been chosen entirely so that the call reads as `cartesian::product()`.
/// Iterates over the Cartesian product of a list of containers.
///
/// This essentially does the same as the macro
/// `itertools::iproduct!()`, but the number of arguments may be
/// decided at run-time. In return, this function requires that all
/// passed iterators yield items of the same type, whereas the
/// iterators passed to `itertools::iproduct!()` may be heterogeneous.
/// Furthermore, the freedom of choosing the number of arguments at
/// run-time means that the product iterator iterates over vectors
/// instead of slices. This requires a heap allocation for every item.
///
/// The argument to this function is a slice of containers `C` with
/// items `T`. *Immutable references* to these containers must be
/// convertible to iterators over `&T`. This is necessary because we
/// need to pass over each container multiple times.
///
/// # Example
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let slices = [[1, 2], [11, 22]];
/// let mut combinations = cartesian::product(&slices);
/// assert_eq!(combinations.next(), Some(vec![&1, &11]));
/// assert_eq!(combinations.next(), Some(vec![&1, &22]));
/// assert_eq!(combinations.next(), Some(vec![&2, &11]));
/// assert_eq!(combinations.next(), Some(vec![&2, &22]));
/// assert_eq!(combinations.next(), None);
/// ```
///
/// Note that if any one of the passed containers is empty, the product
/// as a whole is empty, too.
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let vectors = [vec![1, 2], vec![11, 22], vec![]];
/// let mut combinations = cartesian::product(&vectors);
/// assert_eq!(combinations.next(), None);
/// ```
///
/// For mathematical correctness, the product of no collections at all
/// is one empty vector.
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let empty: [[u32; 1]; 0] = [];
/// let mut combinations = cartesian::product(&empty);
/// assert_eq!(combinations.next(), Some(Vec::new()));
/// assert_eq!(combinations.next(), None);
/// ```
pub fn product<'a, C: 'a, T: 'a>(collections: &'a [C]) -> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
// We start with fresh iterators and pre-fill `next_item` with each iterator's first element (`None` overall if any collection is empty).
let mut iterators = collections.iter().map(<&C>::into_iter).collect::<Vec<_>>();
let next_item = iterators.iter_mut().map(Iterator::next).collect();
Product {
collections,
iterators,
next_item,
}
}
/// Iterator returned by [`product()`].
///
/// [`product()`]:./fn.product.html
pub struct Product<'a, C: 'a, T: 'a>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// The underlying collections that we iterate over.
collections: &'a [C],
/// Our own set of sub-iterators, taken from `collections`.
iterators: Vec<<&'a C as IntoIterator>::IntoIter>,
/// The next item to yield.
next_item: Option<Vec<&'a T>>,
}
impl<'a, C, T> Iterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
type Item = Vec<&'a T>;
fn next(&mut self) -> Option<Self::Item> {
let result = self.next_item.clone();
self.advance();
result
}
/// Calculate bounds on the number of remaining elements.
///
/// This is calculated the same way as [`Product::len()`], but uses
/// a helper type to deal with the return type of `size_hint()`.
/// See there for information on why the formula used is correct.
///
/// [`Product::len()`]: #method.len
fn size_hint(&self) -> (usize, Option<usize>) {
if self.next_item.is_none() {
return (0, Some(0));
}
let SizeHint(lower, upper) = SizeHint(1, Some(1))
+ self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
SizeHint::from(iterator)
* self.collections[i + 1..]
.iter()
.map(|c| SizeHint::from(&c.into_iter()))
.product()
})
.sum();
(lower, upper)
}
}
impl<'a, C, T> ExactSizeIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{
/// Calculates the exact number of remaining elements.
///
/// The length consists of the following contributions:
///
/// - 1 for the `next_item` to be yielded;
/// - `X` for each currently active iterator, where X is the
/// product of the iterator's length and the sizes of all
/// *collections* to the right of it in the product.
///
/// Example
/// -------
///
/// Assume the Cartesian product `[1, 2, 3]×[1, 2]×[1, 2, 3]`. Upon
/// construction, the `Product` type creates three iterators `A`,
/// `B`, and `C` – one iterator for each array. It also extracts
/// one item from each to form `next_item`. Hence, `next_item`
/// contributes `1` to the total length. The three iterators
/// contribute as follows:
///
/// - A: 2 items left × collection of size 2 × collection of size
/// 3 = 12;
/// - B: 1 item left × collection of size 3 = 3;
/// - C: 2 items left = 2.
///
/// Thus, we end up with a total length of `1+12+3+2=18`. This is
/// the same length we get when multiplying the size of all passed
/// collections. (`3*2*3=18`) However, our (complicated) formula
/// also works when the iterator has already yielded some elements.
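///
/// As a rough sketch (assuming the `scenarios::cartesian` paths used
/// in the `product()` examples above), the reported length shrinks by
/// exactly one for every item drawn:
///
/// ```ignore
/// use scenarios::cartesian;
///
/// let vectors = vec![vec![1, 2, 3], vec![1, 2], vec![1, 2, 3]];
/// let mut p = cartesian::product(&vectors);
/// assert_eq!(p.len(), 18); // 3 * 2 * 3 for a fresh iterator
/// p.next();
/// assert_eq!(p.len(), 17); // 1 + 12 + 3 + 1, per the breakdown above
/// ```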
fn len(&self) -> usize {
if self.next_item.is_none() {
| 1 + self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
iterator.len()
* self.collections[i + 1..]
.iter()
.map(|c| c.into_iter().len())
.product::<usize>()
})
.sum::<usize>()
}
}
impl<'a, C, T> ::std::iter::FusedIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{}
impl<'a, C, T> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// Advances the iterators and updates `self.next_item`.
///
/// This loop works like incrementing a number digit by digit. We
/// go over each iterator and its corresponding "digit" in
/// `next_item` in lockstep, starting at the back.
///
/// If we can advance the iterator, we update the "digit" and are
/// done. If the iterator is exhausted, we have to go from "9" to
/// "10": we restart the iterator, grab the first element, and move
/// on to the next digit.
///
/// The `break` expressions are to be understood literally: our
/// scheme can break in two ways.
/// 1. The very first iterator (`i==0`) is exhausted.
/// 2. A freshly restarted iterator is empty. (should never happen!)
/// In both cases, we want to exhaust `self` immediately. We do so
/// by breaking out of the loop, falling through to the very last
/// line, and manually set `self.next_item` to `None`.
///
/// Note that there is a so-called nullary case, when
/// `cartesian::product()` is called with an empty slice. While
/// this use-case is debatable, the mathematically correct way to
/// deal with it is to yield some empty vector once and then
/// nothing.
///
/// Luckily, we already handle this correctly! Because of the way
/// `Iterator::collect()` works when collecting into an
/// `Option<Vec<_>>`, `next_item` is initialized to some empty
/// vector, so this will be the first thing we yield. Then, when
/// `self.advance()` is called, we fall through the `while` loop and
/// immediately exhaust this iterator, yielding nothing more.
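///
/// As a rough sketch of this "digit" behaviour (assuming the
/// `scenarios::cartesian` paths used in the `product()` examples),
/// the right-most iterator rolls over like the last digit of a
/// counter:
///
/// ```ignore
/// use scenarios::cartesian;
///
/// let digits = [[0, 1], [0, 1]];
/// let mut p = cartesian::product(&digits);
/// assert_eq!(p.next(), Some(vec![&0, &0]));
/// assert_eq!(p.next(), Some(vec![&0, &1])); // right "digit" advanced
/// assert_eq!(p.next(), Some(vec![&1, &0])); // right "digit" rolled over
/// assert_eq!(p.next(), Some(vec![&1, &1]));
/// assert_eq!(p.next(), None); // left-most iterator exhausted
/// ```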
fn advance(&mut self) {
if let Some(ref mut next_item) = self.next_item {
let mut i = self.iterators.len();
while i > 0 {
i -= 1;
// Grab the next item from the current sub-iterator.
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// If that works, we're done!
return;
} else if i == 0 {
// Last sub-iterator is exhausted, so we're
// exhausted, too.
break;
}
// The current sub-iterator is empty, start anew.
self.iterators[i] = self.collections[i].into_iter();
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// Roll over to the next sub-iterator.
} else {
// Should never happen: The freshly restarted
// sub-iterator is already empty.
break;
}
}
}
// Exhaust this iterator if the above loop `break`s.
self.next_item = None;
}
}
#[derive(Debug)]
struct SizeHint(usize, Option<usize>);
impl SizeHint {
fn into_inner(self) -> (usize, Option<usize>) {
(self.0, self.1)
}
}
impl<'a, I: Iterator> From<&'a I> for SizeHint {
fn from(iter: &'a I) -> Self {
let (lower, upper) = iter.size_hint();
SizeHint(lower, upper)
}
}
impl ::std::ops::Add for SizeHint {
type Output = Self;
fn add(self, other: Self) -> Self {
let lower = self.0 + other.0;
let upper = match (self.1, other.1) {
(Some(left), Some(right)) => Some(left + right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl ::std::ops::Mul for SizeHint {
type Output = Self;
fn mul(self, other: Self) -> Self {
let lower = self.0 * other.0;
let upper = match (self.1, other.1) {
(Some(left), Some(right)) => Some(left * right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl ::std::iter::Sum for SizeHint {
fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(SizeHint(0, Some(0)), |acc, x| acc + x)
}
}
impl ::std::iter::Product for SizeHint {
fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(SizeHint(1, Some(1)), |acc, x| acc * x)
}
}
#[cfg(test)]
mod tests {
mod lengths {
use cartesian;
/// Asserts that the `len(V1×V2×...VN) ==
/// len(V1)×len(V2)×...len(VN)`.
fn assert_length<T>(vectors: &Vec<Vec<T>>) {
let expected_len = vectors.iter().map(Vec::len).product::<usize>();
let p = cartesian::product(vectors);
let (lower, upper) = p.size_hint();
let predicted_len = p.len();
let actual_len = p.collect::<Vec<Vec<&T>>>().len();
assert_eq!(expected_len, lower);
assert_eq!(expected_len, upper.unwrap());
assert_eq!(expected_len, predicted_len);
assert_eq!(expected_len, actual_len);
}
#[test]
fn test_length() {
let vectors = vec![vec![1, 1, 1, 1], vec![2, 2, 2, 2], vec![3, 3, 3, 3]];
assert_length(&vectors);
}
#[test]
fn test_unequal_length() {
let vectors = vec![vec![1, 1], vec![2, 2, 2, 2], vec![3]];
assert_length(&vectors);
}
#[test]
fn test_empty_vector() {
let one_is_empty = [vec![0; 3], vec![0; 3], vec![0; 0]];
let empty_product: Vec<_> = cartesian::product(&one_is_empty).collect();
assert_eq!(empty_product.len(), 0);
}
#[test]
fn test_nullary_product() {
let empty: [[u32; 1]; 0] = [];
let mut nullary_product = cartesian::product(&empty);
assert_eq!(nullary_product.next(), Some(Vec::new()));
assert_eq!(nullary_product.next(), None);
}
}
mod types {
use cartesian;
#[test]
fn test_i32() {
let numbers = [[0, 16, 32, 48], [0, 4, 8, 12], [0, 1, 2, 3]];
let expected: Vec<u32> = (0..64).collect();
let actual: Vec<u32> = cartesian::product(&numbers)
.map(Vec::into_iter)
.map(Iterator::sum)
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_string() {
use std::iter::FromIterator;
let letters = [
["A".to_string(), "B".to_string()],
["a".to_string(), "b".to_string()],
];
let expected = vec![
"Aa".to_string(),
"Ab".to_string(),
"Ba".to_string(),
"Bb".to_string(),
];
let actual: Vec<String> = cartesian::product(&letters)
.map(|combo| combo.into_iter().map(String::as_str))
.map(String::from_iter)
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_slices() {
let bits: [[u8; 2]; 4] = [[0, 8], [0, 4], [0, 2], [0, 1]];
let expected: Vec<u8> = (0..16).collect();
let actual: Vec<u8> = cartesian::product(&bits)
.map(Vec::into_iter)
.map(Iterator::sum)
.collect();
assert_eq!(expected, actual);
}
}
}
| return 0;
}
| conditional_block |
cartesian.rs | // Copyright 2017 Nico Madysa.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//! Provides the function `cartesian::product()`.
//!
//! The module name has been chosen entirely so that the call reads as `cartesian::product()`.
/// Iterates over the Cartesian product of a list of containers.
///
/// This essentially does the same as the macro
/// `itertools::iproduct!()`, but the number of arguments may be
/// decided at run-time. In return, this function requires that all
/// passed iterators yield items of the same type, whereas the
/// iterators passed to `itertools::iproduct!()` may be heterogeneous.
/// Furthermore, the freedom of choosing the number of arguments at
/// run-time means that the product iterator iterates over vectors
/// instead of slices. This requires a heap allocation for every item.
///
/// The argument to this function is a slice of containers `C` with
/// items `T`. *Immutable references* to these containers must be
/// convertible to iterators over `&T`. This is necessary because we
/// need to pass over each container multiple times.
///
/// # Example
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let slices = [[1, 2], [11, 22]];
/// let mut combinations = cartesian::product(&slices);
/// assert_eq!(combinations.next(), Some(vec![&1, &11]));
/// assert_eq!(combinations.next(), Some(vec![&1, &22]));
/// assert_eq!(combinations.next(), Some(vec![&2, &11]));
/// assert_eq!(combinations.next(), Some(vec![&2, &22]));
/// assert_eq!(combinations.next(), None);
/// ```
///
/// Note that if any one of the passed containers is empty, the product
/// as a whole is empty, too.
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let vectors = [vec![1, 2], vec![11, 22], vec![]];
/// let mut combinations = cartesian::product(&vectors);
/// assert_eq!(combinations.next(), None);
/// ```
///
/// For mathematical correctness, the product of no collections at all
/// is one empty vector.
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let no_collections: [Vec<u32>; 0] = [];
/// let mut combinations = cartesian::product(&no_collections);
/// assert_eq!(combinations.next(), Some(Vec::new()));
/// assert_eq!(combinations.next(), None);
/// ```
pub fn product<'a, C: 'a, T: 'a>(collections: &'a [C]) -> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
// We start with fresh iterators and prime `next_item` with each one's first
// element (`collect()` yields `None` here if any iterator is already empty).
let mut iterators = collections.iter().map(<&C>::into_iter).collect::<Vec<_>>();
let next_item = iterators.iter_mut().map(Iterator::next).collect();
Product {
collections,
iterators,
next_item,
}
}
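// A hedged sketch (not part of the original API): the arity of the product can
// be chosen at run time, which `itertools::iproduct!()` cannot do. The `depth`
// variable and the function name are illustrative only.
#[cfg(test)]
#[allow(dead_code)]
fn runtime_arity_demo() {
    let depth = 3usize; // imagine this value arrives from configuration
    let collections: Vec<Vec<u32>> = (0..depth).map(|_| vec![0, 1]).collect();
    // Every yielded combination is a Vec<&u32> of length `depth`.
    assert_eq!(product(&collections).count(), 1usize << depth);
}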
/// Iterator returned by [`product()`].
///
/// [`product()`]:./fn.product.html
pub struct Product<'a, C: 'a, T: 'a>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// The underlying collections that we iterate over.
collections: &'a [C],
/// Our own set of sub-iterators, taken from `collections`.
iterators: Vec<<&'a C as IntoIterator>::IntoIter>,
/// The next item to yield.
next_item: Option<Vec<&'a T>>,
}
impl<'a, C, T> Iterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
type Item = Vec<&'a T>;
fn next(&mut self) -> Option<Self::Item> {
let result = self.next_item.clone();
self.advance();
result
}
/// Calculate bounds on the number of remaining elements.
///
/// This is calculated the same way as [`Product::len()`], but uses
/// a helper type to deal with the return type of `size_hint()`.
/// See there for information on why the formula used is correct.
///
/// [`Product::len()`]: #method.len
fn size_hint(&self) -> (usize, Option<usize>) {
if self.next_item.is_none() {
return (0, Some(0));
}
let SizeHint(lower, upper) = SizeHint(1, Some(1))
+ self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
SizeHint::from(iterator)
* self.collections[i + 1..]
.iter()
.map(|c| SizeHint::from(&c.into_iter()))
.product()
})
.sum();
(lower, upper)
}
}
impl<'a, C, T> ExactSizeIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{
/// Calculates the exact number of remaining elements.
///
/// The length consists of the following contributions:
///
/// - 1 for the `next_item` to be yielded;
/// - `X` for each currently active iterator, where X is the
/// product of the iterator's length and the sizes of all
/// *collections* to the right of it in the product.
///
/// Example
/// -------
///
/// Assume the Cartesian product `[1, 2, 3]×[1, 2]×[1, 2, 3]`. Upon
/// construction, the `Product` type creates three iterators `A`,
/// `B`, and `C` – one iterator for each array. It also extracts
/// one item from each to form `next_item`. Hence, `next_item`
/// contributes `1` to the total length. The three iterators
/// contribute as follows:
///
/// - A: 2 items left × collection of size 2 × collection of size
/// 3 = 12;
/// - B: 1 item left × collection of size 3 = 3;
/// - C: 2 items left = 2.
///
/// Thus, we end up with a total length of `1+12+3+2=18`. This is
/// the same length we get when multiplying the size of all passed
/// collections (`3*2*3=18`). However, our (complicated) formula
/// also works when the iterator has already yielded some elements.
fn len(&self) -> usize {
if self.next_item.is_none() {
return 0;
}
1 + self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
iterator.len()
* self.collections[i + 1..]
.iter()
.map(|c| c.into_iter().len())
.product::<usize>()
})
.sum::<usize>()
}
}
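// A hedged sketch mirroring the `[1, 2, 3]×[1, 2]×[1, 2, 3]` example above;
// the function name is illustrative only.
#[cfg(test)]
#[allow(dead_code)]
fn remaining_length_demo() {
    let collections = [vec![1, 2, 3], vec![1, 2], vec![1, 2, 3]];
    let mut p = product(&collections);
    assert_eq!(p.len(), 18); // 3 * 2 * 3 items in total
    p.next();
    assert_eq!(p.len(), 17); // the formula also holds mid-iteration
}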
impl<'a, C, T> ::std::iter::FusedIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{}
impl<'a, C, T> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// Advances the iterators and updates `self.next_item`.
///
/// This loop works like incrementing a number digit by digit. We
/// go over each iterator and its corresponding "digit" in
/// `next_item` in lockstep, starting at the back.
///
/// If we can advance the iterator, we update the "digit" and are
/// done. If the iterator is exhausted, we have to go from "9" to
/// "10": we restart the iterator, grab the first element, and move
/// on to the next digit.
///
/// The `break` expressions are to be understood literally: our
/// scheme can break in two ways.
/// 1. The very first iterator (`i==0`) is exhausted.
/// 2. A freshly restarted iterator is empty. (should never happen!)
/// In both cases, we want to exhaust `self` immediately. We do so
/// by breaking out of the loop, falling through to the very last
/// line, and manually setting `self.next_item` to `None`.
///
/// Note that there is a so-called nullary case, when
/// `cartesian::product()` is called with an empty slice. While
/// this use-case is debatable, the mathematically correct way to
/// deal with it is to yield some empty vector once and then
/// nothing.
///
/// Luckily, we already handle this correctly! Because of the way
/// `Iterator::collect()` works when collecting into an
/// `Option<Vec<_>>`, `next_item` is initialized to some empty
/// vector, so this will be the first thing we yield. Then, when
/// `self.advance()` is called, we fall through the `while` loop and
/// immediately exhaust this iterator, yielding nothing more.
fn advance(&mut self) {
if let Some(ref mut next_item) = self.next_item {
let mut i = self.iterators.len();
while i > 0 {
i -= 1;
// Grab the next item from the current sub-iterator.
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// If that works, we're done!
return;
} else if i == 0 {
// Last sub-iterator is exhausted, so we're
// exhausted, too.
break;
}
// The current sub-iterator is exhausted; start it anew.
self.iterators[i] = self.collections[i].into_iter();
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// Roll over to the next sub-iterator.
} else {
// Should never happen: The freshly restarted
// sub-iterator is already empty.
break;
}
}
}
// Exhaust this iterator if the above loop `break`s.
self.next_item = None;
}
}
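// A hedged sketch of the "odometer" order produced by `advance()`: the
// right-most position rolls over fastest. The function name is illustrative.
#[cfg(test)]
#[allow(dead_code)]
fn advance_order_demo() {
    let digits = [vec![0, 1], vec![0, 1]];
    let combos: Vec<Vec<&i32>> = product(&digits).collect();
    assert_eq!(
        combos,
        vec![vec![&0, &0], vec![&0, &1], vec![&1, &0], vec![&1, &1]]
    );
}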
#[derive(Debug)]
struct SizeHint(usize, Option<usize>);
impl SizeHint {
fn into_inner(self) -> (usize, Option<usize>) {
(self.0, self.1)
}
}
impl<'a, I: Iterator> From<&'a I> for SizeHint {
fn from(iter: &'a I) -> Self {
let (lower, upper) = iter.size_hint();
SizeHint(lower, upper)
}
}
impl ::std::ops::Add for SizeHint {
type Output = Self;
fn add(self, other: Self) -> Self {
let lower = self.0 + other.0;
let upper = match (self.1, other.1) {
(Some(left), Some(right)) => Some(left + right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl ::std::ops::Mul for SizeHint {
type Output = Self;
fn mul(self, other: Self) -> Self {
| ::std::iter::Sum for SizeHint {
fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(SizeHint(0, Some(0)), |acc, x| acc + x)
}
}
impl ::std::iter::Product for SizeHint {
fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(SizeHint(1, Some(1)), |acc, x| acc * x)
}
}
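// A hedged sketch of the interval arithmetic above: a `None` upper bound is
// absorbing under both `+` and `*`. The function name is illustrative only.
#[cfg(test)]
#[allow(dead_code)]
fn size_hint_arithmetic_demo() {
    assert_eq!((SizeHint(2, Some(3)) + SizeHint(4, None)).into_inner(), (6, None));
    assert_eq!((SizeHint(2, Some(3)) * SizeHint(4, None)).into_inner(), (8, None));
}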
#[cfg(test)]
mod tests {
mod lengths {
use cartesian;
/// Asserts that `len(V1×V2×...×VN) ==
/// len(V1)×len(V2)×...×len(VN)`.
fn assert_length<T>(vectors: &Vec<Vec<T>>) {
let expected_len = vectors.iter().map(Vec::len).product::<usize>();
let p = cartesian::product(vectors);
let (lower, upper) = p.size_hint();
let predicted_len = p.len();
let actual_len = p.collect::<Vec<Vec<&T>>>().len();
assert_eq!(expected_len, lower);
assert_eq!(expected_len, upper.unwrap());
assert_eq!(expected_len, predicted_len);
assert_eq!(expected_len, actual_len);
}
#[test]
fn test_length() {
let vectors = vec![vec![1, 1, 1, 1], vec![2, 2, 2, 2], vec![3, 3, 3, 3]];
assert_length(&vectors);
}
#[test]
fn test_unequal_length() {
let vectors = vec![vec![1, 1], vec![2, 2, 2, 2], vec![3]];
assert_length(&vectors);
}
#[test]
fn test_empty_vector() {
let one_is_empty = [vec![0; 3], vec![0; 3], vec![0; 0]];
let empty_product: Vec<_> = cartesian::product(&one_is_empty).collect();
assert_eq!(empty_product.len(), 0);
}
#[test]
fn test_nullary_product() {
let empty: [[u32; 1]; 0] = [];
let mut nullary_product = cartesian::product(&empty);
assert_eq!(nullary_product.next(), Some(Vec::new()));
assert_eq!(nullary_product.next(), None);
}
}
mod types {
use cartesian;
#[test]
fn test_i32() {
let numbers = [[0, 16, 32, 48], [0, 4, 8, 12], [0, 1, 2, 3]];
let expected: Vec<u32> = (0..64).collect();
let actual: Vec<u32> = cartesian::product(&numbers)
.map(Vec::into_iter)
.map(Iterator::sum)
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_string() {
use std::iter::FromIterator;
let letters = [
["A".to_string(), "B".to_string()],
["a".to_string(), "b".to_string()],
];
let expected = vec![
"Aa".to_string(),
"Ab".to_string(),
"Ba".to_string(),
"Bb".to_string(),
];
let actual: Vec<String> = cartesian::product(&letters)
.map(|combo| combo.into_iter().map(String::as_str))
.map(String::from_iter)
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_slices() {
let bits: [[u8; 2]; 4] = [[0, 8], [0, 4], [0, 2], [0, 1]];
let expected: Vec<u8> = (0..16).collect();
let actual: Vec<u8> = cartesian::product(&bits)
.map(Vec::into_iter)
.map(Iterator::sum)
.collect();
assert_eq!(expected, actual);
}
}
}
| let lower = self.0 * other.0;
let upper = match (self.1, other.1) {
(Some(left), Some(right)) => Some(left * right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl | identifier_body |
cartesian.rs | // Copyright 2017 Nico Madysa.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//! Provides the function `cartesian::product()`.
//!
//! The module name has been chosen so that the call reads naturally as `cartesian::product()`.
/// Iterates over the Cartesian product of a list of containers.
///
/// This essentially does the same as the macro
/// `itertools::iproduct!()`, but the number of arguments may be
/// decided at run-time. In return, this function requires that all
/// passed iterators yield items of the same type, whereas the
/// iterators passed to `itertools::iproduct!()` may be heterogeneous.
/// Furthermore, the freedom of choosing the number of arguments at
/// run-time means that the product iterator iterates over vectors
/// instead of slices. This requires a heap allocation for every item.
///
/// The argument to this function is a slice of containers `C` with
/// items `T`. *Immutable references* to these containers must be
/// convertible to iterators over `&T`. This is necessary because we
/// need to pass over each container multiple times.
///
/// # Example
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let slices = [[1, 2], [11, 22]];
/// let mut combinations = cartesian::product(&slices);
/// assert_eq!(combinations.next(), Some(vec![&1, &11]));
/// assert_eq!(combinations.next(), Some(vec![&1, &22]));
/// assert_eq!(combinations.next(), Some(vec![&2, &11]));
/// assert_eq!(combinations.next(), Some(vec![&2, &22]));
/// assert_eq!(combinations.next(), None);
/// ```
///
/// Note that if any one of the passed containers is empty, the product
/// as a whole is empty, too.
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let vectors = [vec![1, 2], vec![11, 22], vec![]];
/// let mut combinations = cartesian::product(&vectors);
/// assert_eq!(combinations.next(), None);
/// ```
///
/// For mathematical correctness, the product of no collections at all
/// is one empty vector.
///
/// ```rust
/// extern crate scenarios;
///
/// use scenarios::cartesian;
///
/// let no_collections: [Vec<u32>; 0] = [];
/// let mut combinations = cartesian::product(&no_collections);
/// assert_eq!(combinations.next(), Some(Vec::new()));
/// assert_eq!(combinations.next(), None);
/// ```
pub fn product<'a, C: 'a, T: 'a>(collections: &'a [C]) -> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
// We start with fresh iterators and prime `next_item` with each one's first
// element (`collect()` yields `None` here if any iterator is already empty).
let mut iterators = collections.iter().map(<&C>::into_iter).collect::<Vec<_>>();
let next_item = iterators.iter_mut().map(Iterator::next).collect();
Product {
collections,
iterators,
next_item,
}
}
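// A hedged sketch of the claim above: a single empty constituent collection
// empties the whole product. The function name is illustrative only.
#[cfg(test)]
#[allow(dead_code)]
fn empty_member_demo() {
    let with_empty = [vec![1, 2], vec![]];
    assert_eq!(product(&with_empty).next(), None);
}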
/// Iterator returned by [`product()`].
///
/// [`product()`]:./fn.product.html
pub struct Product<'a, C: 'a, T: 'a>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// The underlying collections that we iterate over.
collections: &'a [C],
/// Our own set of sub-iterators, taken from `collections`.
iterators: Vec<<&'a C as IntoIterator>::IntoIter>,
/// The next item to yield.
next_item: Option<Vec<&'a T>>,
}
impl<'a, C, T> Iterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
type Item = Vec<&'a T>;
fn next(&mut self) -> Option<Self::Item> {
let result = self.next_item.clone();
self.advance();
result
}
/// Calculate bounds on the number of remaining elements.
///
/// This is calculated the same way as [`Product::len()`], but uses
/// a helper type to deal with the return type of `size_hint()`.
/// See there for information on why the formula used is correct.
///
/// [`Product::len()`]: #method.len
fn size_hint(&self) -> (usize, Option<usize>) {
if self.next_item.is_none() {
return (0, Some(0));
}
let SizeHint(lower, upper) = SizeHint(1, Some(1))
+ self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
SizeHint::from(iterator)
* self.collections[i + 1..]
.iter()
.map(|c| SizeHint::from(&c.into_iter()))
.product()
})
.sum();
(lower, upper)
}
}
impl<'a, C, T> ExactSizeIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{
/// Calculates the exact number of remaining elements.
///
/// The length consists of the following contributions:
///
/// - 1 for the `next_item` to be yielded;
/// - `X` for each currently active iterator, where X is the
/// product of the iterator's length and the sizes of all
/// *collections* to the right of it in the product.
///
/// Example
/// -------
///
/// Assume the Cartesian product `[1, 2, 3]×[1, 2]×[1, 2, 3]`. Upon
/// construction, the `Product` type creates three iterators `A`,
/// `B`, and `C` – one iterator for each array. It also extracts
/// one item from each to form `next_item`. Hence, `next_item`
/// contributes `1` to the total length. The three iterators
/// contribute as follows:
///
/// - A: 2 items left × collection of size 2 × collection of size
/// 3 = 12;
/// - B: 1 item left × collection of size 3 = 3;
/// - C: 2 items left = 2.
///
/// Thus, we end up with a total length of `1+12+3+2=18`. This is
/// the same length we get when multiplying the size of all passed
/// collections (`3*2*3=18`). However, our (complicated) formula
/// also works when the iterator has already yielded some elements.
fn len(&self) -> usize {
if self.next_item.is_none() {
return 0;
}
1 + self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
iterator.len()
* self.collections[i + 1..]
.iter()
.map(|c| c.into_iter().len())
.product::<usize>()
})
.sum::<usize>()
}
}
impl<'a, C, T> ::std::iter::FusedIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{}
impl<'a, C, T> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// Advances the iterators and updates `self.next_item`.
///
/// This loop works like incrementing a number digit by digit. We
/// go over each iterator and its corresponding "digit" in
/// `next_item` in lockstep, starting at the back.
///
/// If we can advance the iterator, we update the "digit" and are
/// done. If the iterator is exhausted, we have to go from "9" to
/// "10": we restart the iterator, grab the first element, and move
/// on to the next digit.
///
/// The `break` expressions are to be understood literally: our
/// scheme can break in two ways.
/// 1. The very first iterator (`i==0`) is exhausted.
/// 2. A freshly restarted iterator is empty. (should never happen!)
/// In both cases, we want to exhaust `self` immediately. We do so
/// by breaking out of the loop, falling through to the very last
/// line, and manually setting `self.next_item` to `None`.
///
/// Note that there is a so-called nullary case, when
/// `cartesian::product()` is called with an empty slice. While
/// this use-case is debatable, the mathematically correct way to
/// deal with it is to yield some empty vector once and then
/// nothing.
///
/// Luckily, we already handle this correctly! Because of the way
/// `Iterator::collect()` works when collecting into an
/// `Option<Vec<_>>`, `next_item` is initialized to some empty
/// vector, so this will be the first thing we yield. Then, when
/// `self.advance()` is called, we fall through the `while` loop and
/// immediately exhaust this iterator, yielding nothing more.
fn advance(&mut self) {
if let Some(ref mut next_item) = self.next_item {
let mut i = self.iterators.len();
while i > 0 {
i -= 1;
// Grab the next item from the current sub-iterator.
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// If that works, we're done!
return;
} else if i == 0 {
// Last sub-iterator is exhausted, so we're
// exhausted, too.
break;
}
// The current sub-iterator is exhausted; start it anew.
self.iterators[i] = self.collections[i].into_iter();
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// Roll over to the next sub-iterator.
} else {
// Should never happen: The freshly restarted
// sub-iterator is already empty.
break;
}
}
}
// Exhaust this iterator if the above loop `break`s.
self.next_item = None;
}
}
#[derive(Debug)]
struct SizeHint(usize, Option<usize>);
impl SizeHint {
fn into_inner(self) -> (usize, Option<usize>) {
(self.0, self.1)
}
}
impl<'a, I: Iterator> From<&'a I> for SizeHint {
fn from(iter: &'a I) -> Self {
let (lower, upper) = iter.size_hint();
SizeHint(lower, upper)
}
}
impl ::std::ops::Add for SizeHint {
type Output = Self;
fn add(self, other: Self) -> Self {
let lower = self.0 + other.0;
let upper = match (self.1, other.1) {
(Some(left), Some(right)) => Some(left + right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl ::std::ops::Mul for SizeHint { | (Some(left), Some(right)) => Some(left * right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl ::std::iter::Sum for SizeHint {
fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(SizeHint(0, Some(0)), |acc, x| acc + x)
}
}
impl ::std::iter::Product for SizeHint {
fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(SizeHint(1, Some(1)), |acc, x| acc * x)
}
}
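// A hedged sketch: the `Sum` fold starts from the additive identity
// `SizeHint(0, Some(0))`, so summing no hints yields exactly that.
#[cfg(test)]
#[allow(dead_code)]
fn size_hint_sum_identity_demo() {
    let hints: Vec<SizeHint> = Vec::new();
    let total: SizeHint = hints.into_iter().sum();
    assert_eq!(total.into_inner(), (0, Some(0)));
}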
#[cfg(test)]
mod tests {
mod lengths {
use cartesian;
/// Asserts that `len(V1×V2×...×VN) ==
/// len(V1)×len(V2)×...×len(VN)`.
fn assert_length<T>(vectors: &Vec<Vec<T>>) {
let expected_len = vectors.iter().map(Vec::len).product::<usize>();
let p = cartesian::product(vectors);
let (lower, upper) = p.size_hint();
let predicted_len = p.len();
let actual_len = p.collect::<Vec<Vec<&T>>>().len();
assert_eq!(expected_len, lower);
assert_eq!(expected_len, upper.unwrap());
assert_eq!(expected_len, predicted_len);
assert_eq!(expected_len, actual_len);
}
#[test]
fn test_length() {
let vectors = vec![vec![1, 1, 1, 1], vec![2, 2, 2, 2], vec![3, 3, 3, 3]];
assert_length(&vectors);
}
#[test]
fn test_unequal_length() {
let vectors = vec![vec![1, 1], vec![2, 2, 2, 2], vec![3]];
assert_length(&vectors);
}
#[test]
fn test_empty_vector() {
let one_is_empty = [vec![0; 3], vec![0; 3], vec![0; 0]];
let empty_product: Vec<_> = cartesian::product(&one_is_empty).collect();
assert_eq!(empty_product.len(), 0);
}
#[test]
fn test_nullary_product() {
let empty: [[u32; 1]; 0] = [];
let mut nullary_product = cartesian::product(&empty);
assert_eq!(nullary_product.next(), Some(Vec::new()));
assert_eq!(nullary_product.next(), None);
}
}
mod types {
use cartesian;
#[test]
fn test_i32() {
let numbers = [[0, 16, 32, 48], [0, 4, 8, 12], [0, 1, 2, 3]];
let expected: Vec<u32> = (0..64).collect();
let actual: Vec<u32> = cartesian::product(&numbers)
.map(Vec::into_iter)
.map(Iterator::sum)
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_string() {
use std::iter::FromIterator;
let letters = [
["A".to_string(), "B".to_string()],
["a".to_string(), "b".to_string()],
];
let expected = vec![
"Aa".to_string(),
"Ab".to_string(),
"Ba".to_string(),
"Bb".to_string(),
];
let actual: Vec<String> = cartesian::product(&letters)
.map(|combo| combo.into_iter().map(String::as_str))
.map(String::from_iter)
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_slices() {
let bits: [[u8; 2]; 4] = [[0, 8], [0, 4], [0, 2], [0, 1]];
let expected: Vec<u8> = (0..16).collect();
let actual: Vec<u8> = cartesian::product(&bits)
.map(Vec::into_iter)
.map(Iterator::sum)
.collect();
assert_eq!(expected, actual);
}
}
} | type Output = Self;
fn mul(self, other: Self) -> Self {
let lower = self.0 * other.0;
let upper = match (self.1, other.1) { | random_line_split |
rsqf.rs | use block;
use logical;
use murmur::Murmur3Hash;
#[allow(dead_code)] // for now
pub struct RSQF {
meta: Metadata,
logical: logical::LogicalData,
}
#[allow(dead_code)] // for now | struct Metadata {
n: usize,
qbits: usize,
rbits: usize,
nblocks: usize,
nelements: usize,
ndistinct_elements: usize,
nslots: usize,
noccupied_slots: usize,
max_slots: usize,
}
/// Standard filter result type: on success returns a count, on error returns a message.
/// This should probably be richer over time
type FilterResult = Result<usize, &'static str>;
#[allow(dead_code)] // for now
#[allow(unused_variables)] // for now
impl RSQF {
pub fn new(n: usize, rbits: usize) -> RSQF {
RSQF::from_n_and_r(n, rbits)
}
/// Creates a structure for the filter based on `n` the number of expected elements
/// and `rbits` which specifies the false positive rate at `1/(2^rbits - 1)`
fn from_n_and_r(n: usize, rbits: usize) -> RSQF {
RSQF::from_metadata(Metadata::from_n_and_r(n, rbits))
}
/// Creates an instance of the filter given the description of the filter parameters stored in
/// a `Metadata` structure
fn from_metadata(meta: Metadata) -> RSQF {
let logical = logical::LogicalData::new(meta.nslots, meta.rbits);
return RSQF { meta, logical };
}
/// Queries the filter for the presence of `hash`.
///
/// If `hash` is not present, returns 0
/// If `hash` is likely to be present, returns an approximate count of the number of times
/// `hash` has been inserted. Note that this is approximate; it is possible that `hash` is
/// actually not present but a non-zero count is returned, with a probability no worse than
/// `2^-rbits`
pub fn get_count(&self, hash: Murmur3Hash) -> usize {
panic!("NYI");
}
/// Adds `count` to the total count for `hash` in the filter.
///
/// If `hash` is not present in the filter, it is added with `count` count.
/// If `hash` is present, `count` is added to its existing count.
///
/// As with the `query` method it is possible `hash` collides with another hash value
///
/// Returns the new total count of `hash` on success, or `Err` if the filter is already at max
/// capacity
pub fn add_count(&self, hash: Murmur3Hash, count: usize) -> FilterResult {
panic!("NYI");
}
/// Increments the count of `hash` by one.
///
/// If `hash` is not present, it's added with a count of one.
/// If `hash` is present, its existing count is incremented.
///
/// Returns the new total count of `hash` on success, or `Err` if the filter is already at max
/// capacity
pub fn inc_count(&self, hash: Murmur3Hash) -> FilterResult {
return self.add_count(hash, 1);
}
/// Subtracts `count` from the total count for `hash` in the filter.
///
/// If `hash` is not present in the filter, an error is returned
/// If `hash` is present, `count` is subtracted from the existing count. The resulting count
/// is returned. If subtracting `count` from the existing count results in a value less than
/// 1, a resulting count of 0 is returned and `hash` is removed from the filter.
///
/// As with the `query` method it is possible `hash` collides with another hash value
///
/// Returns the new total count of `hash` on success, which may be 0 in which case `hash` has
/// been removed from the filter, or an error if `hash` was not found in the filter
pub fn sub_count(&self, hash: Murmur3Hash, count: usize) -> FilterResult {
panic!("NYI");
}
pub fn dec_count(&self, hash: Murmur3Hash) -> FilterResult {
return self.sub_count(hash, 1);
}
/// Given a Murmur3 hash as input, extracts the quotient `q` and remainder `r` which will be
/// used to look up this item in the filter.
///
/// Though both values are of type `u64`, the number of bits used in each is based on the size
/// (`n`) and false-positive rate (`rbits`) specified when the filter was created
fn get_q_and_r(&self, hash: Murmur3Hash) -> (u64, u64) {
//Use only the 64-bit hash and pull out the bits we'll use for q and r
let hash = hash.value64();
// To compute the quotient q for this hash, shift right to remove the bits to be used as
// the remainder r, then mask out q bits
let q = (hash.wrapping_shr(self.meta.rbits as u32)) & bitmask!(self.meta.qbits);
let r = hash & bitmask!(self.meta.rbits as u32);
(q, r)
}
}
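// A hedged sketch of the bit layout described in `get_q_and_r()`: with
// rbits = 9 the low 9 bits of the 64-bit hash become r and the next
// qbits = 14 bits become q. The hash value and function name are illustrative,
// and `Murmur3Hash::new` is assumed to wrap the given value the same way the
// existing tests below rely on.
#[cfg(test)]
#[allow(dead_code)]
fn q_and_r_layout_demo() {
    let filter = RSQF::new(10_000, 9); // qbits works out to 14 for these params
    let (q, r) = filter.get_q_and_r(Murmur3Hash::new(0b1_1111_0000_1010u128));
    assert_eq!(r, 0b1_0000_1010); // the low 9 bits
    assert_eq!(q, 0b1111); // the next 14 bits (only four are set here)
}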
#[cfg(test)]
mod rsqf_tests {
use super::*;
use murmur::Murmur3Hash;
#[test]
fn creates_empty_filter() {
let _filter = RSQF::new(10000, 9);
}
#[test]
#[should_panic]
fn panics_on_invalid_r() {
RSQF::new(10000, 8);
}
#[test]
fn computes_valid_metadata() {
let filter = RSQF::new(10000, 9);
assert_eq!(filter.meta.n, 10000);
assert_eq!(filter.meta.rbits, 9);
assert_eq!(filter.meta.qbits, 14);
assert_eq!(filter.meta.nslots, 1usize << 14);
assert_eq!(filter.meta.nblocks, (filter.meta.nslots + 64 - 1) / 64);
assert_eq!(filter.meta.noccupied_slots, 0);
assert_eq!(filter.meta.nelements, 0);
assert_eq!(filter.meta.ndistinct_elements, 0);
assert_eq!(
filter.meta.max_slots,
((filter.meta.nslots as f64) * 0.95) as usize
);
}
#[test]
#[ignore]
fn get_count_nonexistent_item_returns_zero() {
let filter = RSQF::new(10000, 9);
assert_eq!(0, filter.get_count(Murmur3Hash::new(1)));
}
#[test]
fn get_q_and_r_returns_correct_results() {
let test_data = [
// (n, rbits, hash)
(30usize, 9usize, 0x0000_0000u128),
(30usize, 9usize, 0b0000_0001_1111_1111u128),
(30usize, 9usize, 0b1111_0001_1111_0000u128),
];
for (n, rbits, hash) in test_data.into_iter() {
let filter = RSQF::new(*n, *rbits);
println!(
"n={} qbits={} rbits={} hash={:x}",
n, filter.meta.qbits, rbits, hash
);
let hash = Murmur3Hash::new(*hash);
let (q, r) = filter.get_q_and_r(hash);
println!("q={:x}", q);
println!("r={:x}", r);
let rbitmask = u128::max_value() >> (128 - *rbits);
let qbitmask = u128::max_value() >> (128 - filter.meta.qbits);
//The lower rbits bits of the hash should be r
assert_eq!(hash.value128() & rbitmask, r as u128);
assert_eq!((hash.value128() >> rbits) & qbitmask, q as u128);
}
}
}
#[allow(dead_code)] // for now
#[allow(unused_variables)] // for now
impl Metadata {
/// Creates a metadata structure for the filter based on `n` the number of expected elements
/// and `rbits` which specifies the false positive rate at `1/(2^rbits - 1)`
fn from_n_and_r(n: usize, rbits: usize) -> Metadata {
assert!(block::SLOTS_PER_BLOCK == 64usize); //this code assumes 64 slots per block always
assert!(rbits as usize == block::BITS_PER_SLOT); //TODO: figure out how to make this configurable
let qbits = Metadata::calculate_qbits(n, rbits);
let total_slots = 1usize << qbits; //2^qbits slots in the filter
let nblocks = (total_slots + block::SLOTS_PER_BLOCK - 1) / block::SLOTS_PER_BLOCK;
//Conservatively, set the maximum number of elements to 95% of the total capacity
//Realistically this structure can go higher than that but there starts to be a performance
//penalty and it's better to resize at that point
let max_slots = ((total_slots as f64) * 0.95) as usize;
return Metadata {
n,
rbits,
qbits,
nblocks,
max_slots,
nslots: total_slots,
..Default::default()
};
}
/// Given the insert count `n` and the remainder bits `rbits`, calculates the quotient size
/// `qbits` which will provide a false positive rate of no worse than `1/(2^rbits - 1)`
fn calculate_qbits(n: usize, rbits: usize) -> usize {
assert!(rbits > 1);
assert!(n > 0);
let sigma = 2.0f64.powi(-(rbits as i32));
let p = ((n as f64) / sigma).log2().ceil() as usize;
assert!(p > rbits);
let qbits = p - rbits;
qbits
}
}
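// A hedged worked example of the formula above: for n = 100_000 and rbits = 6,
// n / 2^-6 = 6_400_000, ceil(log2(6_400_000)) = 23, so qbits = 23 - 6 = 17,
// matching the table in the tests below.
#[cfg(test)]
#[allow(dead_code)]
fn qbits_worked_example() {
    assert_eq!(Metadata::calculate_qbits(100_000, 6), 17);
}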
#[cfg(test)]
mod metadata_tests {
use super::*;
#[test]
#[should_panic]
fn panics_on_invalid_rbits() {
Metadata::from_n_and_r(10000, 8);
}
#[test]
fn computes_valid_q_for_n_and_r() {
// Test data values were computed from a Google Sheet using formulae from the RSQF
// paper
let test_data = [
// (n, r, expected_q)
(100_000_usize, 6_usize, 17),
(1_000_000_usize, 6_usize, 20),
(10_000_000_usize, 6_usize, 24),
(100_000_usize, 8_usize, 17),
(1_000_000_usize, 8_usize, 20),
(10_000_000_usize, 8_usize, 24),
(100_000_usize, 9_usize, 17),
(1_000_000_usize, 9_usize, 20),
(10_000_000_usize, 9_usize, 24),
];
for (n, r, expected_qbits) in test_data.into_iter() {
let q = Metadata::calculate_qbits(*n, *r);
assert_eq!(*expected_qbits, q, "n={} r={}", *n, *r);
}
}
#[test]
fn computes_valid_metadata_for_n_and_r() {
let test_data = [
// (n, r, expected_qbits, expected_nslots)
(10_000_usize, 9_usize, 14, 1usize << 14),
];
for (n, r, expected_qbits, expected_nslots) in test_data.into_iter() {
let meta = Metadata::from_n_and_r(*n, *r);
assert_eq!(meta.n, *n);
assert_eq!(meta.rbits, *r);
assert_eq!(meta.qbits, *expected_qbits);
assert_eq!(meta.nslots, *expected_nslots);
assert_eq!(meta.nblocks, (meta.nslots + 64 - 1) / 64);
assert_eq!(meta.noccupied_slots, 0);
assert_eq!(meta.nelements, 0);
assert_eq!(meta.ndistinct_elements, 0);
assert_eq!(meta.max_slots, ((meta.nslots as f64) * 0.95) as usize);
}
}
} | #[derive(Default, PartialEq)] | random_line_split |
rsqf.rs | use block;
use logical;
use murmur::Murmur3Hash;
#[allow(dead_code)] // for now
pub struct RSQF {
meta: Metadata,
logical: logical::LogicalData,
}
#[allow(dead_code)] // for now
#[derive(Default, PartialEq)]
struct Metadata {
n: usize,
qbits: usize,
rbits: usize,
nblocks: usize,
nelements: usize,
ndistinct_elements: usize,
nslots: usize,
noccupied_slots: usize,
max_slots: usize,
}
/// Standard filter result type: on success returns a count, on error returns a message.
/// This should probably be richer over time
type FilterResult = Result<usize, &'static str>;
#[allow(dead_code)] // for now
#[allow(unused_variables)] // for now
impl RSQF {
pub fn new(n: usize, rbits: usize) -> RSQF {
RSQF::from_n_and_r(n, rbits)
}
/// Creates a structure for the filter based on `n` the number of expected elements
/// and `rbits` which specifies the false positive rate at `1/(2^rbits - 1)`
fn from_n_and_r(n: usize, rbits: usize) -> RSQF {
RSQF::from_metadata(Metadata::from_n_and_r(n, rbits))
}
/// Creates an instance of the filter given the description of the filter parameters stored in
/// a `Metadata` structure
fn from_metadata(meta: Metadata) -> RSQF {
let logical = logical::LogicalData::new(meta.nslots, meta.rbits);
return RSQF { meta, logical };
}
/// Queries the filter for the presence of `hash`.
///
/// If `hash` is not present, returns 0
/// If `hash` is likely to be present, returns an approximate count of the number of times
/// `hash` has been inserted. Note that this is approximate; it is possible that `hash` is
/// actually not present but a non-zero count is returned, with a probability no worse than
/// `2^-rbits`
pub fn get_count(&self, hash: Murmur3Hash) -> usize {
panic!("NYI");
}
/// Adds `count` to the total count for `hash` in the filter.
///
/// If `hash` is not present in the filter, it is added with `count` count.
/// If `hash` is present, `count` is added to its existing count.
///
/// As with the `query` method it is possible `hash` collides with another hash value
///
/// Returns the new total count of `hash` on success, or `Err` if the filter is already at max
/// capacity
pub fn add_count(&self, hash: Murmur3Hash, count: usize) -> FilterResult {
panic!("NYI");
}
/// Increments the count of `hash` by one.
///
/// If `hash` is not present, it's added with a count of one.
/// If `hash` is present, its existing count is incremented.
///
/// Returns the new total count of `hash` on success, or `Err` if the filter is already at max
/// capacity
pub fn inc_count(&self, hash: Murmur3Hash) -> FilterResult {
return self.add_count(hash, 1);
}
/// Subtracts `count` from the total count for `hash` in the filter.
///
/// If `hash` is not present in the filter, an error is returned
/// If `hash` is present, `count` is subtracted from the existing count. The resulting count
/// is returned. If subtracting `count` from the existing count results in a value less than
/// 1, a resulting count of 0 is returned and `hash` is removed from the filter.
///
/// As with the `query` method it is possible `hash` collides with another hash value
///
/// Returns the new total count of `hash` on success, which may be 0 in which case `hash` has
/// been removed from the filter, or an error if `hash` was not found in the filter
pub fn sub_count(&self, hash: Murmur3Hash, count: usize) -> FilterResult {
panic!("NYI");
}
pub fn dec_count(&self, hash: Murmur3Hash) -> FilterResult {
return self.sub_count(hash, 1);
}
/// Given a Murmur3 hash as input, extracts the quotient `q` and remainder `r` which will be
/// used to look up this item in the filter.
///
/// Though both values are of type `u64`, the number of bits used in each is based on the size
/// (`n`) and false-positive rate (`rbits`) specified when the filter was created
fn get_q_and_r(&self, hash: Murmur3Hash) -> (u64, u64) {
//Use only the 64-bit hash and pull out the bits we'll use for q and r
let hash = hash.value64();
// To compute the quotient q for this hash, shift right to remove the bits to be used as
// the remainder r, then mask out q bits
let q = (hash.wrapping_shr(self.meta.rbits as u32)) & bitmask!(self.meta.qbits);
let r = hash & bitmask!(self.meta.rbits as u32);
(q, r)
}
}
#[cfg(test)]
mod rsqf_tests {
use super::*;
use murmur::Murmur3Hash;
#[test]
fn creates_empty_filter() {
let _filter = RSQF::new(10000, 9);
}
#[test]
#[should_panic]
fn | () {
RSQF::new(10000, 8);
}
#[test]
fn computes_valid_metadata() {
let filter = RSQF::new(10000, 9);
assert_eq!(filter.meta.n, 10000);
assert_eq!(filter.meta.rbits, 9);
assert_eq!(filter.meta.qbits, 14);
assert_eq!(filter.meta.nslots, 1usize << 14);
assert_eq!(filter.meta.nblocks, (filter.meta.nslots + 64 - 1) / 64);
assert_eq!(filter.meta.noccupied_slots, 0);
assert_eq!(filter.meta.nelements, 0);
assert_eq!(filter.meta.ndistinct_elements, 0);
assert_eq!(
filter.meta.max_slots,
((filter.meta.nslots as f64) * 0.95) as usize
);
}
#[test]
#[ignore]
fn get_count_nonexistent_item_returns_zero() {
let filter = RSQF::new(10000, 9);
assert_eq!(0, filter.get_count(Murmur3Hash::new(1)));
}
#[test]
fn get_q_and_r_returns_correct_results() {
let test_data = [
// (n, rbits, hash)
(30usize, 9usize, 0x0000_0000u128),
(30usize, 9usize, 0b0000_0001_1111_1111u128),
(30usize, 9usize, 0b1111_0001_1111_0000u128),
];
for (n, rbits, hash) in test_data.into_iter() {
let filter = RSQF::new(*n, *rbits);
println!(
"n={} qbits={} rbits={} hash={:x}",
n, filter.meta.qbits, rbits, hash
);
let hash = Murmur3Hash::new(*hash);
let (q, r) = filter.get_q_and_r(hash);
println!("q={:x}", q);
println!("r={:x}", r);
let rbitmask = u128::max_value() >> (128 - *rbits);
let qbitmask = u128::max_value() >> (128 - filter.meta.qbits);
//The lower rbits bits of the hash should be r
assert_eq!(hash.value128() & rbitmask, r as u128);
assert_eq!((hash.value128() >> rbits) & qbitmask, q as u128);
}
}
}
#[allow(dead_code)] // for now
#[allow(unused_variables)] // for now
impl Metadata {
/// Creates a metadata structure for the filter based on `n` the number of expected elements
/// and `rbits` which specifies the false positive rate at `1/(2^rbits - 1)`
fn from_n_and_r(n: usize, rbits: usize) -> Metadata {
assert!(block::SLOTS_PER_BLOCK == 64usize); //this code assumes 64 slots per block always
assert!(rbits as usize == block::BITS_PER_SLOT); //TODO: figure out how to make this configurable
let qbits = Metadata::calculate_qbits(n, rbits);
let total_slots = 1usize << qbits; //2^qbits slots in the filter
let nblocks = (total_slots + block::SLOTS_PER_BLOCK - 1) / block::SLOTS_PER_BLOCK;
//Conservatively, set the maximum number of elements to 95% of the total capacity
//Realistically this structure can go higher than that but there starts to be a performance
//penalty and it's better to resize at that point
let max_slots = ((total_slots as f64) * 0.95) as usize;
return Metadata {
n,
rbits,
qbits,
nblocks,
max_slots,
nslots: total_slots,
..Default::default()
};
}
/// Given the insert count `n` and the remainder bits `rbits`, calculates the quotient size
/// `qbits` which will provide a false positive rate of no worse than `1/(2^rbits - 1)`
fn calculate_qbits(n: usize, rbits: usize) -> usize {
assert!(rbits > 1);
assert!(n > 0);
let sigma = 2.0f64.powi(-(rbits as i32));
let p = ((n as f64) / sigma).log2().ceil() as usize;
assert!(p > rbits);
let qbits = p - rbits;
qbits
}
}
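// A hedged worked example of the capacity cap described above: qbits = 14
// gives 2^14 = 16_384 slots, and 95% of that is 15_564.
#[cfg(test)]
#[allow(dead_code)]
fn max_slots_worked_example() {
    let meta = Metadata::from_n_and_r(10_000, 9);
    assert_eq!(meta.nslots, 16_384);
    assert_eq!(meta.max_slots, 15_564);
}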
#[cfg(test)]
mod metadata_tests {
use super::*;
#[test]
#[should_panic]
fn panics_on_invalid_rbits() {
Metadata::from_n_and_r(10000, 8);
}
#[test]
fn computes_valid_q_for_n_and_r() {
// Test data values were computed from a Google Sheet using formulae from the RSQF
// paper
let test_data = [
// (n, r, expected_q)
(100_000_usize, 6_usize, 17),
(1_000_000_usize, 6_usize, 20),
(10_000_000_usize, 6_usize, 24),
(100_000_usize, 8_usize, 17),
(1_000_000_usize, 8_usize, 20),
(10_000_000_usize, 8_usize, 24),
(100_000_usize, 9_usize, 17),
(1_000_000_usize, 9_usize, 20),
(10_000_000_usize, 9_usize, 24),
];
for (n, r, expected_qbits) in test_data.into_iter() {
let q = Metadata::calculate_qbits(*n, *r);
assert_eq!(*expected_qbits, q, "n={} r={}", *n, *r);
}
}
#[test]
fn computes_valid_metadata_for_n_and_r() {
let test_data = [
// (n, r, expected_qbits, expected_nslots)
(10_000_usize, 9_usize, 14, 1usize << 14),
];
for (n, r, expected_qbits, expected_nslots) in test_data.into_iter() {
let meta = Metadata::from_n_and_r(*n, *r);
assert_eq!(meta.n, *n);
assert_eq!(meta.rbits, *r);
assert_eq!(meta.qbits, *expected_qbits);
assert_eq!(meta.nslots, *expected_nslots);
assert_eq!(meta.nblocks, (meta.nslots + 64 - 1) / 64);
assert_eq!(meta.noccupied_slots, 0);
assert_eq!(meta.nelements, 0);
assert_eq!(meta.ndistinct_elements, 0);
assert_eq!(meta.max_slots, ((meta.nslots as f64) * 0.95) as usize);
}
}
}
| panics_on_invalid_r | identifier_name |
rsqf.rs | use block;
use logical;
use murmur::Murmur3Hash;
#[allow(dead_code)] // for now
pub struct RSQF {
meta: Metadata,
logical: logical::LogicalData,
}
#[allow(dead_code)] // for now
#[derive(Default, PartialEq)]
struct Metadata {
n: usize,
qbits: usize,
rbits: usize,
nblocks: usize,
nelements: usize,
ndistinct_elements: usize,
nslots: usize,
noccupied_slots: usize,
max_slots: usize,
}
/// Standard filter result type, on success returns a count on error returns a message
/// This should probably be richer over time
type FilterResult = Result<usize, &'static str>;
#[allow(dead_code)] // for now
#[allow(unused_variables)] // for now
impl RSQF {
pub fn new(n: usize, rbits: usize) -> RSQF {
RSQF::from_n_and_r(n, rbits)
}
/// Creates a structure for the filter based on `n` the number of expected elements
/// and `rbits` which specifies the false positive rate at `1/(2^rbits - 1)`
fn from_n_and_r(n: usize, rbits: usize) -> RSQF {
RSQF::from_metadata(Metadata::from_n_and_r(n, rbits))
}
/// Creates an instance of the filter given the description of the filter parameters stored in
/// a `Metadata` structure
fn from_metadata(meta: Metadata) -> RSQF {
let logical = logical::LogicalData::new(meta.nslots, meta.rbits);
return RSQF { meta, logical };
}
/// Queries the filter for the presence of `hash`.
///
/// If `hash` is not present, returns 0
/// If `hash` is likely to be present, returns an approximate count of the number of times
/// `hash` has been inserted. Note that this is approximate; it is possible that `hash` is
/// actually not present but a non-zero count is returned, with a probability no worse than
/// `2^-rbits`
pub fn get_count(&self, hash: Murmur3Hash) -> usize {
panic!("NYI");
}
/// Adds `count` to the total count for `hash` in the filter.
///
/// If `hash` is not present in the filter, it is added with `count` count.
/// If `hash` is present, `count` is added to its existing count.
///
/// As with the `query` method it is possible `hash` collides with another hash value
///
/// Returns the new total count of `hash` on success, or `Err` if the filter is already at max
/// capacity
pub fn add_count(&self, hash: Murmur3Hash, count: usize) -> FilterResult {
panic!("NYI");
}
/// Increments the count of `hash` by one.
///
/// If `hash` is not present, it's added with a count of one.
/// If `hash` is present, its existing count is incremented.
///
/// Returns the new total count of `hash` on success, or `Err` if the filter is already at max
/// capacity
pub fn inc_count(&self, hash: Murmur3Hash) -> FilterResult {
return self.add_count(hash, 1);
}
/// Subtracts `count` from the total count for `hash` in the filter.
///
/// If `hash` is not present in the filter, an error is returned
/// If `hash` is present, `count` is subtracted from the existing count. The resulting count
/// is returned. If subtracting `count` from the existing count results in a value less than
/// 1, a resulting count of 0 is returned and `hash` is removed from the filter.
///
/// As with the `query` method it is possible `hash` collides with another hash value
///
/// Returns the new total count of `hash` on success, which may be 0 in which case `hash` has
/// been removed from the filter, or an error if `hash` was not found in the filter
pub fn sub_count(&self, hash: Murmur3Hash, count: usize) -> FilterResult {
panic!("NYI");
}
pub fn dec_count(&self, hash: Murmur3Hash) -> FilterResult {
return self.sub_count(hash, 1);
}
/// Given a Murmur3 hash as input, extracts the quotient `q` and remainder `r` which will be
/// used to look up this item in the filter.
///
/// Though both values are of type `u64`, the number of bits used in each is based on the size
/// (`n`) and false-positive rate (`rbits`) specified when the filter was created
fn get_q_and_r(&self, hash: Murmur3Hash) -> (u64, u64) {
//Use only the 64-bit hash and pull out the bits we'll use for q and r
let hash = hash.value64();
// To compute the quotient q for this hash, shift right to remove the bits to be used as
// the remainder r, then mask out q bits
let q = (hash.wrapping_shr(self.meta.rbits as u32)) & bitmask!(self.meta.qbits);
let r = hash & bitmask!(self.meta.rbits as u32);
(q, r)
}
}
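// A hedged worked example of the block layout: each block covers 64 slots, so
// the 2^14 slots produced by these parameters need 256 blocks. The function
// name is illustrative only.
#[cfg(test)]
#[allow(dead_code)]
fn nblocks_worked_example() {
    let filter = RSQF::new(10_000, 9);
    assert_eq!(filter.meta.nblocks, 256);
    assert_eq!(filter.meta.nblocks * 64, filter.meta.nslots);
}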
#[cfg(test)]
mod rsqf_tests {
use super::*;
use murmur::Murmur3Hash;
#[test]
fn creates_empty_filter() {
let _filter = RSQF::new(10000, 9);
}
#[test]
#[should_panic]
fn panics_on_invalid_r() {
RSQF::new(10000, 8);
}
#[test]
fn computes_valid_metadata() {
let filter = RSQF::new(10000, 9);
assert_eq!(filter.meta.n, 10000);
assert_eq!(filter.meta.rbits, 9);
assert_eq!(filter.meta.qbits, 14);
assert_eq!(filter.meta.nslots, 1usize << 14);
assert_eq!(filter.meta.nblocks, (filter.meta.nslots + 64 - 1) / 64);
assert_eq!(filter.meta.noccupied_slots, 0);
assert_eq!(filter.meta.nelements, 0);
assert_eq!(filter.meta.ndistinct_elements, 0);
assert_eq!(
filter.meta.max_slots,
((filter.meta.nslots as f64) * 0.95) as usize
);
}
#[test]
#[ignore]
fn get_count_nonexistent_item_returns_zero() {
let filter = RSQF::new(10000, 9);
assert_eq!(0, filter.get_count(Murmur3Hash::new(1)));
}
#[test]
fn get_q_and_r_returns_correct_results() {
let test_data = [
// (n, rbits, hash)
(30usize, 9usize, 0x0000_0000u128),
(30usize, 9usize, 0b0000_0001_1111_1111u128),
(30usize, 9usize, 0b1111_0001_1111_0000u128),
];
for (n, rbits, hash) in test_data.into_iter() {
let filter = RSQF::new(*n, *rbits);
println!(
"n={} qbits={} rbits={} hash={:x}",
n, filter.meta.qbits, rbits, hash
);
let hash = Murmur3Hash::new(*hash);
let (q, r) = filter.get_q_and_r(hash);
println!("q={:x}", q);
println!("r={:x}", r);
let rbitmask = u128::max_value() >> (128 - *rbits);
let qbitmask = u128::max_value() >> (128 - filter.meta.qbits);
//The lower rbits bits of the hash should be r
assert_eq!(hash.value128() & rbitmask, r as u128);
assert_eq!((hash.value128() >> rbits) & qbitmask, q as u128);
}
}
}
#[allow(dead_code)] // for now
#[allow(unused_variables)] // for now
impl Metadata {
/// Creates a metadata structure for the filter based on `n` the number of expected elements
/// and `rbits` which specifies the false positive rate at `1/(2^rbits - 1)`
fn from_n_and_r(n: usize, rbits: usize) -> Metadata {
assert!(block::SLOTS_PER_BLOCK == 64usize); //this code assumes 64 slots per block always
assert!(rbits as usize == block::BITS_PER_SLOT); //TODO: figure out how to make this configurable
let qbits = Metadata::calculate_qbits(n, rbits);
let total_slots = 1usize << qbits; //2^qbits slots in the filter
let nblocks = (total_slots + block::SLOTS_PER_BLOCK - 1) / block::SLOTS_PER_BLOCK;
//Conservatively, set the maximum number of elements to 95% of the total capacity
//Realistically this structure can go higher than that but there starts to be a performance
//penalty and it's better to resize at that point
let max_slots = ((total_slots as f64) * 0.95) as usize;
return Metadata {
n,
rbits,
qbits,
nblocks,
max_slots,
nslots: total_slots,
..Default::default()
};
}
/// Given the insert count `n` and the remainder bits `rbits`, calculates the quotient size
/// `qbits` which will provide a false positive rate of no worse than `1/(2^rbits - 1)`
fn calculate_qbits(n: usize, rbits: usize) -> usize {
assert!(rbits > 1);
assert!(n > 0);
let sigma = 2.0f64.powi(-(rbits as i32));
let p = ((n as f64) / sigma).log2().ceil() as usize;
assert!(p > rbits);
let qbits = p - rbits;
qbits
}
}
#[cfg(test)]
mod metadata_tests {
use super::*;
#[test]
#[should_panic]
fn panics_on_invalid_rbits() {
Metadata::from_n_and_r(10000, 8);
}
#[test]
fn computes_valid_q_for_n_and_r() | }
#[test]
fn computes_valid_metadata_for_n_and_r() {
let test_data = [
// (n, r, expected_qbits, expected_nslots)
(10_000_usize, 9_usize, 14, 1usize << 14),
];
for (n, r, expected_qbits, expected_nslots) in test_data.into_iter() {
let meta = Metadata::from_n_and_r(*n, *r);
assert_eq!(meta.n, *n);
assert_eq!(meta.rbits, *r);
assert_eq!(meta.qbits, *expected_qbits);
assert_eq!(meta.nslots, *expected_nslots);
assert_eq!(meta.nblocks, (meta.nslots + 64 - 1) / 64);
assert_eq!(meta.noccupied_slots, 0);
assert_eq!(meta.nelements, 0);
assert_eq!(meta.ndistinct_elements, 0);
assert_eq!(meta.max_slots, ((meta.nslots as f64) * 0.95) as usize);
}
}
}
| {
// Test data values were computed from a Google Sheet using formulae from the RSQF
// paper
let test_data = [
// (n, r, expected_q)
(100_000_usize, 6_usize, 17),
(1_000_000_usize, 6_usize, 20),
(10_000_000_usize, 6_usize, 24),
(100_000_usize, 8_usize, 17),
(1_000_000_usize, 8_usize, 20),
(10_000_000_usize, 8_usize, 24),
(100_000_usize, 9_usize, 17),
(1_000_000_usize, 9_usize, 20),
(10_000_000_usize, 9_usize, 24),
];
for (n, r, expected_qbits) in test_data.into_iter() {
let q = Metadata::calculate_qbits(*n, *r);
assert_eq!(*expected_qbits, q, "n={} r={}", *n, *r);
} | identifier_body |
lpc55_flash.rs |
/// Erases all non-secure flash. This MUST be done before writing!
#[clap(name = "flash-erase-all")]
FlashEraseAll,
/// Erases a portion of non-secure flash. This MUST be done before writing!
FlashEraseRegion {
#[arg(value_parser = parse_int::parse::<u32>)]
start_address: u32,
#[arg(value_parser = parse_int::parse::<u32>)]
byte_count: u32,
},
/// Write a file to the CMPA region
#[clap(name = "write-cmpa")]
WriteCMPA {
file: PathBuf,
},
/// Erase the CMPA region (use to boot non-secure binaries again)
#[clap(name = "erase-cmpa")]
EraseCMPA,
/// Save the CMPA region to a file
ReadCMPA {
/// Write to FILE, or stdout if omitted
file: Option<PathBuf>,
},
/// Save the CFPA region to a file
ReadCFPA {
#[clap(short, long)]
page: Option<CfpaChoice>,
file: PathBuf,
},
/// Write the CFPA region from the contents of a file.
WriteCFPA {
#[clap(short, long)]
update_version: bool,
file: PathBuf,
},
/// Put a minimalist program on to allow attaching via SWD
Restore,
/// Send SB update file
SendSBUpdate {
file: PathBuf,
},
/// Set up the key store. This involves:
/// - Enroll
/// - Setting UDS
/// - Setting SBKEK
/// - Writing to persistent storage
SetupKeyStore {
file: PathBuf,
},
/// Trigger a new enrollment in the PUF
Enroll,
/// Generate a new device secret for use in DICE
GenerateUDS,
/// Write keystore to flash
WriteKeyStore,
/// Erase existing keystore
EraseKeyStore,
/// Set the SBKEK, required for SB Updates
SetSBKek {
file: PathBuf,
},
GetProperty {
#[arg(value_parser = BootloaderProperty::from_str)]
prop: BootloaderProperty,
},
LastError,
}
#[derive(Copy, Clone, Debug, clap::ValueEnum)]
enum CfpaChoice {
Scratch,
Ping,
Pong,
}
#[derive(Debug, Parser)]
#[clap(name = "isp")]
struct Isp {
/// UART port
#[clap(name = "port")]
port: String,
/// How fast to run the UART. 57,600 baud seems very reliable but is rather
/// slow. In certain test setups we've gotten rates of up to 1Mbaud to work
/// reliably -- your mileage may vary!
#[clap(short = 'b', default_value = "57600")]
baud_rate: u32,
#[clap(subcommand)]
cmd: ISPCommand,
}
fn | (prop: BootloaderProperty, params: Vec<u32>) {
match prop {
BootloaderProperty::BootloaderVersion => {
println!("Version {:x}", params[1]);
}
BootloaderProperty::AvailablePeripherals => {
println!("Bitmask of peripherals {:x}", params[1]);
}
BootloaderProperty::FlashStart => {
println!("Flash start = 0x{:x}", params[1]);
}
BootloaderProperty::FlashSize => {
println!("Flash Size = {:x}", params[1]);
}
BootloaderProperty::FlashSectorSize => {
println!("Flash Sector Size = {:x}", params[1]);
}
BootloaderProperty::AvailableCommands => {
println!("Bitmask of commands = {:x}", params[1]);
}
BootloaderProperty::CRCStatus => {
println!("CRC status = {}", params[1]);
}
BootloaderProperty::VerifyWrites => {
println!("Verify Writes (bool) {}", params[1]);
}
BootloaderProperty::MaxPacketSize => {
println!("Max Packet Size = {}", params[1]);
}
BootloaderProperty::ReservedRegions => {
println!("Reserved regions? = {:x?}", params);
}
BootloaderProperty::RAMStart => {
println!("RAM start = 0x{:x}", params[1]);
}
BootloaderProperty::RAMSize => {
println!("RAM size = 0x{:x}", params[1]);
}
BootloaderProperty::SystemDeviceID => {
println!("DEVICE_ID0 register = 0x{:x}", params[1]);
}
BootloaderProperty::SecurityState => {
println!(
"Security State = {}",
if params[1] == 0x5aa55aa5 {
"UNLOCKED"
} else {
"LOCKED"
}
);
}
BootloaderProperty::UniqueID => {
println!(
"UUID = {:x}{:x}{:x}{:x}",
params[1], params[2], params[3], params[4]
);
}
BootloaderProperty::TargetVersion => {
println!("Target version = {:x}", params[1]);
}
BootloaderProperty::FlashPageSize => {
println!("Flash page size = {:x}", params[1]);
}
BootloaderProperty::IRQPinStatus => {
println!("IRQ Pin Status = {}", params[1]);
}
BootloaderProperty::FFRKeyStoreStatus => {
println!("FFR Store Status = {}", params[1]);
}
}
}
fn pretty_print_error(params: Vec<u32>) {
let reason = params[1] & 0xfffffff0;
if reason == 0 {
println!("No errors reported");
} else if reason == 0x0602f300 {
println!("Passive boot failed, reason:");
let specific_reason = params[2] & 0xfffffff0;
match specific_reason {
0x0b36f300 => {
println!("Secure image authentication failed. Check:");
println!("- Is the image you are booting signed?");
println!("- Is the image signed with the corresponding key?");
}
0x0b37f300 => {
println!("Application CRC failed");
}
0x0b35f300 => {
println!("Application entry point and/or stack is invalid");
}
0x0b38f300 => {
println!("DICE failure. Check:");
println!("- Key store is set up properly (UDS)");
}
0x0d70f300 => {
println!("Trying to boot a TZ image on a device that doesn't have TZ!");
}
0x0d71f300 => {
println!("Error reading TZ Image type from CMPA");
}
0x0d72f300 => {
println!("Bad TZ image mode, check your image");
}
0x0c00f500 => {
println!("Application returned to the ROM?");
}
_ => {
println!("Some other reason, raw bytes: {:x?}", params);
}
}
} else {
println!("Something bad happen: {:x?}", params);
}
}
fn main() -> Result<()> {
let cmd = Isp::parse();
// The target _technically_ has autobaud but it's very flaky
// and these seem to be the preferred settings
//
// We initially set the timeout short so we can drain the incoming buffer in
// a portable manner below. We'll adjust it up after that.
let mut port = serialport::new(&cmd.port, cmd.baud_rate)
.timeout(Duration::from_millis(100))
.data_bits(DataBits::Eight)
.flow_control(FlowControl::None)
.parity(Parity::None)
.stop_bits(StopBits::One)
.open()?;
// Extract any bytes left over in the serial port driver from previous
// interaction.
loop {
let mut throwaway = [0; 16];
match port.read(&mut throwaway) {
Ok(0) => {
// This should only happen on nonblocking reads, which we
// haven't asked for, but it does mean the buffer is empty so
// treat it as success.
break;
}
Ok(_) => {
// We've collected some characters to throw away, keep going.
}
Err(e) if e.kind() == ErrorKind::TimedOut => {
// Buffer is empty!
break;
}
Err(e) => {
return Err(e.into());
}
}
}
// Crank the timeout back up.
port.set_timeout(Duration::from_secs(1))?;
match cmd.cmd {
ISPCommand::Ping => {
do_ping(&mut *port)?;
println!("ping success.");
}
ISPCommand::ReadMemory {
address,
count,
path,
} => {
do_ping(&mut *port)?;
let m = do_isp_read_memory(&mut *port, address, count)?;
let mut out = std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&path)?;
out.write_all(&m)?;
println!("Output written to {:?}", path);
}
ISPCommand::WriteMemory { address, file } => {
do_ping(&mut *port)?;
println!("If you didn't already erase the flash this operation will fail!");
println!("This operation may take a while");
let infile = std::fs::read(file)?;
do_isp_write_memory(&mut *port, address, &infile)?;
println!("Write complete!");
}
ISPCommand::FlashEraseAll => {
do_ping(&mut *port)?;
do_isp_flash_erase_all(&mut *port)?;
println!("Flash erased!");
}
ISPCommand::FlashEraseRegion {
start_address,
byte_count,
} => {
do_ping(&mut *port)?;
do_isp_flash_erase_region(&mut *port, start_address, byte_count)?;
println!("Flash region erased!");
}
// Yes this is just another write-memory call but remembering addresses
// is hard.
ISPCommand::WriteCMPA { file } => {
do_ping(&mut *port)?;
let infile = std::fs::read(file)?;
do_isp_write_memory(&mut *port, 0x9e400, &infile)?;
println!("Write to CMPA done!");
}
ISPCommand::EraseCMPA => {
do_ping(&mut *port)?;
// Write 512 bytes of zero
let bytes = [0; 512];
do_isp_write_memory(&mut *port, 0x9e400, &bytes)?;
println!("CMPA region erased!");
println!("You can now boot unsigned images");
}
ISPCommand::ReadCMPA { file } => {
do_ping(&mut *port)?;
let m = do_isp_read_memory(&mut *port, 0x9e400, 512)?;
let mut out = match file {
Some(ref path) => Box::new(
std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(path)?,
) as Box<dyn Write>,
None => Box::new(std::io::stdout()) as Box<dyn Write>,
};
out.write_all(&m)?;
eprintln!("CMPA Output written to {:?}", file);
}
ISPCommand::ReadCFPA { page, file } => {
do_ping(&mut *port)?;
let data = if let Some(page) = page {
// Only read one page as requested
let addr = match page {
CfpaChoice::Scratch => 0x9de00,
CfpaChoice::Ping => 0x9e000,
CfpaChoice::Pong => 0x9e200,
};
do_isp_read_memory(&mut *port, addr, 512)?
} else {
// Read ping and pong pages and only write out the latest one.
let ping = do_isp_read_memory(&mut *port, 0x9e000, 512)
.context("reading CFPA ping page")?;
let pong = do_isp_read_memory(&mut *port, 0x9e200, 512)
.context("reading CFPA pong page")?;
let ping_d = lpc55_areas::CFPAPage::from_bytes(ping[..].try_into().unwrap())?;
let pong_d = lpc55_areas::CFPAPage::from_bytes(pong[..].try_into().unwrap())?;
println!(
"CFPA versions: ping={}, pong={}",
ping_d.version, pong_d.version
);
if ping_d.version > pong_d.version {
println!("choosing ping");
ping
} else {
println!("choosing pong");
pong
}
};
let mut out = std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&file)?;
out.write_all(&data)?;
println!("CFPA written to {file:?}");
}
ISPCommand::WriteCFPA {
update_version,
file,
} => {
do_ping(&mut *port)?;
let bytes = std::fs::read(file)?;
let mut new_cfpa = lpc55_areas::CFPAPage::from_bytes(
bytes[..].try_into().context("CFPA file is not 512 bytes")?,
)?;
// Read the CMPA so we can compare the two to try to avoid locking
// the user out of their chip.
let m = do_isp_read_memory(&mut *port, 0x9e400, 512)?;
let cmpa = lpc55_areas::CMPAPage::from_bytes(m[..].try_into().unwrap())?;
            if (new_cfpa.dcfg_cc_socu_ns_pin != 0 || new_cfpa.dcfg_cc_socu_ns_dflt != 0)
&& (cmpa.cc_socu_pin == 0 || cmpa.cc_socu_dflt == 0)
{
bail!(
"It looks like the CMPA debug settings aren't set but \
the CFPA settings are! This will brick the chip!"
);
// TODO I guess it's remotely possible that we might want an
// override for this check.
}
if update_version {
// Read the current CFPA areas to figure out what version we
// need to set.
let ping = do_isp_read_memory(&mut *port, 0x9_e000, 512)?;
let pong = do_isp_read_memory(&mut *port, 0x9_e200, 512)?;
let ping = lpc55_areas::CFPAPage::from_bytes(ping[..].try_into().unwrap())?;
let pong = lpc55_areas::CFPAPage::from_bytes(pong[..].try_into().unwrap())?;
println!(
"ping sector v={}, pong sector v={}",
ping.version, pong.version
);
let start_version = u32::max(ping.version, pong.version);
new_cfpa.version = start_version + 1;
println!("note: updated version is {}", new_cfpa.version);
}
let new_bytes = new_cfpa.to_vec()?;
do_isp_write_memory(&mut *port, 0x9_de00, &new_bytes)?;
println!("Write to CFPA done!");
}
ISPCommand::Restore => {
do_ping(&mut *port)?;
println!("Erasing flash");
do_isp_flash_erase_all(&mut *port)?;
println!("Erasing done.");
// we need to fill 0x134 bytes to cover the vector table
// plus all interrupts
let mut bytes: [u8; 0x134] = [0u8; 0x134];
// Choose a RAM address for the stack (we shouldn't use the stack
// but it should be valid anyway)
byteorder::LittleEndian::write_u32(&mut bytes[0x0..0x4], 0x20004000);
            // Every vector entry from 0x004 onward targets the branch-to-self
            // instruction at 0x00000130 (0x131 = that address with the Thumb bit set)
            let mut offset = 4;
            while offset < 0x130 {
                byteorder::LittleEndian::write_u32(&mut bytes[offset..offset + 4], 0x00000131);
                offset += 4;
            }
            // These are two Thumb branch-to-self instructions
byteorder::LittleEndian::write_u32(&mut bytes[0x130..0x134], 0xe7fee7fe);
println!("Writing bytes");
do_isp_write_memory(&mut *port, 0x0, &bytes)?;
println!("Restore done! SWD should work now.");
}
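        // For reference, the minimal image written above lays out as follows (derived
        // directly from the code): word 0x000 holds the initial stack pointer
        // (0x20004000, valid RAM), words 0x004..0x130 all point at 0x131 (the
        // instruction at 0x130 with the Thumb bit set), and 0x130..0x134 holds
        // 0xe7fee7fe, two Thumb branch-to-self instructions. Any reset or exception
        // therefore ends up spinning harmlessly, leaving the debug port reachable.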
ISPCommand::SendSBUpdate { file } => {
do_ping(&mut *port)?;
println!("Sending SB file, this may take a while");
let infile = std::fs::read(file)?;
do_recv_sb_file(&mut *port, &infile)?;
println!("Send complete!");
}
ISPCommand::Enroll => {
do_ping(&mut *port)?;
println!("Generating new activation code");
do_enroll(&mut *port)?;
println!("done.");
println!("If you want to save this, remember to write to non-volatile memory");
}
ISPCommand::GenerateUDS => {
do_ping(&mut *port)?;
println!("Generating new UDS");
do_generate_uds(&mut *port)?;
println!("done.");
println!("If you want to save this, remember to write to non-volatile memory");
}
ISPCommand::WriteKeyStore => {
do_ping(&mut *port)?;
println!("Writing key store to flash");
do_save_keystore(&mut *port)?;
println!("done.");
}
| pretty_print_bootloader_prop | identifier_name |
lpc55_flash.rs |
/// Erases all non-secure flash. This MUST be done before writing!
#[clap(name = "flash-erase-all")]
FlashEraseAll,
/// Erases a portion of non-secure flash. This MUST be done before writing!
FlashEraseRegion {
#[arg(value_parser = parse_int::parse::<u32>)]
start_address: u32,
#[arg(value_parser = parse_int::parse::<u32>)]
byte_count: u32,
},
/// Write a file to the CMPA region
#[clap(name = "write-cmpa")]
WriteCMPA {
file: PathBuf,
},
    /// Erase the CMPA region (use this to boot non-secure binaries again)
#[clap(name = "erase-cmpa")]
EraseCMPA,
/// Save the CMPA region to a file
ReadCMPA {
/// Write to FILE, or stdout if omitted
file: Option<PathBuf>,
},
/// Save the CFPA region to a file
ReadCFPA {
#[clap(short, long)]
page: Option<CfpaChoice>,
file: PathBuf,
},
/// Write the CFPA region from the contents of a file.
WriteCFPA {
#[clap(short, long)]
update_version: bool,
file: PathBuf,
},
    /// Put a minimalist program on flash to allow attaching via SWD
Restore,
/// Send SB update file
SendSBUpdate {
file: PathBuf,
},
    /// Set up the key store. This involves:
/// - Enroll
/// - Setting UDS
/// - Setting SBKEK
/// - Writing to persistent storage
SetupKeyStore {
file: PathBuf,
},
/// Trigger a new enrollment in the PUF
Enroll,
/// Generate a new device secret for use in DICE
GenerateUDS,
/// Write keystore to flash
WriteKeyStore,
/// Erase existing keystore
EraseKeyStore,
/// Set the SBKEK, required for SB Updates
SetSBKek {
file: PathBuf,
},
GetProperty {
#[arg(value_parser = BootloaderProperty::from_str)]
prop: BootloaderProperty,
},
LastError,
}
#[derive(Copy, Clone, Debug, clap::ValueEnum)]
enum CfpaChoice {
Scratch,
Ping,
Pong,
}
#[derive(Debug, Parser)]
#[clap(name = "isp")]
struct Isp {
/// UART port
#[clap(name = "port")]
port: String,
/// How fast to run the UART. 57,600 baud seems very reliable but is rather
/// slow. In certain test setups we've gotten rates of up to 1Mbaud to work
/// reliably -- your mileage may vary!
#[clap(short = 'b', default_value = "57600")]
baud_rate: u32,
#[clap(subcommand)]
cmd: ISPCommand,
}
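// Illustrative command lines (the binary name and serial device are placeholders,
// not defined by this file; subcommand names follow clap's default kebab-case):
//
//   lpc55_flash /dev/ttyUSB0 ping
//   lpc55_flash /dev/ttyUSB0 -b 115200 read-memory 0x0 0x1000 dump.bin
//   lpc55_flash /dev/ttyUSB0 flash-erase-all
//   lpc55_flash /dev/ttyUSB0 write-memory 0x0 image.bin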
fn pretty_print_bootloader_prop(prop: BootloaderProperty, params: Vec<u32>) | BootloaderProperty::CRCStatus => {
println!("CRC status = {}", params[1]);
}
BootloaderProperty::VerifyWrites => {
println!("Verify Writes (bool) {}", params[1]);
}
BootloaderProperty::MaxPacketSize => {
println!("Max Packet Size = {}", params[1]);
}
BootloaderProperty::ReservedRegions => {
println!("Reserved regions? = {:x?}", params);
}
BootloaderProperty::RAMStart => {
println!("RAM start = 0x{:x}", params[1]);
}
BootloaderProperty::RAMSize => {
println!("RAM size = 0x{:x}", params[1]);
}
BootloaderProperty::SystemDeviceID => {
println!("DEVICE_ID0 register = 0x{:x}", params[1]);
}
BootloaderProperty::SecurityState => {
println!(
"Security State = {}",
if params[1] == 0x5aa55aa5 {
"UNLOCKED"
} else {
"LOCKED"
}
);
}
BootloaderProperty::UniqueID => {
println!(
"UUID = {:x}{:x}{:x}{:x}",
params[1], params[2], params[3], params[4]
);
}
BootloaderProperty::TargetVersion => {
println!("Target version = {:x}", params[1]);
}
BootloaderProperty::FlashPageSize => {
println!("Flash page size = {:x}", params[1]);
}
BootloaderProperty::IRQPinStatus => {
println!("IRQ Pin Status = {}", params[1]);
}
BootloaderProperty::FFRKeyStoreStatus => {
println!("FFR Store Status = {}", params[1]);
}
}
}
fn pretty_print_error(params: Vec<u32>) {
let reason = params[1] & 0xfffffff0;
if reason == 0 {
println!("No errors reported");
} else if reason == 0x0602f300 {
println!("Passive boot failed, reason:");
let specific_reason = params[2] & 0xfffffff0;
match specific_reason {
0x0b36f300 => {
println!("Secure image authentication failed. Check:");
println!("- Is the image you are booting signed?");
println!("- Is the image signed with the corresponding key?");
}
0x0b37f300 => {
println!("Application CRC failed");
}
0x0b35f300 => {
println!("Application entry point and/or stack is invalid");
}
0x0b38f300 => {
println!("DICE failure. Check:");
println!("- Key store is set up properly (UDS)");
}
0x0d70f300 => {
println!("Trying to boot a TZ image on a device that doesn't have TZ!");
}
0x0d71f300 => {
println!("Error reading TZ Image type from CMPA");
}
0x0d72f300 => {
println!("Bad TZ image mode, check your image");
}
0x0c00f500 => {
println!("Application returned to the ROM?");
}
_ => {
println!("Some other reason, raw bytes: {:x?}", params);
}
}
} else {
        println!("Something bad happened: {:x?}", params);
}
}
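// Worked example of the decoding above: if the ROM reports params[1] = 0x0602f30c
// and params[2] = 0x0b36f30a (values made up for illustration), masking with
// 0xfffffff0 yields 0x0602f300 ("passive boot failed") and 0x0b36f300, so the
// secure-image authentication hints are printed. Unrecognized codes fall through
// to the raw-bytes printout.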
fn main() -> Result<()> {
let cmd = Isp::parse();
// The target _technically_ has autobaud but it's very flaky
// and these seem to be the preferred settings
//
// We initially set the timeout short so we can drain the incoming buffer in
// a portable manner below. We'll adjust it up after that.
let mut port = serialport::new(&cmd.port, cmd.baud_rate)
.timeout(Duration::from_millis(100))
.data_bits(DataBits::Eight)
.flow_control(FlowControl::None)
.parity(Parity::None)
.stop_bits(StopBits::One)
.open()?;
// Extract any bytes left over in the serial port driver from previous
// interaction.
loop {
let mut throwaway = [0; 16];
match port.read(&mut throwaway) {
Ok(0) => {
// This should only happen on nonblocking reads, which we
// haven't asked for, but it does mean the buffer is empty so
// treat it as success.
break;
}
Ok(_) => {
// We've collected some characters to throw away, keep going.
}
Err(e) if e.kind() == ErrorKind::TimedOut => {
// Buffer is empty!
break;
}
Err(e) => {
return Err(e.into());
}
}
}
// Crank the timeout back up.
port.set_timeout(Duration::from_secs(1))?;
match cmd.cmd {
ISPCommand::Ping => {
do_ping(&mut *port)?;
println!("ping success.");
}
ISPCommand::ReadMemory {
address,
count,
path,
} => {
do_ping(&mut *port)?;
let m = do_isp_read_memory(&mut *port, address, count)?;
let mut out = std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&path)?;
out.write_all(&m)?;
println!("Output written to {:?}", path);
}
ISPCommand::WriteMemory { address, file } => {
do_ping(&mut *port)?;
println!("If you didn't already erase the flash this operation will fail!");
println!("This operation may take a while");
let infile = std::fs::read(file)?;
do_isp_write_memory(&mut *port, address, &infile)?;
println!("Write complete!");
}
ISPCommand::FlashEraseAll => {
do_ping(&mut *port)?;
do_isp_flash_erase_all(&mut *port)?;
println!("Flash erased!");
}
ISPCommand::FlashEraseRegion {
start_address,
byte_count,
} => {
do_ping(&mut *port)?;
do_isp_flash_erase_region(&mut *port, start_address, byte_count)?;
println!("Flash region erased!");
}
// Yes this is just another write-memory call but remembering addresses
// is hard.
ISPCommand::WriteCMPA { file } => {
do_ping(&mut *port)?;
let infile = std::fs::read(file)?;
do_isp_write_memory(&mut *port, 0x9e400, &infile)?;
println!("Write to CMPA done!");
}
ISPCommand::EraseCMPA => {
do_ping(&mut *port)?;
// Write 512 bytes of zero
let bytes = [0; 512];
do_isp_write_memory(&mut *port, 0x9e400, &bytes)?;
println!("CMPA region erased!");
println!("You can now boot unsigned images");
}
ISPCommand::ReadCMPA { file } => {
do_ping(&mut *port)?;
let m = do_isp_read_memory(&mut *port, 0x9e400, 512)?;
let mut out = match file {
Some(ref path) => Box::new(
std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(path)?,
) as Box<dyn Write>,
None => Box::new(std::io::stdout()) as Box<dyn Write>,
};
out.write_all(&m)?;
eprintln!("CMPA Output written to {:?}", file);
}
ISPCommand::ReadCFPA { page, file } => {
do_ping(&mut *port)?;
let data = if let Some(page) = page {
// Only read one page as requested
let addr = match page {
CfpaChoice::Scratch => 0x9de00,
CfpaChoice::Ping => 0x9e000,
CfpaChoice::Pong => 0x9e200,
};
do_isp_read_memory(&mut *port, addr, 512)?
} else {
// Read ping and pong pages and only write out the latest one.
let ping = do_isp_read_memory(&mut *port, 0x9e000, 512)
.context("reading CFPA ping page")?;
let pong = do_isp_read_memory(&mut *port, 0x9e200, 512)
.context("reading CFPA pong page")?;
let ping_d = lpc55_areas::CFPAPage::from_bytes(ping[..].try_into().unwrap())?;
let pong_d = lpc55_areas::CFPAPage::from_bytes(pong[..].try_into().unwrap())?;
println!(
"CFPA versions: ping={}, pong={}",
ping_d.version, pong_d.version
);
if ping_d.version > pong_d.version {
println!("choosing ping");
ping
} else {
println!("choosing pong");
pong
}
};
let mut out = std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&file)?;
out.write_all(&data)?;
println!("CFPA written to {file:?}");
}
ISPCommand::WriteCFPA {
update_version,
file,
} => {
do_ping(&mut *port)?;
let bytes = std::fs::read(file)?;
let mut new_cfpa = lpc55_areas::CFPAPage::from_bytes(
bytes[..].try_into().context("CFPA file is not 512 bytes")?,
)?;
// Read the CMPA so we can compare the two to try to avoid locking
// the user out of their chip.
let m = do_isp_read_memory(&mut *port, 0x9e400, 512)?;
let cmpa = lpc55_areas::CMPAPage::from_bytes(m[..].try_into().unwrap())?;
            if (new_cfpa.dcfg_cc_socu_ns_pin != 0 || new_cfpa.dcfg_cc_socu_ns_dflt != 0)
&& (cmpa.cc_socu_pin == 0 || cmpa.cc_socu_dflt == 0)
{
bail!(
"It looks like the CMPA debug settings aren't set but \
the CFPA settings are! This will brick the chip!"
);
// TODO I guess it's remotely possible that we might want an
// override for this check.
}
if update_version {
// Read the current CFPA areas to figure out what version we
// need to set.
let ping = do_isp_read_memory(&mut *port, 0x9_e000, 512)?;
let pong = do_isp_read_memory(&mut *port, 0x9_e200, 512)?;
let ping = lpc55_areas::CFPAPage::from_bytes(ping[..].try_into().unwrap())?;
let pong = lpc55_areas::CFPAPage::from_bytes(pong[..].try_into().unwrap())?;
println!(
"ping sector v={}, pong sector v={}",
ping.version, pong.version
);
let start_version = u32::max(ping.version, pong.version);
new_cfpa.version = start_version + 1;
println!("note: updated version is {}", new_cfpa.version);
}
let new_bytes = new_cfpa.to_vec()?;
do_isp_write_memory(&mut *port, 0x9_de00, &new_bytes)?;
println!("Write to CFPA done!");
}
ISPCommand::Restore => {
do_ping(&mut *port)?;
println!("Erasing flash");
do_isp_flash_erase_all(&mut *port)?;
println!("Erasing done.");
// we need to fill 0x134 bytes to cover the vector table
// plus all interrupts
let mut bytes: [u8; 0x134] = [0u8; 0x134];
// Choose a RAM address for the stack (we shouldn't use the stack
// but it should be valid anyway)
byteorder::LittleEndian::write_u32(&mut bytes[0x0..0x4], 0x20004000);
            // Every vector entry from 0x004 onward targets the branch-to-self
            // instruction at 0x00000130 (0x131 = that address with the Thumb bit set)
            let mut offset = 4;
            while offset < 0x130 {
                byteorder::LittleEndian::write_u32(&mut bytes[offset..offset + 4], 0x00000131);
                offset += 4;
            }
            // These are two Thumb branch-to-self instructions
byteorder::LittleEndian::write_u32(&mut bytes[0x130..0x134], 0xe7fee7fe);
println!("Writing bytes");
do_isp_write_memory(&mut *port, 0x0, &bytes)?;
println!("Restore done! SWD should work now.");
}
ISPCommand::SendSBUpdate { file } => {
do_ping(&mut *port)?;
println!("Sending SB file, this may take a while");
let infile = std::fs::read(file)?;
do_recv_sb_file(&mut *port, &infile)?;
println!("Send complete!");
}
ISPCommand::Enroll => {
do_ping(&mut *port)?;
println!("Generating new activation code");
do_enroll(&mut *port)?;
println!("done.");
println!("If you want to save this, remember to write to non-volatile memory");
}
ISPCommand::GenerateUDS => {
do_ping(&mut *port)?;
println!("Generating new UDS");
do_generate_uds(&mut *port)?;
println!("done.");
println!("If you want to save this, remember to write to non-volatile memory");
}
ISPCommand::WriteKeyStore => {
do_ping(&mut *port)?;
println!("Writing key store to flash");
do_save_keystore(&mut *port)?;
println!("done.");
}
| {
match prop {
BootloaderProperty::BootloaderVersion => {
println!("Version {:x}", params[1]);
}
BootloaderProperty::AvailablePeripherals => {
println!("Bitmask of peripherals {:x}", params[1]);
}
BootloaderProperty::FlashStart => {
println!("Flash start = 0x{:x}", params[1]);
}
BootloaderProperty::FlashSize => {
println!("Flash Size = {:x}", params[1]);
}
BootloaderProperty::FlashSectorSize => {
println!("Flash Sector Size = {:x}", params[1]);
}
BootloaderProperty::AvailableCommands => {
println!("Bitmask of commands = {:x}", params[1]);
} | identifier_body |
lpc55_flash.rs |
/// Erases all non-secure flash. This MUST be done before writing!
#[clap(name = "flash-erase-all")]
FlashEraseAll,
/// Erases a portion of non-secure flash. This MUST be done before writing!
FlashEraseRegion {
#[arg(value_parser = parse_int::parse::<u32>)]
start_address: u32,
#[arg(value_parser = parse_int::parse::<u32>)]
byte_count: u32,
},
/// Write a file to the CMPA region
#[clap(name = "write-cmpa")]
WriteCMPA {
file: PathBuf,
},
    /// Erase the CMPA region (use this to boot non-secure binaries again)
#[clap(name = "erase-cmpa")]
EraseCMPA,
/// Save the CMPA region to a file
ReadCMPA {
/// Write to FILE, or stdout if omitted
file: Option<PathBuf>,
},
/// Save the CFPA region to a file
ReadCFPA {
#[clap(short, long)]
page: Option<CfpaChoice>,
file: PathBuf,
},
/// Write the CFPA region from the contents of a file.
WriteCFPA {
#[clap(short, long)]
update_version: bool,
file: PathBuf,
},
    /// Put a minimalist program on flash to allow attaching via SWD
Restore,
/// Send SB update file
SendSBUpdate {
file: PathBuf,
},
    /// Set up the key store. This involves:
/// - Enroll
/// - Setting UDS
/// - Setting SBKEK
/// - Writing to persistent storage
SetupKeyStore {
file: PathBuf,
},
/// Trigger a new enrollment in the PUF
Enroll,
/// Generate a new device secret for use in DICE
GenerateUDS,
/// Write keystore to flash
WriteKeyStore,
/// Erase existing keystore
EraseKeyStore,
/// Set the SBKEK, required for SB Updates
SetSBKek {
file: PathBuf,
},
GetProperty {
#[arg(value_parser = BootloaderProperty::from_str)]
prop: BootloaderProperty,
},
LastError,
}
#[derive(Copy, Clone, Debug, clap::ValueEnum)]
enum CfpaChoice {
Scratch,
Ping,
Pong,
}
#[derive(Debug, Parser)]
#[clap(name = "isp")]
struct Isp {
/// UART port
#[clap(name = "port")]
port: String,
/// How fast to run the UART. 57,600 baud seems very reliable but is rather
/// slow. In certain test setups we've gotten rates of up to 1Mbaud to work
/// reliably -- your mileage may vary!
#[clap(short = 'b', default_value = "57600")]
baud_rate: u32,
#[clap(subcommand)]
cmd: ISPCommand,
}
fn pretty_print_bootloader_prop(prop: BootloaderProperty, params: Vec<u32>) {
match prop {
BootloaderProperty::BootloaderVersion => {
println!("Version {:x}", params[1]);
}
BootloaderProperty::AvailablePeripherals => {
println!("Bitmask of peripherals {:x}", params[1]);
}
BootloaderProperty::FlashStart => {
println!("Flash start = 0x{:x}", params[1]);
}
BootloaderProperty::FlashSize => {
println!("Flash Size = {:x}", params[1]);
}
BootloaderProperty::FlashSectorSize => {
println!("Flash Sector Size = {:x}", params[1]);
}
BootloaderProperty::AvailableCommands => {
println!("Bitmask of commands = {:x}", params[1]);
}
BootloaderProperty::CRCStatus => {
println!("CRC status = {}", params[1]);
}
BootloaderProperty::VerifyWrites => {
println!("Verify Writes (bool) {}", params[1]);
}
BootloaderProperty::MaxPacketSize => {
println!("Max Packet Size = {}", params[1]);
}
BootloaderProperty::ReservedRegions => {
println!("Reserved regions? = {:x?}", params);
}
BootloaderProperty::RAMStart => {
println!("RAM start = 0x{:x}", params[1]);
}
BootloaderProperty::RAMSize => {
println!("RAM size = 0x{:x}", params[1]);
}
BootloaderProperty::SystemDeviceID => {
println!("DEVICE_ID0 register = 0x{:x}", params[1]);
}
BootloaderProperty::SecurityState => {
println!(
"Security State = {}",
if params[1] == 0x5aa55aa5 {
"UNLOCKED"
} else {
"LOCKED"
}
);
}
BootloaderProperty::UniqueID => {
println!(
"UUID = {:x}{:x}{:x}{:x}",
params[1], params[2], params[3], params[4]
);
}
BootloaderProperty::TargetVersion => {
println!("Target version = {:x}", params[1]);
}
BootloaderProperty::FlashPageSize => {
println!("Flash page size = {:x}", params[1]);
}
BootloaderProperty::IRQPinStatus => {
println!("IRQ Pin Status = {}", params[1]);
}
BootloaderProperty::FFRKeyStoreStatus => {
println!("FFR Store Status = {}", params[1]);
}
}
}
fn pretty_print_error(params: Vec<u32>) {
let reason = params[1] & 0xfffffff0;
if reason == 0 {
println!("No errors reported");
} else if reason == 0x0602f300 {
println!("Passive boot failed, reason:");
let specific_reason = params[2] & 0xfffffff0;
match specific_reason {
0x0b36f300 => {
println!("Secure image authentication failed. Check:");
println!("- Is the image you are booting signed?");
println!("- Is the image signed with the corresponding key?");
}
0x0b37f300 => {
println!("Application CRC failed");
}
0x0b35f300 => |
0x0b38f300 => {
println!("DICE failure. Check:");
println!("- Key store is set up properly (UDS)");
}
0x0d70f300 => {
println!("Trying to boot a TZ image on a device that doesn't have TZ!");
}
0x0d71f300 => {
println!("Error reading TZ Image type from CMPA");
}
0x0d72f300 => {
println!("Bad TZ image mode, check your image");
}
0x0c00f500 => {
println!("Application returned to the ROM?");
}
_ => {
println!("Some other reason, raw bytes: {:x?}", params);
}
}
} else {
        println!("Something bad happened: {:x?}", params);
}
}
fn main() -> Result<()> {
let cmd = Isp::parse();
// The target _technically_ has autobaud but it's very flaky
// and these seem to be the preferred settings
//
// We initially set the timeout short so we can drain the incoming buffer in
// a portable manner below. We'll adjust it up after that.
let mut port = serialport::new(&cmd.port, cmd.baud_rate)
.timeout(Duration::from_millis(100))
.data_bits(DataBits::Eight)
.flow_control(FlowControl::None)
.parity(Parity::None)
.stop_bits(StopBits::One)
.open()?;
// Extract any bytes left over in the serial port driver from previous
// interaction.
loop {
let mut throwaway = [0; 16];
match port.read(&mut throwaway) {
Ok(0) => {
// This should only happen on nonblocking reads, which we
// haven't asked for, but it does mean the buffer is empty so
// treat it as success.
break;
}
Ok(_) => {
// We've collected some characters to throw away, keep going.
}
Err(e) if e.kind() == ErrorKind::TimedOut => {
// Buffer is empty!
break;
}
Err(e) => {
return Err(e.into());
}
}
}
// Crank the timeout back up.
port.set_timeout(Duration::from_secs(1))?;
match cmd.cmd {
ISPCommand::Ping => {
do_ping(&mut *port)?;
println!("ping success.");
}
ISPCommand::ReadMemory {
address,
count,
path,
} => {
do_ping(&mut *port)?;
let m = do_isp_read_memory(&mut *port, address, count)?;
let mut out = std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&path)?;
out.write_all(&m)?;
println!("Output written to {:?}", path);
}
ISPCommand::WriteMemory { address, file } => {
do_ping(&mut *port)?;
println!("If you didn't already erase the flash this operation will fail!");
println!("This operation may take a while");
let infile = std::fs::read(file)?;
do_isp_write_memory(&mut *port, address, &infile)?;
println!("Write complete!");
}
ISPCommand::FlashEraseAll => {
do_ping(&mut *port)?;
do_isp_flash_erase_all(&mut *port)?;
println!("Flash erased!");
}
ISPCommand::FlashEraseRegion {
start_address,
byte_count,
} => {
do_ping(&mut *port)?;
do_isp_flash_erase_region(&mut *port, start_address, byte_count)?;
println!("Flash region erased!");
}
// Yes this is just another write-memory call but remembering addresses
// is hard.
ISPCommand::WriteCMPA { file } => {
do_ping(&mut *port)?;
let infile = std::fs::read(file)?;
do_isp_write_memory(&mut *port, 0x9e400, &infile)?;
println!("Write to CMPA done!");
}
ISPCommand::EraseCMPA => {
do_ping(&mut *port)?;
// Write 512 bytes of zero
let bytes = [0; 512];
do_isp_write_memory(&mut *port, 0x9e400, &bytes)?;
println!("CMPA region erased!");
println!("You can now boot unsigned images");
}
ISPCommand::ReadCMPA { file } => {
do_ping(&mut *port)?;
let m = do_isp_read_memory(&mut *port, 0x9e400, 512)?;
let mut out = match file {
Some(ref path) => Box::new(
std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(path)?,
) as Box<dyn Write>,
None => Box::new(std::io::stdout()) as Box<dyn Write>,
};
out.write_all(&m)?;
eprintln!("CMPA Output written to {:?}", file);
}
ISPCommand::ReadCFPA { page, file } => {
do_ping(&mut *port)?;
let data = if let Some(page) = page {
// Only read one page as requested
let addr = match page {
CfpaChoice::Scratch => 0x9de00,
CfpaChoice::Ping => 0x9e000,
CfpaChoice::Pong => 0x9e200,
};
do_isp_read_memory(&mut *port, addr, 512)?
} else {
// Read ping and pong pages and only write out the latest one.
let ping = do_isp_read_memory(&mut *port, 0x9e000, 512)
.context("reading CFPA ping page")?;
let pong = do_isp_read_memory(&mut *port, 0x9e200, 512)
.context("reading CFPA pong page")?;
let ping_d = lpc55_areas::CFPAPage::from_bytes(ping[..].try_into().unwrap())?;
let pong_d = lpc55_areas::CFPAPage::from_bytes(pong[..].try_into().unwrap())?;
println!(
"CFPA versions: ping={}, pong={}",
ping_d.version, pong_d.version
);
if ping_d.version > pong_d.version {
println!("choosing ping");
ping
} else {
println!("choosing pong");
pong
}
};
let mut out = std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&file)?;
out.write_all(&data)?;
println!("CFPA written to {file:?}");
}
ISPCommand::WriteCFPA {
update_version,
file,
} => {
do_ping(&mut *port)?;
let bytes = std::fs::read(file)?;
let mut new_cfpa = lpc55_areas::CFPAPage::from_bytes(
bytes[..].try_into().context("CFPA file is not 512 bytes")?,
)?;
// Read the CMPA so we can compare the two to try to avoid locking
// the user out of their chip.
let m = do_isp_read_memory(&mut *port, 0x9e400, 512)?;
let cmpa = lpc55_areas::CMPAPage::from_bytes(m[..].try_into().unwrap())?;
            if (new_cfpa.dcfg_cc_socu_ns_pin != 0 || new_cfpa.dcfg_cc_socu_ns_dflt != 0)
&& (cmpa.cc_socu_pin == 0 || cmpa.cc_socu_dflt == 0)
{
bail!(
"It looks like the CMPA debug settings aren't set but \
the CFPA settings are! This will brick the chip!"
);
// TODO I guess it's remotely possible that we might want an
// override for this check.
}
if update_version {
// Read the current CFPA areas to figure out what version we
// need to set.
let ping = do_isp_read_memory(&mut *port, 0x9_e000, 512)?;
let pong = do_isp_read_memory(&mut *port, 0x9_e200, 512)?;
let ping = lpc55_areas::CFPAPage::from_bytes(ping[..].try_into().unwrap())?;
let pong = lpc55_areas::CFPAPage::from_bytes(pong[..].try_into().unwrap())?;
println!(
"ping sector v={}, pong sector v={}",
ping.version, pong.version
);
let start_version = u32::max(ping.version, pong.version);
new_cfpa.version = start_version + 1;
println!("note: updated version is {}", new_cfpa.version);
}
let new_bytes = new_cfpa.to_vec()?;
do_isp_write_memory(&mut *port, 0x9_de00, &new_bytes)?;
println!("Write to CFPA done!");
}
ISPCommand::Restore => {
do_ping(&mut *port)?;
println!("Erasing flash");
do_isp_flash_erase_all(&mut *port)?;
println!("Erasing done.");
// we need to fill 0x134 bytes to cover the vector table
// plus all interrupts
let mut bytes: [u8; 0x134] = [0u8; 0x134];
// Choose a RAM address for the stack (we shouldn't use the stack
// but it should be valid anyway)
byteorder::LittleEndian::write_u32(&mut bytes[0x0..0x4], 0x20004000);
            // Every vector entry from 0x004 onward targets the branch-to-self
            // instruction at 0x00000130 (0x131 = that address with the Thumb bit set)
            let mut offset = 4;
            while offset < 0x130 {
                byteorder::LittleEndian::write_u32(&mut bytes[offset..offset + 4], 0x00000131);
                offset += 4;
            }
            // These are two Thumb branch-to-self instructions
byteorder::LittleEndian::write_u32(&mut bytes[0x130..0x134], 0xe7fee7fe);
println!("Writing bytes");
do_isp_write_memory(&mut *port, 0x0, &bytes)?;
println!("Restore done! SWD should work now.");
}
ISPCommand::SendSBUpdate { file } => {
do_ping(&mut *port)?;
println!("Sending SB file, this may take a while");
let infile = std::fs::read(file)?;
do_recv_sb_file(&mut *port, &infile)?;
println!("Send complete!");
}
ISPCommand::Enroll => {
do_ping(&mut *port)?;
println!("Generating new activation code");
do_enroll(&mut *port)?;
println!("done.");
println!("If you want to save this, remember to write to non-volatile memory");
}
ISPCommand::GenerateUDS => {
do_ping(&mut *port)?;
println!("Generating new UDS");
do_generate_uds(&mut *port)?;
println!("done.");
println!("If you want to save this, remember to write to non-volatile memory");
}
ISPCommand::WriteKeyStore => {
do_ping(&mut *port)?;
println!("Writing key store to flash");
do_save_keystore(&mut *port)?;
println!("done.");
}
| {
println!("Application entry point and/or stack is invalid");
} | conditional_block |
lpc55_flash.rs | },
/// Erases all non-secure flash. This MUST be done before writing!
#[clap(name = "flash-erase-all")]
FlashEraseAll,
/// Erases a portion of non-secure flash. This MUST be done before writing!
FlashEraseRegion {
#[arg(value_parser = parse_int::parse::<u32>)]
start_address: u32,
#[arg(value_parser = parse_int::parse::<u32>)]
byte_count: u32, | file: PathBuf,
},
    /// Erase the CMPA region (use this to boot non-secure binaries again)
#[clap(name = "erase-cmpa")]
EraseCMPA,
/// Save the CMPA region to a file
ReadCMPA {
/// Write to FILE, or stdout if omitted
file: Option<PathBuf>,
},
/// Save the CFPA region to a file
ReadCFPA {
#[clap(short, long)]
page: Option<CfpaChoice>,
file: PathBuf,
},
/// Write the CFPA region from the contents of a file.
WriteCFPA {
#[clap(short, long)]
update_version: bool,
file: PathBuf,
},
    /// Put a minimalist program on flash to allow attaching via SWD
Restore,
/// Send SB update file
SendSBUpdate {
file: PathBuf,
},
    /// Set up the key store. This involves:
/// - Enroll
/// - Setting UDS
/// - Setting SBKEK
/// - Writing to persistent storage
SetupKeyStore {
file: PathBuf,
},
/// Trigger a new enrollment in the PUF
Enroll,
/// Generate a new device secret for use in DICE
GenerateUDS,
/// Write keystore to flash
WriteKeyStore,
/// Erase existing keystore
EraseKeyStore,
/// Set the SBKEK, required for SB Updates
SetSBKek {
file: PathBuf,
},
GetProperty {
#[arg(value_parser = BootloaderProperty::from_str)]
prop: BootloaderProperty,
},
LastError,
}
#[derive(Copy, Clone, Debug, clap::ValueEnum)]
enum CfpaChoice {
Scratch,
Ping,
Pong,
}
#[derive(Debug, Parser)]
#[clap(name = "isp")]
struct Isp {
/// UART port
#[clap(name = "port")]
port: String,
/// How fast to run the UART. 57,600 baud seems very reliable but is rather
/// slow. In certain test setups we've gotten rates of up to 1Mbaud to work
/// reliably -- your mileage may vary!
#[clap(short = 'b', default_value = "57600")]
baud_rate: u32,
#[clap(subcommand)]
cmd: ISPCommand,
}
fn pretty_print_bootloader_prop(prop: BootloaderProperty, params: Vec<u32>) {
match prop {
BootloaderProperty::BootloaderVersion => {
println!("Version {:x}", params[1]);
}
BootloaderProperty::AvailablePeripherals => {
println!("Bitmask of peripherals {:x}", params[1]);
}
BootloaderProperty::FlashStart => {
println!("Flash start = 0x{:x}", params[1]);
}
BootloaderProperty::FlashSize => {
println!("Flash Size = {:x}", params[1]);
}
BootloaderProperty::FlashSectorSize => {
println!("Flash Sector Size = {:x}", params[1]);
}
BootloaderProperty::AvailableCommands => {
println!("Bitmask of commands = {:x}", params[1]);
}
BootloaderProperty::CRCStatus => {
println!("CRC status = {}", params[1]);
}
BootloaderProperty::VerifyWrites => {
println!("Verify Writes (bool) {}", params[1]);
}
BootloaderProperty::MaxPacketSize => {
println!("Max Packet Size = {}", params[1]);
}
BootloaderProperty::ReservedRegions => {
println!("Reserved regions? = {:x?}", params);
}
BootloaderProperty::RAMStart => {
println!("RAM start = 0x{:x}", params[1]);
}
BootloaderProperty::RAMSize => {
println!("RAM size = 0x{:x}", params[1]);
}
BootloaderProperty::SystemDeviceID => {
println!("DEVICE_ID0 register = 0x{:x}", params[1]);
}
BootloaderProperty::SecurityState => {
println!(
"Security State = {}",
if params[1] == 0x5aa55aa5 {
"UNLOCKED"
} else {
"LOCKED"
}
);
}
BootloaderProperty::UniqueID => {
println!(
"UUID = {:x}{:x}{:x}{:x}",
params[1], params[2], params[3], params[4]
);
}
BootloaderProperty::TargetVersion => {
println!("Target version = {:x}", params[1]);
}
BootloaderProperty::FlashPageSize => {
println!("Flash page size = {:x}", params[1]);
}
BootloaderProperty::IRQPinStatus => {
println!("IRQ Pin Status = {}", params[1]);
}
BootloaderProperty::FFRKeyStoreStatus => {
println!("FFR Store Status = {}", params[1]);
}
}
}
fn pretty_print_error(params: Vec<u32>) {
let reason = params[1] & 0xfffffff0;
if reason == 0 {
println!("No errors reported");
} else if reason == 0x0602f300 {
println!("Passive boot failed, reason:");
let specific_reason = params[2] & 0xfffffff0;
match specific_reason {
0x0b36f300 => {
println!("Secure image authentication failed. Check:");
println!("- Is the image you are booting signed?");
println!("- Is the image signed with the corresponding key?");
}
0x0b37f300 => {
println!("Application CRC failed");
}
0x0b35f300 => {
println!("Application entry point and/or stack is invalid");
}
0x0b38f300 => {
println!("DICE failure. Check:");
println!("- Key store is set up properly (UDS)");
}
0x0d70f300 => {
println!("Trying to boot a TZ image on a device that doesn't have TZ!");
}
0x0d71f300 => {
println!("Error reading TZ Image type from CMPA");
}
0x0d72f300 => {
println!("Bad TZ image mode, check your image");
}
0x0c00f500 => {
println!("Application returned to the ROM?");
}
_ => {
println!("Some other reason, raw bytes: {:x?}", params);
}
}
} else {
        println!("Something bad happened: {:x?}", params);
}
}
fn main() -> Result<()> {
let cmd = Isp::parse();
// The target _technically_ has autobaud but it's very flaky
// and these seem to be the preferred settings
//
// We initially set the timeout short so we can drain the incoming buffer in
// a portable manner below. We'll adjust it up after that.
let mut port = serialport::new(&cmd.port, cmd.baud_rate)
.timeout(Duration::from_millis(100))
.data_bits(DataBits::Eight)
.flow_control(FlowControl::None)
.parity(Parity::None)
.stop_bits(StopBits::One)
.open()?;
// Extract any bytes left over in the serial port driver from previous
// interaction.
loop {
let mut throwaway = [0; 16];
match port.read(&mut throwaway) {
Ok(0) => {
// This should only happen on nonblocking reads, which we
// haven't asked for, but it does mean the buffer is empty so
// treat it as success.
break;
}
Ok(_) => {
// We've collected some characters to throw away, keep going.
}
Err(e) if e.kind() == ErrorKind::TimedOut => {
// Buffer is empty!
break;
}
Err(e) => {
return Err(e.into());
}
}
}
// Crank the timeout back up.
port.set_timeout(Duration::from_secs(1))?;
match cmd.cmd {
ISPCommand::Ping => {
do_ping(&mut *port)?;
println!("ping success.");
}
ISPCommand::ReadMemory {
address,
count,
path,
} => {
do_ping(&mut *port)?;
let m = do_isp_read_memory(&mut *port, address, count)?;
let mut out = std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&path)?;
out.write_all(&m)?;
println!("Output written to {:?}", path);
}
ISPCommand::WriteMemory { address, file } => {
do_ping(&mut *port)?;
println!("If you didn't already erase the flash this operation will fail!");
println!("This operation may take a while");
let infile = std::fs::read(file)?;
do_isp_write_memory(&mut *port, address, &infile)?;
println!("Write complete!");
}
ISPCommand::FlashEraseAll => {
do_ping(&mut *port)?;
do_isp_flash_erase_all(&mut *port)?;
println!("Flash erased!");
}
ISPCommand::FlashEraseRegion {
start_address,
byte_count,
} => {
do_ping(&mut *port)?;
do_isp_flash_erase_region(&mut *port, start_address, byte_count)?;
println!("Flash region erased!");
}
// Yes this is just another write-memory call but remembering addresses
// is hard.
ISPCommand::WriteCMPA { file } => {
do_ping(&mut *port)?;
let infile = std::fs::read(file)?;
do_isp_write_memory(&mut *port, 0x9e400, &infile)?;
println!("Write to CMPA done!");
}
ISPCommand::EraseCMPA => {
do_ping(&mut *port)?;
// Write 512 bytes of zero
let bytes = [0; 512];
do_isp_write_memory(&mut *port, 0x9e400, &bytes)?;
println!("CMPA region erased!");
println!("You can now boot unsigned images");
}
ISPCommand::ReadCMPA { file } => {
do_ping(&mut *port)?;
let m = do_isp_read_memory(&mut *port, 0x9e400, 512)?;
let mut out = match file {
Some(ref path) => Box::new(
std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(path)?,
) as Box<dyn Write>,
None => Box::new(std::io::stdout()) as Box<dyn Write>,
};
out.write_all(&m)?;
eprintln!("CMPA Output written to {:?}", file);
}
ISPCommand::ReadCFPA { page, file } => {
do_ping(&mut *port)?;
let data = if let Some(page) = page {
// Only read one page as requested
let addr = match page {
CfpaChoice::Scratch => 0x9de00,
CfpaChoice::Ping => 0x9e000,
CfpaChoice::Pong => 0x9e200,
};
do_isp_read_memory(&mut *port, addr, 512)?
} else {
// Read ping and pong pages and only write out the latest one.
let ping = do_isp_read_memory(&mut *port, 0x9e000, 512)
.context("reading CFPA ping page")?;
let pong = do_isp_read_memory(&mut *port, 0x9e200, 512)
.context("reading CFPA pong page")?;
let ping_d = lpc55_areas::CFPAPage::from_bytes(ping[..].try_into().unwrap())?;
let pong_d = lpc55_areas::CFPAPage::from_bytes(pong[..].try_into().unwrap())?;
println!(
"CFPA versions: ping={}, pong={}",
ping_d.version, pong_d.version
);
if ping_d.version > pong_d.version {
println!("choosing ping");
ping
} else {
println!("choosing pong");
pong
}
};
let mut out = std::fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&file)?;
out.write_all(&data)?;
println!("CFPA written to {file:?}");
}
ISPCommand::WriteCFPA {
update_version,
file,
} => {
do_ping(&mut *port)?;
let bytes = std::fs::read(file)?;
let mut new_cfpa = lpc55_areas::CFPAPage::from_bytes(
bytes[..].try_into().context("CFPA file is not 512 bytes")?,
)?;
// Read the CMPA so we can compare the two to try to avoid locking
// the user out of their chip.
let m = do_isp_read_memory(&mut *port, 0x9e400, 512)?;
let cmpa = lpc55_areas::CMPAPage::from_bytes(m[..].try_into().unwrap())?;
            if (new_cfpa.dcfg_cc_socu_ns_pin != 0 || new_cfpa.dcfg_cc_socu_ns_dflt != 0)
&& (cmpa.cc_socu_pin == 0 || cmpa.cc_socu_dflt == 0)
{
bail!(
"It looks like the CMPA debug settings aren't set but \
the CFPA settings are! This will brick the chip!"
);
// TODO I guess it's remotely possible that we might want an
// override for this check.
}
if update_version {
// Read the current CFPA areas to figure out what version we
// need to set.
let ping = do_isp_read_memory(&mut *port, 0x9_e000, 512)?;
let pong = do_isp_read_memory(&mut *port, 0x9_e200, 512)?;
let ping = lpc55_areas::CFPAPage::from_bytes(ping[..].try_into().unwrap())?;
let pong = lpc55_areas::CFPAPage::from_bytes(pong[..].try_into().unwrap())?;
println!(
"ping sector v={}, pong sector v={}",
ping.version, pong.version
);
let start_version = u32::max(ping.version, pong.version);
new_cfpa.version = start_version + 1;
println!("note: updated version is {}", new_cfpa.version);
}
let new_bytes = new_cfpa.to_vec()?;
do_isp_write_memory(&mut *port, 0x9_de00, &new_bytes)?;
println!("Write to CFPA done!");
}
ISPCommand::Restore => {
do_ping(&mut *port)?;
println!("Erasing flash");
do_isp_flash_erase_all(&mut *port)?;
println!("Erasing done.");
// we need to fill 0x134 bytes to cover the vector table
// plus all interrupts
let mut bytes: [u8; 0x134] = [0u8; 0x134];
// Choose a RAM address for the stack (we shouldn't use the stack
// but it should be valid anyway)
byteorder::LittleEndian::write_u32(&mut bytes[0x0..0x4], 0x20004000);
            // Every vector entry from 0x004 onward targets the branch-to-self
            // instruction at 0x00000130 (0x131 = that address with the Thumb bit set)
            let mut offset = 4;
            while offset < 0x130 {
                byteorder::LittleEndian::write_u32(&mut bytes[offset..offset + 4], 0x00000131);
                offset += 4;
            }
            // These are two Thumb branch-to-self instructions
byteorder::LittleEndian::write_u32(&mut bytes[0x130..0x134], 0xe7fee7fe);
println!("Writing bytes");
do_isp_write_memory(&mut *port, 0x0, &bytes)?;
println!("Restore done! SWD should work now.");
}
ISPCommand::SendSBUpdate { file } => {
do_ping(&mut *port)?;
println!("Sending SB file, this may take a while");
let infile = std::fs::read(file)?;
do_recv_sb_file(&mut *port, &infile)?;
println!("Send complete!");
}
ISPCommand::Enroll => {
do_ping(&mut *port)?;
println!("Generating new activation code");
do_enroll(&mut *port)?;
println!("done.");
println!("If you want to save this, remember to write to non-volatile memory");
}
ISPCommand::GenerateUDS => {
do_ping(&mut *port)?;
println!("Generating new UDS");
do_generate_uds(&mut *port)?;
println!("done.");
println!("If you want to save this, remember to write to non-volatile memory");
}
ISPCommand::WriteKeyStore => {
do_ping(&mut *port)?;
println!("Writing key store to flash");
do_save_keystore(&mut *port)?;
println!("done.");
}
| },
/// Write a file to the CMPA region
#[clap(name = "write-cmpa")]
WriteCMPA { | random_line_split |
mod.rs | Item = (PathBuf, u64)>> {
let mut files: Vec<_> = WalkDir::new(path.as_ref())
.into_iter()
.filter_map(|e| {
e.ok().and_then(|f| {
// Only look at files
if f.file_type().is_file() {
// Get the last-modified time, size, and the full path.
f.metadata().ok().and_then(|m| {
m.modified()
.ok()
.map(|mtime| (mtime, f.path().to_owned(), m.len()))
})
} else {
None
}
})
})
.collect();
// Sort by last-modified-time, so oldest file first.
files.sort_by_key(|k| k.0);
Box::new(files.into_iter().map(|(_mtime, path, size)| (path, size)))
}
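// Because `init` inserts files in the order produced here (oldest mtime first), the
// on-disk modification times effectively persist the LRU ordering across restarts;
// `get_file` refreshes an entry's mtime for the same reason.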
/// An LRU cache of files on disk.
pub struct LruDiskCache<S: BuildHasher = RandomState> {
lru: LruCache<OsString, u64, S, FileSize>,
root: PathBuf,
}
/// Errors returned by this crate.
#[derive(Debug)]
pub enum Error {
/// The file was too large to fit in the cache.
FileTooLarge,
/// The file was not in the cache.
FileNotInCache,
/// An IO Error occurred.
Io(io::Error),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::FileTooLarge => write!(f, "File too large"),
Error::FileNotInCache => write!(f, "File not in cache"),
Error::Io(ref e) => write!(f, "{}", e),
}
}
}
impl StdError for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::FileTooLarge => None,
Error::FileNotInCache => None,
Error::Io(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
Error::Io(e)
}
}
/// A convenience `Result` type
pub type Result<T> = std::result::Result<T, Error>;
/// Trait objects can't be bounded by more than one non-builtin trait.
pub trait ReadSeek: Read + Seek + Send {}
impl<T: Read + Seek + Send> ReadSeek for T {}
enum AddFile<'a> {
AbsPath(PathBuf),
RelPath(&'a OsStr),
}
impl LruDiskCache {
/// Create an `LruDiskCache` that stores files in `path`, limited to `size` bytes.
///
/// Existing files in `path` will be stored with their last-modified time from the filesystem
/// used as the order for the recency of their use. Any files that are individually larger
/// than `size` bytes will be removed.
///
/// The cache is not observant of changes to files under `path` from external sources, it
    /// expects to have sole maintenance of the contents.
pub fn new<T>(path: T, size: u64) -> Result<Self>
where
PathBuf: From<T>,
{
LruDiskCache {
lru: LruCache::with_meter(size, FileSize),
root: PathBuf::from(path),
}
.init()
}
/// Return the current size of all the files in the cache.
pub fn size(&self) -> u64 {
self.lru.size()
}
/// Return the count of entries in the cache.
pub fn len(&self) -> usize {
self.lru.len()
}
pub fn is_empty(&self) -> bool {
self.lru.len() == 0
}
/// Return the maximum size of the cache.
pub fn capacity(&self) -> u64 {
self.lru.capacity()
}
/// Return the path in which the cache is stored.
pub fn path(&self) -> &Path {
self.root.as_path()
}
/// Return the path that `key` would be stored at.
fn rel_to_abs_path<K: AsRef<Path>>(&self, rel_path: K) -> PathBuf {
self.root.join(rel_path)
}
/// Scan `self.root` for existing files and store them.
fn init(mut self) -> Result<Self> {
fs::create_dir_all(&self.root)?;
for (file, size) in get_all_files(&self.root) {
            if !self.can_store(size) {
fs::remove_file(file).unwrap_or_else(|e| {
error!(
"Error removing file `{}` which is too large for the cache ({} bytes)",
e, size
)
});
} else {
self.add_file(AddFile::AbsPath(file), size)
.unwrap_or_else(|e| error!("Error adding file: {}", e));
}
}
Ok(self)
}
/// Returns `true` if the disk cache can store a file of `size` bytes.
pub fn can_store(&self, size: u64) -> bool {
size <= self.lru.capacity()
}
/// Add the file at `path` of size `size` to the cache.
fn add_file(&mut self, addfile_path: AddFile<'_>, size: u64) -> Result<()> | }
fn insert_by<K: AsRef<OsStr>, F: FnOnce(&Path) -> io::Result<()>>(
&mut self,
key: K,
size: Option<u64>,
by: F,
) -> Result<()> {
if let Some(size) = size {
            if !self.can_store(size) {
return Err(Error::FileTooLarge);
}
}
let rel_path = key.as_ref();
let path = self.rel_to_abs_path(rel_path);
fs::create_dir_all(path.parent().expect("Bad path?"))?;
by(&path)?;
let size = match size {
Some(size) => size,
None => fs::metadata(path)?.len(),
};
self.add_file(AddFile::RelPath(rel_path), size)
.map_err(|e| {
error!(
"Failed to insert file `{}`: {}",
rel_path.to_string_lossy(),
e
);
fs::remove_file(self.rel_to_abs_path(rel_path))
.expect("Failed to remove file we just created!");
e
})
}
/// Add a file by calling `with` with the open `File` corresponding to the cache at path `key`.
pub fn insert_with<K: AsRef<OsStr>, F: FnOnce(File) -> io::Result<()>>(
&mut self,
key: K,
with: F,
) -> Result<()> {
self.insert_by(key, None, |path| with(File::create(path)?))
}
/// Add a file with `bytes` as its contents to the cache at path `key`.
pub fn insert_bytes<K: AsRef<OsStr>>(&mut self, key: K, bytes: &[u8]) -> Result<()> {
self.insert_by(key, Some(bytes.len() as u64), |path| {
let mut f = File::create(path)?;
f.write_all(bytes)?;
Ok(())
})
}
/// Add an existing file at `path` to the cache at path `key`.
pub fn insert_file<K: AsRef<OsStr>, P: AsRef<OsStr>>(&mut self, key: K, path: P) -> Result<()> {
let size = fs::metadata(path.as_ref())?.len();
self.insert_by(key, Some(size), |new_path| {
fs::rename(path.as_ref(), new_path).or_else(|_| {
warn!("fs::rename failed, falling back to copy!");
fs::copy(path.as_ref(), new_path)?;
fs::remove_file(path.as_ref()).unwrap_or_else(|e| {
error!("Failed to remove original file in insert_file: {}", e)
});
Ok(())
})
})
}
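    // `fs::rename` cannot move a file across filesystems, so the fallback above
    // copies the data to the cache and then best-effort removes the original.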
/// Return `true` if a file with path `key` is in the cache.
pub fn contains_key<K: AsRef<OsStr>>(&self, key: K) -> bool {
self.lru.contains_key(key.as_ref())
}
/// Get an opened `File` for `key`, if one exists and can be opened. Updates the LRU state
/// of the file if present. Avoid using this method if at all possible, prefer `.get`.
pub fn get_file<K: AsRef<OsStr>>(&mut self, key: K) -> Result<File> {
let rel_path = key.as_ref();
let path = self.rel_to_abs_path(rel_path);
self.lru
.get(rel_path)
.ok_or(Error::FileNotInCache)
.and_then(|_| {
let t = FileTime::now();
set_file_times(&path, t, t)?;
File::open(path).map_err(Into::into)
})
}
/// Get an opened readable and seekable handle to the file at `key`, if one exists and can
/// be opened. Updates the LRU state of the file if present.
pub fn get<K: AsRef<OsStr>>(&mut self, key: K) -> Result<Box<dyn ReadSeek>> {
self.get_file(key).map(|f| Box::new(f) as Box<dyn ReadSeek>)
}
/// Remove the given key from the cache.
pub fn remove<K: AsRef<OsStr>>(&mut self, key: K) -> Result<()> {
match self.lru.remove(key.as_ref()) {
Some(_) => {
let path = self.rel_to_abs_path(key.as_ref());
fs::remove_file(&path).map_err(|e| {
error!("Error removing file from cache: `{:?}`: {}", path, e);
Into::into(e)
})
}
None => Ok(()),
}
}
}
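// Minimal usage sketch (illustrative only -- the cache directory, key and size are
// placeholders and error handling is elided):
//
//     let mut cache = LruDiskCache::new("/tmp/example-cache", 64 * 1024 * 1024)?;
//     cache.insert_bytes("objects/abc123", b"compiled output")?;
//     if cache.contains_key("objects/abc123") {
//         let mut reader = cache.get("objects/abc123")?;
//         // read from `reader` as needed
//     }
//
// Keys are paths relative to the cache root; inserting beyond `capacity()` evicts
// least-recently-used entries and removes their files from disk.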
#[cfg(test)]
mod tests {
use super::fs::{self, File};
use super::{Error, LruDiskCache};
use filetime::{set_file_times, FileTime};
use std::io::{self, Read, Write};
use std::path::{Path, PathBuf};
use tempfile::TempDir;
struct TestFixture {
/// Temp directory.
pub tempdir: TempDir,
}
fn create_file<T: AsRef<Path>, F: FnOnce(File) -> io::Result<()>>(
dir: &Path,
path: T,
fill_contents: F,
) -> io::Result<PathBuf> {
let b = dir.join(path);
fs::create_dir_all(b.parent().unwrap())?;
let f = fs::File::create(&b)?;
fill_contents(f)?;
b.canonicalize()
}
/// Set the last modified time of `path` backwards by `seconds` seconds.
fn set_mtime_back<T: AsRef<Path>>(path: T, seconds: usize) {
let m = fs::metadata(path.as_ref()).unwrap();
let t = FileTime::from_last_modification_time(&m);
let t = FileTime::from_unix_time(t.unix_seconds() - seconds as i64, t.nanoseconds());
set_file_times(path, t, t).unwrap();
}
fn read_all<R: Read>(r: &mut R) -> io::Result<Vec<u8>> {
let mut v = vec![];
r.read_to_end(&mut v)?;
Ok(v)
}
impl TestFixture {
pub fn new() -> TestFixture {
TestFixture {
tempdir: tempfile::Builder::new()
.prefix("lru-disk-cache-test")
.tempdir()
.unwrap(),
}
}
pub fn tmp(&self) -> &Path {
self.tempdir.path()
}
pub fn create_file<T: AsRef<Path>>(&self, path: T, size: usize) -> PathBuf {
create_file(self.tempdir.path(), path, |mut f| {
f.write_all(&vec![0; size])
})
.unwrap()
}
}
#[test]
fn test_empty_dir() {
let f = TestFixture::new();
LruDiskCache::new(f.tmp(), 1024).unwrap();
}
#[test]
fn test_missing_root() {
let f = TestFixture::new();
LruDiskCache::new(f.tmp().join("not-here"), 1024).unwrap();
}
#[test]
fn test_some_existing_files() {
let f = TestFixture::new();
f.create_file("file1", 10);
f.create_file("file2", 10);
let c = LruDiskCache::new(f.tmp(), 20).unwrap();
assert_eq!(c.size(), 20);
assert_eq!(c.len(), 2);
}
#[test]
fn test_existing_file_too_large() {
let f = TestFixture::new();
// Create files explicitly in the past.
set_mtime_back(f.create_file("file1", 10), 10);
set_mtime_back(f.create_file("file2", 10), 5);
let c = LruDiskCache::new(f.tmp(), 15).unwrap();
assert_eq!(c.size(), 10);
assert_eq!(c.len(), 1);
assert!(!c.contains_key("file1"));
assert!(c.contains_key("file2"));
}
#[test]
fn test_existing_files_lru_mtime() {
let f = TestFixture::new();
// Create files explicitly in the past.
set_mtime_back(f.create_file("file1", 10), 5);
set_mtime_back(f.create_file("file2", 10), 10);
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
assert_eq!(c.size(), 20);
c.insert_bytes("file3", &[0; 10]).unwrap();
assert_eq!(c.size(), 20);
// The oldest file on disk should have been removed.
assert!(!c.contains_key("file2"));
assert!(c.contains_key("file1"));
}
#[test]
fn test_insert_bytes() {
let f = TestFixture::new();
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
c.insert_bytes("a/b/c", &[0; 10]).unwrap();
assert!(c.contains_key("a/b/c"));
c.insert_bytes("a/b/d", &[0; 10]).unwrap();
assert_eq!(c.size(), 20);
// Adding this third file should put the cache above the limit.
c.insert_bytes("x/y/z", &[0; 10]).unwrap();
assert_eq!(c.size(), 20);
// The least-recently-used file should have been removed.
assert!(!c.contains_key("a/b/c"));
assert!(!f.tmp().join("a/b/c").exists());
}
#[test]
fn test_insert_bytes_exact() {
// Test that files adding up to exactly the size limit works.
let f = TestFixture::new();
let mut c = LruDiskCache::new(f.tmp(), 20).unwrap();
c.insert_bytes("file1", &[1; 10]).unwrap();
c.insert_bytes("file2", &[2; 10]).unwrap();
assert_eq!(c.size(), 20);
c.insert_bytes("file3", &[3; 10]).unwrap();
assert_eq!(c.size(), 20);
assert!(!c.contains_key("file1"));
}
#[test]
fn test_add_get_lru() {
let f = TestFixture::new();
{
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
c.insert_bytes("file1", &[1; 10]).unwrap();
c.insert_bytes("file2", &[2; 10]).unwrap();
// Get the file to bump its LRU status.
assert_eq!(
read_all(&mut c.get("file1").unwrap()).unwrap(),
vec![1u8; 10]
);
// Adding this third file should put the cache above the limit.
c.insert_bytes("file3", &[3; 10]).unwrap();
assert_eq!(c.size(), 20);
// The least-recently-used file should have been removed.
assert!(!c.contains_key("file2"));
}
// Get rid of the cache, to test that the LRU persists on-disk as mtimes.
// This is hacky, but mtime resolution on my mac with HFS+ is only 1 second, so we either
// need to have a 1 second sleep in the test (boo) or adjust the mtimes back a bit so
// that updating one file to the current time actually works to make it newer.
set_mtime_back(f.tmp().join("file1"), 5);
set_mtime_back(f.tmp().join("file3"), 5);
{
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
// Bump file1 again.
c.get("file1").unwrap();
}
// Now check that the on-disk mtimes were updated and used.
{
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
assert!(c.contains_key("file1"));
assert!(c.contains_key("file3"));
assert_eq!(c.size(), 20);
// Add another file to bump out the least-recently-used.
c.insert_bytes("file4", &[4; 10]).unwrap();
assert_eq!(c.size(), 20);
assert!(!c.contains_key("file3"));
assert!(c.contains_key("file1"));
}
}
#[test]
fn test_insert_bytes_too_large() {
let f = TestFixture::new();
let mut c = LruDiskCache::new(f.tmp(), 1).unwrap();
match c.insert_bytes("a/b/c", &[0; 2]) {
Err(Error::FileTooLarge) => {}
x => panic!("Unexpected result: {:?}", x),
}
}
#[test]
fn test_insert_file() {
let f = TestFixture::new();
let p1 = f.create_file("file1", 10);
let p2 = f.create_file("file2", 10);
let p3 = f.create_file("file3", 10);
let mut c = LruDiskCache::new(f.tmp().join("cache"), 25).unwrap();
c.insert_file("file1", &p1).unwrap();
assert_eq!(c.len(), 1);
c.insert_file("file2", &p2).unwrap();
assert_eq!(c.len(), 2);
// Get the file to bump its LRU status.
assert_eq!(
read_all(&mut c.get("file1").unwrap()).unwrap(),
vec![0u8; 10]
);
| {
if !self.can_store(size) {
return Err(Error::FileTooLarge);
}
let rel_path = match addfile_path {
AddFile::AbsPath(ref p) => p.strip_prefix(&self.root).expect("Bad path?").as_os_str(),
AddFile::RelPath(p) => p,
};
//TODO: ideally LRUCache::insert would give us back the entries it had to remove.
while self.lru.size() + size > self.lru.capacity() {
let (rel_path, _) = self.lru.remove_lru().expect("Unexpectedly empty cache!");
let remove_path = self.rel_to_abs_path(rel_path);
//TODO: check that files are removable during `init`, so that this is only
// due to outside interference.
fs::remove_file(&remove_path).unwrap_or_else(|e| {
panic!("Error removing file from cache: `{:?}`: {}", remove_path, e)
});
}
self.lru.insert(rel_path.to_owned(), size);
Ok(()) | identifier_body |
mod.rs | <Q:?Sized>(&self, _: &Q, v: &u64) -> usize
where
K: Borrow<Q>,
{
*v as usize
}
}
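// `FileSize` meters the cache by file size: each entry contributes its stored byte
// count, so the capacity handed to `LruDiskCache::new` below is a byte budget rather
// than an entry count.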
/// Return an iterator of `(path, size)` of files under `path` sorted by ascending last-modified
/// time, such that the oldest modified file is returned first.
fn get_all_files<P: AsRef<Path>>(path: P) -> Box<dyn Iterator<Item = (PathBuf, u64)>> {
let mut files: Vec<_> = WalkDir::new(path.as_ref())
.into_iter()
.filter_map(|e| {
e.ok().and_then(|f| {
// Only look at files
if f.file_type().is_file() {
// Get the last-modified time, size, and the full path.
f.metadata().ok().and_then(|m| {
m.modified()
.ok()
.map(|mtime| (mtime, f.path().to_owned(), m.len()))
})
} else {
None
}
})
})
.collect();
// Sort by last-modified-time, so oldest file first.
files.sort_by_key(|k| k.0);
Box::new(files.into_iter().map(|(_mtime, path, size)| (path, size)))
}
/// An LRU cache of files on disk.
pub struct LruDiskCache<S: BuildHasher = RandomState> {
lru: LruCache<OsString, u64, S, FileSize>,
root: PathBuf,
}
/// Errors returned by this crate.
#[derive(Debug)]
pub enum Error {
/// The file was too large to fit in the cache.
FileTooLarge,
/// The file was not in the cache.
FileNotInCache,
/// An IO Error occurred.
Io(io::Error),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::FileTooLarge => write!(f, "File too large"),
Error::FileNotInCache => write!(f, "File not in cache"),
Error::Io(ref e) => write!(f, "{}", e),
}
}
}
impl StdError for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::FileTooLarge => None,
Error::FileNotInCache => None,
Error::Io(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
Error::Io(e)
}
}
/// A convenience `Result` type
pub type Result<T> = std::result::Result<T, Error>;
/// Trait objects can't be bounded by more than one non-builtin trait.
pub trait ReadSeek: Read + Seek + Send {}
impl<T: Read + Seek + Send> ReadSeek for T {}
enum AddFile<'a> {
AbsPath(PathBuf),
RelPath(&'a OsStr),
}
impl LruDiskCache {
/// Create an `LruDiskCache` that stores files in `path`, limited to `size` bytes.
///
/// Existing files in `path` will be stored with their last-modified time from the filesystem
/// used as the order for the recency of their use. Any files that are individually larger
/// than `size` bytes will be removed.
///
/// The cache is not observant of changes to files under `path` from external sources, it
    /// expects to have sole maintenance of the contents.
pub fn new<T>(path: T, size: u64) -> Result<Self>
where
PathBuf: From<T>,
{
LruDiskCache {
lru: LruCache::with_meter(size, FileSize),
root: PathBuf::from(path),
}
.init()
}
/// Return the current size of all the files in the cache.
pub fn size(&self) -> u64 {
self.lru.size()
}
/// Return the count of entries in the cache.
pub fn len(&self) -> usize {
self.lru.len()
}
pub fn is_empty(&self) -> bool {
self.lru.len() == 0
}
/// Return the maximum size of the cache.
pub fn capacity(&self) -> u64 {
self.lru.capacity()
}
/// Return the path in which the cache is stored.
pub fn path(&self) -> &Path {
self.root.as_path()
}
/// Return the path that `key` would be stored at.
fn rel_to_abs_path<K: AsRef<Path>>(&self, rel_path: K) -> PathBuf {
self.root.join(rel_path)
}
/// Scan `self.root` for existing files and store them.
fn init(mut self) -> Result<Self> {
fs::create_dir_all(&self.root)?;
for (file, size) in get_all_files(&self.root) {
if !self.can_store(size) {
fs::remove_file(&file).unwrap_or_else(|e| {
error!(
"Error removing file `{:?}` which is too large for the cache ({} bytes): {}",
file, size, e
)
});
} else {
self.add_file(AddFile::AbsPath(file), size)
.unwrap_or_else(|e| error!("Error adding file: {}", e));
}
}
Ok(self)
}
/// Returns `true` if the disk cache can store a file of `size` bytes.
pub fn can_store(&self, size: u64) -> bool {
size <= self.lru.capacity()
}
/// Add the file at `path` of size `size` to the cache.
fn add_file(&mut self, addfile_path: AddFile<'_>, size: u64) -> Result<()> {
if !self.can_store(size) {
return Err(Error::FileTooLarge);
}
let rel_path = match addfile_path {
AddFile::AbsPath(ref p) => p.strip_prefix(&self.root).expect("Bad path?").as_os_str(),
AddFile::RelPath(p) => p,
};
//TODO: ideally LRUCache::insert would give us back the entries it had to remove.
while self.lru.size() + size > self.lru.capacity() {
let (rel_path, _) = self.lru.remove_lru().expect("Unexpectedly empty cache!");
let remove_path = self.rel_to_abs_path(rel_path);
//TODO: check that files are removable during `init`, so that this is only
// due to outside interference.
fs::remove_file(&remove_path).unwrap_or_else(|e| {
panic!("Error removing file from cache: `{:?}`: {}", remove_path, e)
});
}
self.lru.insert(rel_path.to_owned(), size);
Ok(())
}
fn insert_by<K: AsRef<OsStr>, F: FnOnce(&Path) -> io::Result<()>>(
&mut self,
key: K,
size: Option<u64>,
by: F,
) -> Result<()> {
if let Some(size) = size {
if !self.can_store(size) {
return Err(Error::FileTooLarge);
}
}
let rel_path = key.as_ref();
let path = self.rel_to_abs_path(rel_path);
fs::create_dir_all(path.parent().expect("Bad path?"))?;
by(&path)?;
let size = match size {
Some(size) => size,
None => fs::metadata(path)?.len(),
};
self.add_file(AddFile::RelPath(rel_path), size)
.map_err(|e| {
error!(
"Failed to insert file `{}`: {}",
rel_path.to_string_lossy(),
e
);
fs::remove_file(self.rel_to_abs_path(rel_path))
.expect("Failed to remove file we just created!");
e
})
}
/// Add a file by calling `with` with the open `File` corresponding to the cache at path `key`.
pub fn insert_with<K: AsRef<OsStr>, F: FnOnce(File) -> io::Result<()>>(
&mut self,
key: K,
with: F,
) -> Result<()> {
self.insert_by(key, None, |path| with(File::create(path)?))
}
/// Add a file with `bytes` as its contents to the cache at path `key`.
pub fn insert_bytes<K: AsRef<OsStr>>(&mut self, key: K, bytes: &[u8]) -> Result<()> {
self.insert_by(key, Some(bytes.len() as u64), |path| {
let mut f = File::create(path)?;
f.write_all(bytes)?;
Ok(())
})
}
/// Add an existing file at `path` to the cache at path `key`.
pub fn insert_file<K: AsRef<OsStr>, P: AsRef<OsStr>>(&mut self, key: K, path: P) -> Result<()> {
let size = fs::metadata(path.as_ref())?.len();
self.insert_by(key, Some(size), |new_path| {
fs::rename(path.as_ref(), new_path).or_else(|_| {
warn!("fs::rename failed, falling back to copy!");
fs::copy(path.as_ref(), new_path)?;
fs::remove_file(path.as_ref()).unwrap_or_else(|e| {
error!("Failed to remove original file in insert_file: {}", e)
});
Ok(())
})
})
}
/// Return `true` if a file with path `key` is in the cache.
pub fn contains_key<K: AsRef<OsStr>>(&self, key: K) -> bool {
self.lru.contains_key(key.as_ref())
}
/// Get an opened `File` for `key`, if one exists and can be opened. Updates the LRU state
/// of the file if present. Avoid using this method if at all possible, prefer `.get`.
pub fn get_file<K: AsRef<OsStr>>(&mut self, key: K) -> Result<File> {
let rel_path = key.as_ref();
let path = self.rel_to_abs_path(rel_path);
self.lru
.get(rel_path)
.ok_or(Error::FileNotInCache)
.and_then(|_| {
let t = FileTime::now();
set_file_times(&path, t, t)?;
File::open(path).map_err(Into::into)
})
}
/// Get an opened readable and seekable handle to the file at `key`, if one exists and can
/// be opened. Updates the LRU state of the file if present.
pub fn get<K: AsRef<OsStr>>(&mut self, key: K) -> Result<Box<dyn ReadSeek>> {
self.get_file(key).map(|f| Box::new(f) as Box<dyn ReadSeek>)
}
/// Remove the given key from the cache.
pub fn remove<K: AsRef<OsStr>>(&mut self, key: K) -> Result<()> {
match self.lru.remove(key.as_ref()) {
Some(_) => {
let path = self.rel_to_abs_path(key.as_ref());
fs::remove_file(&path).map_err(|e| {
error!("Error removing file from cache: `{:?}`: {}", path, e);
Into::into(e)
})
}
None => Ok(()),
}
}
}
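// Editor's sketch: a minimal usage example of the cache API above, added for
// illustration only. The directory name, key, and sizes are arbitrary
// assumptions and are not part of the original crate.
#[allow(dead_code)]
fn lru_disk_cache_usage_sketch() -> Result<()> {
// Create a cache rooted under the system temp dir, capped at 1 KiB.
let mut cache = LruDiskCache::new(std::env::temp_dir().join("lru-usage-sketch"), 1024)?;
// Store a small payload under a relative key; nested keys create subdirectories.
cache.insert_bytes("examples/hello.txt", b"hello world")?;
assert!(cache.contains_key("examples/hello.txt"));
// Reading an entry back bumps its recency (persisted on disk as the file's mtime).
let mut handle = cache.get("examples/hello.txt")?;
let mut contents = Vec::new();
handle.read_to_end(&mut contents)?;
assert_eq!(contents, b"hello world".to_vec());
// Entries can also be evicted explicitly, independent of capacity pressure.
cache.remove("examples/hello.txt")?;
Ok(())
}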
#[cfg(test)]
mod tests {
use super::fs::{self, File};
use super::{Error, LruDiskCache};
use filetime::{set_file_times, FileTime};
use std::io::{self, Read, Write};
use std::path::{Path, PathBuf};
use tempfile::TempDir;
struct TestFixture {
/// Temp directory.
pub tempdir: TempDir,
}
fn create_file<T: AsRef<Path>, F: FnOnce(File) -> io::Result<()>>(
dir: &Path,
path: T,
fill_contents: F,
) -> io::Result<PathBuf> {
let b = dir.join(path);
fs::create_dir_all(b.parent().unwrap())?;
let f = fs::File::create(&b)?;
fill_contents(f)?;
b.canonicalize()
}
/// Set the last modified time of `path` backwards by `seconds` seconds.
fn set_mtime_back<T: AsRef<Path>>(path: T, seconds: usize) {
let m = fs::metadata(path.as_ref()).unwrap();
let t = FileTime::from_last_modification_time(&m);
let t = FileTime::from_unix_time(t.unix_seconds() - seconds as i64, t.nanoseconds());
set_file_times(path, t, t).unwrap();
}
fn read_all<R: Read>(r: &mut R) -> io::Result<Vec<u8>> {
let mut v = vec![];
r.read_to_end(&mut v)?;
Ok(v)
}
impl TestFixture {
pub fn new() -> TestFixture {
TestFixture {
tempdir: tempfile::Builder::new()
.prefix("lru-disk-cache-test")
.tempdir()
.unwrap(),
}
}
pub fn tmp(&self) -> &Path {
self.tempdir.path()
}
pub fn create_file<T: AsRef<Path>>(&self, path: T, size: usize) -> PathBuf {
create_file(self.tempdir.path(), path, |mut f| {
f.write_all(&vec![0; size])
})
.unwrap()
}
}
#[test]
fn test_empty_dir() {
let f = TestFixture::new();
LruDiskCache::new(f.tmp(), 1024).unwrap();
}
#[test]
fn test_missing_root() {
let f = TestFixture::new();
LruDiskCache::new(f.tmp().join("not-here"), 1024).unwrap();
}
#[test]
fn test_some_existing_files() {
let f = TestFixture::new();
f.create_file("file1", 10);
f.create_file("file2", 10);
let c = LruDiskCache::new(f.tmp(), 20).unwrap();
assert_eq!(c.size(), 20);
assert_eq!(c.len(), 2);
}
#[test]
fn test_existing_file_too_large() {
let f = TestFixture::new();
// Create files explicitly in the past.
set_mtime_back(f.create_file("file1", 10), 10);
set_mtime_back(f.create_file("file2", 10), 5);
let c = LruDiskCache::new(f.tmp(), 15).unwrap();
assert_eq!(c.size(), 10);
assert_eq!(c.len(), 1);
assert!(!c.contains_key("file1"));
assert!(c.contains_key("file2"));
}
#[test]
fn test_existing_files_lru_mtime() {
let f = TestFixture::new();
// Create files explicitly in the past.
set_mtime_back(f.create_file("file1", 10), 5);
set_mtime_back(f.create_file("file2", 10), 10);
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
assert_eq!(c.size(), 20);
c.insert_bytes("file3", &[0; 10]).unwrap();
assert_eq!(c.size(), 20);
// The oldest file on disk should have been removed.
assert!(!c.contains_key("file2"));
assert!(c.contains_key("file1"));
}
#[test]
fn test_insert_bytes() {
let f = TestFixture::new();
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
c.insert_bytes("a/b/c", &[0; 10]).unwrap();
assert!(c.contains_key("a/b/c"));
c.insert_bytes("a/b/d", &[0; 10]).unwrap();
assert_eq!(c.size(), 20);
// Adding this third file should put the cache above the limit.
c.insert_bytes("x/y/z", &[0; 10]).unwrap();
assert_eq!(c.size(), 20);
// The least-recently-used file should have been removed.
assert!(!c.contains_key("a/b/c"));
assert!(!f.tmp().join("a/b/c").exists());
}
#[test]
fn test_insert_bytes_exact() {
// Test that files adding up to exactly the size limit works.
let f = TestFixture::new();
let mut c = LruDiskCache::new(f.tmp(), 20).unwrap();
c.insert_bytes("file1", &[1; 10]).unwrap();
c.insert_bytes("file2", &[2; 10]).unwrap();
assert_eq!(c.size(), 20);
c.insert_bytes("file3", &[3; 10]).unwrap();
assert_eq!(c.size(), 20);
assert!(!c.contains_key("file1"));
}
#[test]
fn test_add_get_lru() {
let f = TestFixture::new();
{
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
c.insert_bytes("file1", &[1; 10]).unwrap();
c.insert_bytes("file2", &[2; 10]).unwrap();
// Get the file to bump its LRU status.
assert_eq!(
read_all(&mut c.get("file1").unwrap()).unwrap(),
vec![1u8; 10]
);
// Adding this third file should put the cache above the limit.
c.insert_bytes("file3", &[3; 10]).unwrap();
assert_eq!(c.size(), 20);
// The least-recently-used file should have been removed.
assert!(!c.contains_key("file2"));
}
// Get rid of the cache, to test that the LRU persists on-disk as mtimes.
// This is hacky, but mtime resolution on my mac with HFS+ is only 1 second, so we either
// need to have a 1 second sleep in the test (boo) or adjust the mtimes back a bit so
// that updating one file to the current time actually works to make it newer.
set_mtime_back(f.tmp().join("file1"), 5);
set_mtime_back(f.tmp().join("file3"), 5);
{
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
// Bump file1 again.
c.get("file1").unwrap();
}
// Now check that the on-disk mtimes were updated and used.
{
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
assert!(c.contains_key("file1"));
assert!(c.contains_key("file3"));
assert_eq!(c.size(), 20);
// Add another file to bump out the least-recently-used.
c.insert_bytes("file4", &[4; 10]).unwrap();
assert_eq!(c.size(), 20);
assert!(!c.contains_key("file3"));
assert!(c.contains_key("file1"));
}
}
#[test]
fn test_insert_bytes_too_large() {
let f = TestFixture::new();
let mut c = LruDiskCache::new(f.tmp(), 1).unwrap();
match c.insert_bytes("a/b/c", &[0; 2]) {
Err(Error::FileTooLarge) => {}
x => panic!("Unexpected result: {:?}", x),
}
}
#[test]
fn test_insert_file() {
let f = TestFixture::new();
let p1 = f.create_file("file1", 10);
let p2 = f.create_file("file2", 10);
let p3 = f.create_file("file3", 10);
let mut c = LruDiskCache::new(f.tmp().join("cache"), 25).unwrap();
c.insert_file("file1", &p1).unwrap();
assert_eq!(c.len(), 1);
| measure | identifier_name |
|
mod.rs | <Item = (PathBuf, u64)>> {
let mut files: Vec<_> = WalkDir::new(path.as_ref())
.into_iter()
.filter_map(|e| {
e.ok().and_then(|f| {
// Only look at files
if f.file_type().is_file() {
// Get the last-modified time, size, and the full path.
f.metadata().ok().and_then(|m| {
m.modified()
.ok()
.map(|mtime| (mtime, f.path().to_owned(), m.len()))
})
} else {
None
}
})
})
.collect();
// Sort by last-modified-time, so oldest file first.
files.sort_by_key(|k| k.0);
Box::new(files.into_iter().map(|(_mtime, path, size)| (path, size)))
}
/// An LRU cache of files on disk.
pub struct LruDiskCache<S: BuildHasher = RandomState> {
lru: LruCache<OsString, u64, S, FileSize>,
root: PathBuf,
}
/// Errors returned by this crate.
#[derive(Debug)]
pub enum Error {
/// The file was too large to fit in the cache.
FileTooLarge,
/// The file was not in the cache.
FileNotInCache,
/// An IO Error occurred.
Io(io::Error),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::FileTooLarge => write!(f, "File too large"),
Error::FileNotInCache => write!(f, "File not in cache"),
Error::Io(ref e) => write!(f, "{}", e),
}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::FileTooLarge => None,
Error::FileNotInCache => None,
Error::Io(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
Error::Io(e)
}
}
/// A convenience `Result` type
pub type Result<T> = std::result::Result<T, Error>;
/// Trait objects can't be bounded by more than one non-builtin trait.
pub trait ReadSeek: Read + Seek + Send {}
impl<T: Read + Seek + Send> ReadSeek for T {}
enum AddFile<'a> {
AbsPath(PathBuf),
RelPath(&'a OsStr),
}
impl LruDiskCache {
/// Create an `LruDiskCache` that stores files in `path`, limited to `size` bytes.
///
/// Existing files in `path` will be stored with their last-modified time from the filesystem
/// used as the order for the recency of their use. Any files that are individually larger
/// than `size` bytes will be removed.
///
/// The cache is not observant of changes to files under `path` from external sources; it
/// expects to have sole maintenance of the contents.
pub fn new<T>(path: T, size: u64) -> Result<Self>
where
PathBuf: From<T>,
{
LruDiskCache {
lru: LruCache::with_meter(size, FileSize),
root: PathBuf::from(path),
}
.init()
}
/// Return the current size of all the files in the cache.
pub fn size(&self) -> u64 {
self.lru.size()
}
/// Return the count of entries in the cache.
pub fn len(&self) -> usize {
self.lru.len()
}
pub fn is_empty(&self) -> bool {
self.lru.len() == 0
}
/// Return the maximum size of the cache.
pub fn capacity(&self) -> u64 {
self.lru.capacity()
}
/// Return the path in which the cache is stored.
pub fn path(&self) -> &Path {
self.root.as_path()
}
/// Return the path that `key` would be stored at.
fn rel_to_abs_path<K: AsRef<Path>>(&self, rel_path: K) -> PathBuf {
self.root.join(rel_path)
}
/// Scan `self.root` for existing files and store them.
fn init(mut self) -> Result<Self> {
fs::create_dir_all(&self.root)?;
for (file, size) in get_all_files(&self.root) {
if !self.can_store(size) {
fs::remove_file(&file).unwrap_or_else(|e| {
error!(
"Error removing file `{:?}` which is too large for the cache ({} bytes): {}",
file, size, e
)
});
} else {
self.add_file(AddFile::AbsPath(file), size)
.unwrap_or_else(|e| error!("Error adding file: {}", e));
}
}
Ok(self)
}
/// Returns `true` if the disk cache can store a file of `size` bytes.
pub fn can_store(&self, size: u64) -> bool {
size <= self.lru.capacity()
}
/// Add the file at `path` of size `size` to the cache.
fn add_file(&mut self, addfile_path: AddFile<'_>, size: u64) -> Result<()> {
if !self.can_store(size) {
return Err(Error::FileTooLarge);
}
let rel_path = match addfile_path {
AddFile::AbsPath(ref p) => p.strip_prefix(&self.root).expect("Bad path?").as_os_str(),
AddFile::RelPath(p) => p,
};
//TODO: ideally LRUCache::insert would give us back the entries it had to remove.
while self.lru.size() + size > self.lru.capacity() {
let (rel_path, _) = self.lru.remove_lru().expect("Unexpectedly empty cache!");
let remove_path = self.rel_to_abs_path(rel_path);
//TODO: check that files are removable during `init`, so that this is only
// due to outside interference.
fs::remove_file(&remove_path).unwrap_or_else(|e| {
panic!("Error removing file from cache: `{:?}`: {}", remove_path, e)
});
}
self.lru.insert(rel_path.to_owned(), size);
Ok(())
}
fn insert_by<K: AsRef<OsStr>, F: FnOnce(&Path) -> io::Result<()>>(
&mut self,
key: K,
size: Option<u64>,
by: F,
) -> Result<()> {
if let Some(size) = size {
if !self.can_store(size) {
return Err(Error::FileTooLarge);
}
}
let rel_path = key.as_ref();
let path = self.rel_to_abs_path(rel_path);
fs::create_dir_all(path.parent().expect("Bad path?"))?;
by(&path)?;
let size = match size {
Some(size) => size,
None => fs::metadata(path)?.len(),
};
self.add_file(AddFile::RelPath(rel_path), size)
.map_err(|e| {
error!(
"Failed to insert file `{}`: {}",
rel_path.to_string_lossy(),
e
);
fs::remove_file(self.rel_to_abs_path(rel_path))
.expect("Failed to remove file we just created!");
e
})
}
/// Add a file by calling `with` with the open `File` corresponding to the cache at path `key`.
pub fn insert_with<K: AsRef<OsStr>, F: FnOnce(File) -> io::Result<()>>(
&mut self,
key: K,
with: F,
) -> Result<()> {
self.insert_by(key, None, |path| with(File::create(path)?))
}
/// Add a file with `bytes` as its contents to the cache at path `key`.
pub fn insert_bytes<K: AsRef<OsStr>>(&mut self, key: K, bytes: &[u8]) -> Result<()> {
self.insert_by(key, Some(bytes.len() as u64), |path| {
let mut f = File::create(path)?;
f.write_all(bytes)?;
Ok(())
})
}
/// Add an existing file at `path` to the cache at path `key`.
pub fn insert_file<K: AsRef<OsStr>, P: AsRef<OsStr>>(&mut self, key: K, path: P) -> Result<()> {
let size = fs::metadata(path.as_ref())?.len();
self.insert_by(key, Some(size), |new_path| {
fs::rename(path.as_ref(), new_path).or_else(|_| {
warn!("fs::rename failed, falling back to copy!");
fs::copy(path.as_ref(), new_path)?;
fs::remove_file(path.as_ref()).unwrap_or_else(|e| {
error!("Failed to remove original file in insert_file: {}", e)
});
Ok(())
})
})
}
/// Return `true` if a file with path `key` is in the cache.
pub fn contains_key<K: AsRef<OsStr>>(&self, key: K) -> bool {
self.lru.contains_key(key.as_ref())
}
/// Get an opened `File` for `key`, if one exists and can be opened. Updates the LRU state
/// of the file if present. Avoid using this method if at all possible, prefer `.get`.
pub fn get_file<K: AsRef<OsStr>>(&mut self, key: K) -> Result<File> {
let rel_path = key.as_ref();
let path = self.rel_to_abs_path(rel_path);
self.lru
.get(rel_path)
.ok_or(Error::FileNotInCache)
.and_then(|_| {
let t = FileTime::now();
set_file_times(&path, t, t)?;
File::open(path).map_err(Into::into)
})
}
/// Get an opened readable and seekable handle to the file at `key`, if one exists and can
/// be opened. Updates the LRU state of the file if present.
pub fn get<K: AsRef<OsStr>>(&mut self, key: K) -> Result<Box<dyn ReadSeek>> {
self.get_file(key).map(|f| Box::new(f) as Box<dyn ReadSeek>)
}
/// Remove the given key from the cache.
pub fn remove<K: AsRef<OsStr>>(&mut self, key: K) -> Result<()> {
match self.lru.remove(key.as_ref()) {
Some(_) => {
let path = self.rel_to_abs_path(key.as_ref());
fs::remove_file(&path).map_err(|e| {
error!("Error removing file from cache: `{:?}`: {}", path, e);
Into::into(e)
})
}
None => Ok(()),
}
}
}
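// Editor's sketch: the callback-based insertion path, shown for illustration only.
// The cache root and key below are assumptions; the closure receives the freshly
// created `File` and may write whatever it likes before the entry is sized.
#[allow(dead_code)]
fn insert_with_sketch() -> Result<()> {
let mut cache = LruDiskCache::new(std::env::temp_dir().join("lru-insert-with-sketch"), 4096)?;
cache.insert_with("logs/run.txt", |mut file| {
// Whatever is written here is measured with fs::metadata once the closure returns.
file.write_all(b"generated inside the closure")
})?;
assert!(cache.contains_key("logs/run.txt"));
Ok(())
}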
#[cfg(test)]
mod tests {
use super::fs::{self, File};
use super::{Error, LruDiskCache};
use filetime::{set_file_times, FileTime};
use std::io::{self, Read, Write};
use std::path::{Path, PathBuf};
use tempfile::TempDir;
struct TestFixture {
/// Temp directory.
pub tempdir: TempDir,
}
fn create_file<T: AsRef<Path>, F: FnOnce(File) -> io::Result<()>>(
dir: &Path,
path: T,
fill_contents: F,
) -> io::Result<PathBuf> {
let b = dir.join(path);
fs::create_dir_all(b.parent().unwrap())?;
let f = fs::File::create(&b)?;
fill_contents(f)?;
b.canonicalize()
}
/// Set the last modified time of `path` backwards by `seconds` seconds.
fn set_mtime_back<T: AsRef<Path>>(path: T, seconds: usize) {
let m = fs::metadata(path.as_ref()).unwrap();
let t = FileTime::from_last_modification_time(&m);
let t = FileTime::from_unix_time(t.unix_seconds() - seconds as i64, t.nanoseconds());
set_file_times(path, t, t).unwrap();
}
fn read_all<R: Read>(r: &mut R) -> io::Result<Vec<u8>> {
let mut v = vec![];
r.read_to_end(&mut v)?;
Ok(v)
}
impl TestFixture {
pub fn new() -> TestFixture {
TestFixture {
tempdir: tempfile::Builder::new()
.prefix("lru-disk-cache-test")
.tempdir()
.unwrap(),
}
}
pub fn tmp(&self) -> &Path {
self.tempdir.path()
}
pub fn create_file<T: AsRef<Path>>(&self, path: T, size: usize) -> PathBuf {
create_file(self.tempdir.path(), path, |mut f| {
f.write_all(&vec![0; size])
})
.unwrap()
}
}
#[test]
fn test_empty_dir() {
let f = TestFixture::new();
LruDiskCache::new(f.tmp(), 1024).unwrap();
}
#[test]
fn test_missing_root() {
let f = TestFixture::new();
LruDiskCache::new(f.tmp().join("not-here"), 1024).unwrap();
}
#[test]
fn test_some_existing_files() {
let f = TestFixture::new();
f.create_file("file1", 10);
f.create_file("file2", 10);
let c = LruDiskCache::new(f.tmp(), 20).unwrap();
assert_eq!(c.size(), 20);
assert_eq!(c.len(), 2);
}
#[test]
fn test_existing_file_too_large() {
let f = TestFixture::new();
// Create files explicitly in the past.
set_mtime_back(f.create_file("file1", 10), 10);
set_mtime_back(f.create_file("file2", 10), 5);
let c = LruDiskCache::new(f.tmp(), 15).unwrap();
assert_eq!(c.size(), 10);
assert_eq!(c.len(), 1);
assert!(!c.contains_key("file1"));
assert!(c.contains_key("file2"));
}
#[test]
fn test_existing_files_lru_mtime() { | set_mtime_back(f.create_file("file1", 10), 5);
set_mtime_back(f.create_file("file2", 10), 10);
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
assert_eq!(c.size(), 20);
c.insert_bytes("file3", &[0; 10]).unwrap();
assert_eq!(c.size(), 20);
// The oldest file on disk should have been removed.
assert!(!c.contains_key("file2"));
assert!(c.contains_key("file1"));
}
#[test]
fn test_insert_bytes() {
let f = TestFixture::new();
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
c.insert_bytes("a/b/c", &[0; 10]).unwrap();
assert!(c.contains_key("a/b/c"));
c.insert_bytes("a/b/d", &[0; 10]).unwrap();
assert_eq!(c.size(), 20);
// Adding this third file should put the cache above the limit.
c.insert_bytes("x/y/z", &[0; 10]).unwrap();
assert_eq!(c.size(), 20);
// The least-recently-used file should have been removed.
assert!(!c.contains_key("a/b/c"));
assert!(!f.tmp().join("a/b/c").exists());
}
#[test]
fn test_insert_bytes_exact() {
// Test that files adding up to exactly the size limit works.
let f = TestFixture::new();
let mut c = LruDiskCache::new(f.tmp(), 20).unwrap();
c.insert_bytes("file1", &[1; 10]).unwrap();
c.insert_bytes("file2", &[2; 10]).unwrap();
assert_eq!(c.size(), 20);
c.insert_bytes("file3", &[3; 10]).unwrap();
assert_eq!(c.size(), 20);
assert!(!c.contains_key("file1"));
}
#[test]
fn test_add_get_lru() {
let f = TestFixture::new();
{
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
c.insert_bytes("file1", &[1; 10]).unwrap();
c.insert_bytes("file2", &[2; 10]).unwrap();
// Get the file to bump its LRU status.
assert_eq!(
read_all(&mut c.get("file1").unwrap()).unwrap(),
vec![1u8; 10]
);
// Adding this third file should put the cache above the limit.
c.insert_bytes("file3", &[3; 10]).unwrap();
assert_eq!(c.size(), 20);
// The least-recently-used file should have been removed.
assert!(!c.contains_key("file2"));
}
// Get rid of the cache, to test that the LRU persists on-disk as mtimes.
// This is hacky, but mtime resolution on my mac with HFS+ is only 1 second, so we either
// need to have a 1 second sleep in the test (boo) or adjust the mtimes back a bit so
// that updating one file to the current time actually works to make it newer.
set_mtime_back(f.tmp().join("file1"), 5);
set_mtime_back(f.tmp().join("file3"), 5);
{
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
// Bump file1 again.
c.get("file1").unwrap();
}
// Now check that the on-disk mtimes were updated and used.
{
let mut c = LruDiskCache::new(f.tmp(), 25).unwrap();
assert!(c.contains_key("file1"));
assert!(c.contains_key("file3"));
assert_eq!(c.size(), 20);
// Add another file to bump out the least-recently-used.
c.insert_bytes("file4", &[4; 10]).unwrap();
assert_eq!(c.size(), 20);
assert!(!c.contains_key("file3"));
assert!(c.contains_key("file1"));
}
}
#[test]
fn test_insert_bytes_too_large() {
let f = TestFixture::new();
let mut c = LruDiskCache::new(f.tmp(), 1).unwrap();
match c.insert_bytes("a/b/c", &[0; 2]) {
Err(Error::FileTooLarge) => {}
x => panic!("Unexpected result: {:?}", x),
}
}
#[test]
fn test_insert_file() {
let f = TestFixture::new();
let p1 = f.create_file("file1", 10);
let p2 = f.create_file("file2", 10);
let p3 = f.create_file("file3", 10);
let mut c = LruDiskCache::new(f.tmp().join("cache"), 25).unwrap();
c.insert_file("file1", &p1).unwrap();
assert_eq!(c.len(), 1);
c.insert_file("file2", &p2).unwrap();
assert_eq!(c.len(), 2);
// Get the file to bump its LRU status.
assert_eq!(
read_all(&mut c.get("file1").unwrap()).unwrap(),
vec![0u8; 10]
);
| let f = TestFixture::new();
// Create files explicitly in the past. | random_line_split |
lib.rs | use bevy::{
prelude::*,
render::camera::Camera,
render::color::Color,
render::mesh::{VertexAttribute, VertexAttributeValues},
render::pipeline::PrimitiveTopology,
window::CursorMoved,
};
pub struct PickingPlugin;
impl Plugin for PickingPlugin {
fn build(&self, app: &mut AppBuilder) {
app.init_resource::<PickState>()
.init_resource::<PickHighlightParams>()
.add_system(pick_mesh.system())
.add_system(select_mesh.system())
.add_system(pick_highlighting.system());
}
}
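// Editor's sketch: minimal wiring of the plugin into an app. `add_default_plugins`
// and the builder calls are assumptions based on the Bevy API this file appears to
// target; adapt them to the exact Bevy version in use.
#[allow(dead_code)]
fn picking_app_sketch() {
App::build()
.add_default_plugins()
.add_plugin(PickingPlugin)
.run();
}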
pub struct PickState {
cursor_event_reader: EventReader<CursorMoved>,
ordered_pick_list: Vec<PickIntersection>,
topmost_pick: Option<PickIntersection>,
}
impl PickState {
pub fn list(&self) -> &Vec<PickIntersection> {
&self.ordered_pick_list
}
pub fn top(&self) -> &Option<PickIntersection> |
}
impl Default for PickState {
fn default() -> Self {
PickState {
cursor_event_reader: EventReader::default(),
ordered_pick_list: Vec::new(),
topmost_pick: None,
}
}
}
/// Holds the entity associated with a mesh as well as its computed intersection from a pick ray cast
#[derive(Debug, PartialOrd, PartialEq, Copy, Clone)]
pub struct PickIntersection {
entity: Entity,
pick_coord_ndc: Vec3,
}
impl PickIntersection {
fn new(entity: Entity, pick_coord_ndc: Vec3) -> Self {
PickIntersection {
entity,
pick_coord_ndc,
}
}
pub fn get_pick_coord_ndc(&self) -> Vec3 {
self.pick_coord_ndc
}
pub fn get_pick_coord_world(&self, projection_matrix: Mat4, view_matrix: Mat4) -> Vec3 {
let world_pos: Vec4 = (projection_matrix * view_matrix)
.inverse()
.mul_vec4(self.pick_coord_ndc.extend(1.0));
(world_pos / world_pos.w()).truncate().into()
}
}
#[derive(Debug)]
pub struct PickHighlightParams {
hover_color: Color,
selection_color: Color,
}
impl PickHighlightParams {
pub fn set_hover_color(&mut self, color: Color) {
self.hover_color = color;
}
pub fn set_selection_color(&mut self, color: Color) {
self.selection_color = color;
}
}
impl Default for PickHighlightParams {
fn default() -> Self {
PickHighlightParams {
hover_color: Color::rgb(0.3, 0.5, 0.8),
selection_color: Color::rgb(0.3, 0.8, 0.5),
}
}
}
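// Editor's sketch: overriding the highlight colors at startup. The system name and
// color values are illustrative assumptions, not part of the plugin.
#[allow(dead_code)]
fn configure_highlighting(mut params: ResMut<PickHighlightParams>) {
params.set_hover_color(Color::rgb(0.9, 0.8, 0.2));
params.set_selection_color(Color::rgb(0.2, 0.9, 0.4));
}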
/// Marks an entity as pickable
#[derive(Debug)]
pub struct PickableMesh {
camera_entity: Entity,
bounding_sphere: Option<BoundSphere>,
pick_coord_ndc: Option<Vec3>,
}
impl PickableMesh {
pub fn new(camera_entity: Entity) -> Self {
PickableMesh {
camera_entity,
bounding_sphere: None,
pick_coord_ndc: None,
}
}
pub fn get_pick_coord_ndc(&self) -> Option<Vec3> {
self.pick_coord_ndc
}
}
/// Meshes with `SelectableMesh` will have selection state managed
#[derive(Debug)]
pub struct SelectablePickMesh {
selected: bool,
}
impl SelectablePickMesh {
pub fn new() -> Self {
SelectablePickMesh { selected: false }
}
pub fn selected(&self) -> bool {
self.selected
}
}
/// Meshes with `HighlightablePickMesh` will be highlighted when hovered over. If the mesh also has
/// the `SelectablePickMesh` component, it will highlight when selected.
#[derive(Debug)]
pub struct HighlightablePickMesh {
// Stores the initial color of the mesh material prior to selecting/hovering
initial_color: Option<Color>,
}
impl HighlightablePickMesh {
pub fn new() -> Self {
HighlightablePickMesh {
initial_color: None,
}
}
}
/// Defines a bounding sphere with a center point coordinate and a radius, used for picking
#[derive(Debug)]
struct BoundSphere {
mesh_radius: f32,
transformed_radius: Option<f32>,
ndc_def: Option<NdcBoundingCircle>,
}
impl From<&Mesh> for BoundSphere {
fn from(mesh: &Mesh) -> Self {
let mut mesh_radius = 0f32;
if mesh.primitive_topology != PrimitiveTopology::TriangleList {
panic!("Non-TriangleList mesh supplied for bounding sphere generation")
}
let mut vertex_positions = Vec::new();
for attribute in mesh.attributes.iter() {
if attribute.name == VertexAttribute::POSITION {
vertex_positions = match &attribute.values {
VertexAttributeValues::Float3(positions) => positions.clone(),
_ => panic!("Unexpected vertex types in VertexAttribute::POSITION"),
};
}
}
if let Some(indices) = &mesh.indices {
for index in indices.iter() {
mesh_radius =
mesh_radius.max(Vec3::from(vertex_positions[*index as usize]).length());
}
}
BoundSphere {
mesh_radius,
transformed_radius: None,
ndc_def: None,
}
}
}
/// Created from a BoundSphere, this represents a circle that bounds the entity's mesh when the
/// bounding sphere is projected onto the screen. Note this is not as simple as transforming the
/// sphere's origin into ndc and copying the radius. Due to rectilinear projection, the sphere
/// will be projected onto the screen as an ellipse if it is not perfectly centered at 0,0 in ndc.
/// Scale ndc circle based on linear function "abs(x(sec(arctan(tan(b/2)))-1)) + 1" where b = FOV
/// All the trig can be simplified to a coeff "c" abs(x*c+1)
#[derive(Debug)]
struct NdcBoundingCircle {
center: Vec2,
radius: f32,
}
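// Editor's sketch: the simplification hinted at above. Since arctan(tan(b/2)) = b/2
// for b/2 in (-pi/2, pi/2), sec(arctan(tan(b/2))) reduces to 1/cos(b/2), so the
// coefficient is c = 1/cos(fov/2) - 1. This helper follows the first written form,
// abs(x*c) + 1, and is illustrative only; it is not used by the plugin.
#[allow(dead_code)]
fn ndc_circle_scale(ndc_x: f32, fov_y_radians: f32) -> f32 {
let c = 1.0 / (fov_y_radians * 0.5).cos() - 1.0;
(ndc_x * c).abs() + 1.0
}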
/// Given the current selected and hovered meshes and provided materials, update the meshes with the
/// appropriate materials.
fn pick_highlighting(
// Resources
pick_state: Res<PickState>,
mut materials: ResMut<Assets<StandardMaterial>>,
highlight_params: Res<PickHighlightParams>,
// Queries
mut query_picked: Query<(
&mut HighlightablePickMesh,
Changed<PickableMesh>,
&Handle<StandardMaterial>,
Entity,
)>,
mut query_selected: Query<(
&mut HighlightablePickMesh,
Changed<SelectablePickMesh>,
&Handle<StandardMaterial>,
)>,
query_selectables: Query<&SelectablePickMesh>,
) {
// Query selectable entities that have changed
for (mut highlightable, selectable, material_handle) in &mut query_selected.iter() {
let current_color = &mut materials.get_mut(material_handle).unwrap().albedo;
let initial_color = match highlightable.initial_color {
None => {
highlightable.initial_color = Some(*current_color);
*current_color
}
Some(color) => color,
};
if selectable.selected {
*current_color = highlight_params.selection_color;
} else {
*current_color = initial_color;
}
}
// Query highlightable entities that have changed
for (mut highlightable, _pickable, material_handle, entity) in &mut query_picked.iter() {
let current_color = &mut materials.get_mut(material_handle).unwrap().albedo;
let initial_color = match highlightable.initial_color {
None => {
highlightable.initial_color = Some(*current_color);
*current_color
}
Some(color) => color,
};
let mut topmost = false;
if let Some(pick_depth) = pick_state.topmost_pick {
topmost = pick_depth.entity == entity;
}
if topmost {
*current_color = highlight_params.hover_color;
} else {
if let Ok(mut query) = query_selectables.entity(entity) {
if let Some(selectable) = query.get() {
if selectable.selected {
*current_color = highlight_params.selection_color;
} else {
*current_color = initial_color;
}
}
} else {
*current_color = initial_color;
}
}
}
}
/// Given the currently hovered mesh, checks for a user click and if detected, sets the selected
/// field in the entity's component to true.
fn select_mesh(
// Resources
pick_state: Res<PickState>,
mouse_button_inputs: Res<Input<MouseButton>>,
// Queries
mut query: Query<&mut SelectablePickMesh>,
) {
if mouse_button_inputs.just_pressed(MouseButton::Left) {
// Deselect everything
for mut selectable in &mut query.iter() {
selectable.selected = false;
}
if let Some(pick_depth) = pick_state.topmost_pick {
if let Ok(mut top_mesh) = query.get_mut::<SelectablePickMesh>(pick_depth.entity) {
top_mesh.selected = true;
}
}
}
}
/// Casts a ray into the scene from the cursor position, tracking pickable meshes that are hit.
fn pick_mesh(
// Resources
mut pick_state: ResMut<PickState>,
cursor: Res<Events<CursorMoved>>,
meshes: Res<Assets<Mesh>>,
windows: Res<Windows>,
// Queries
mut mesh_query: Query<(&Handle<Mesh>, &Transform, &mut PickableMesh, Entity)>,
mut camera_query: Query<(&Transform, &Camera)>,
) {
// Get the cursor position
let cursor_pos_screen: Vec2 = match pick_state.cursor_event_reader.latest(&cursor) {
Some(cursor_moved) => cursor_moved.position,
None => return,
};
// Get current screen size
let window = windows.get_primary().unwrap();
let screen_size = Vec2::from([window.width as f32, window.height as f32]);
// Normalized device coordinates (NDC) describe the cursor position from (-1, -1) to (1, 1)
let cursor_pos_ndc: Vec2 = (cursor_pos_screen / screen_size) * 2.0 - Vec2::from([1.0, 1.0]);
// Get the view transform and projection matrix from the camera
let mut view_matrix = Mat4::zero();
let mut projection_matrix = Mat4::zero();
for (transform, camera) in &mut camera_query.iter() {
view_matrix = transform.value.inverse();
projection_matrix = camera.projection_matrix;
}
// After initial checks completed, clear the pick list
pick_state.ordered_pick_list.clear();
pick_state.topmost_pick = None;
// Iterate through each pickable mesh in the scene
for (mesh_handle, transform, mut pickable, entity) in &mut mesh_query.iter() {
// Use the mesh handle to get a reference to a mesh asset
if let Some(mesh) = meshes.get(mesh_handle) {
if mesh.primitive_topology != PrimitiveTopology::TriangleList {
continue;
}
// The ray cast can hit the same mesh many times, so we need to track which hit is
// closest to the camera, and record that.
let mut hit_depth = f32::MAX;
// We need to transform the mesh vertices' positions from the mesh space to the world
// space using the mesh's transform, move it to the camera's space using the view
// matrix (camera.inverse), and finally, apply the projection matrix. Because column
// matrices are evaluated right to left, we have to order it correctly:
let mesh_to_cam_transform = view_matrix * transform.value;
// Get the vertex positions from the mesh reference resolved from the mesh handle
let vertex_positions: Vec<[f32; 3]> = mesh
.attributes
.iter()
.filter(|attribute| attribute.name == VertexAttribute::POSITION)
.filter_map(|attribute| match &attribute.values {
VertexAttributeValues::Float3(positions) => Some(positions.clone()),
_ => panic!("Unexpected vertex types in VertexAttribute::POSITION"),
})
.last()
.unwrap();
// We have everything set up, now we can jump into the mesh's list of indices and
// check triangles for cursor intersection.
if let Some(indices) = &mesh.indices {
let mut hit_found = false;
// Now that we're in the vector of vertex indices, we want to look at the vertex
// positions for each triangle, so we'll take indices in chunks of three, where each
// chunk of three indices are references to the three vertices of a triangle.
for index in indices.chunks(3) {
// Make sure this chunk has 3 vertices to avoid a panic.
if index.len() == 3 {
// Set up an empty container for triangle vertices
let mut triangle: [Vec3; 3] = [Vec3::zero(), Vec3::zero(), Vec3::zero()];
// We can now grab the position of each vertex in the triangle using the
// indices pointing into the position vector. These positions are relative
// to the coordinate system of the mesh the vertex/triangle belongs to. To
// test if the triangle is being hovered over, we need to convert this to
// NDC (normalized device coordinates)
for i in 0..3 {
// Get the raw vertex position using the index
let mut vertex_pos = Vec3::from(vertex_positions[index[i] as usize]);
// Transform the vertex to world space with the mesh transform, then
// into camera space with the view transform.
vertex_pos = mesh_to_cam_transform.transform_point3(vertex_pos);
// This next part seems to be a bug with glam - it should do the divide
// by w perspective math for us, instead we have to do it manually.
// `glam` PR https://github.com/bitshifter/glam-rs/pull/75/files
let transformed = projection_matrix.mul_vec4(vertex_pos.extend(1.0));
let w_recip = transformed.w().abs().recip();
triangle[i] = Vec3::from(transformed.truncate() * w_recip);
}
if !triangle_behind_cam(triangle) {
if point_in_tri(
&cursor_pos_ndc,
&Vec2::new(triangle[0].x(), triangle[0].y()),
&Vec2::new(triangle[1].x(), triangle[1].y()),
&Vec2::new(triangle[2].x(), triangle[2].y()),
) {
hit_found = true;
if triangle[0].z() < hit_depth {
hit_depth = triangle[0].z();
}
}
}
}
}
// Finished going through the current mesh, update pick states
let pick_coord_ndc = cursor_pos_ndc.extend(hit_depth);
pickable.pick_coord_ndc = Some(pick_coord_ndc);
if hit_found {
pick_state
.ordered_pick_list
.push(PickIntersection::new(entity, pick_coord_ndc));
}
} else {
// If we get here the mesh doesn't have an index list!
panic!(
"No index matrix found in mesh {:?}\n{:?}",
mesh_handle, mesh
);
}
}
}
// Sort the pick list
pick_state
.ordered_pick_list
.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
// The pick_state resource we have access to is not sorted, so we need to manually grab the
// lowest value;
if !pick_state.ordered_pick_list.is_empty() {
let mut nearest_index = 0usize;
let mut nearest_depth = f32::MAX;
for (index, pick) in pick_state.ordered_pick_list.iter().enumerate() {
let current_depth = pick.pick_coord_ndc.z();
if current_depth < nearest_depth {
nearest_depth = current_depth;
nearest_index = index;
}
}
pick_state.topmost_pick = Some(pick_state.ordered_pick_list[nearest_index]);
}
}
/// Compute twice the area of a triangle given 2D vertex coordinates; the "/2" is omitted to save an operation
fn double_tri_area(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 {
f32::abs(a.x() * (b.y() - c.y()) + b.x() * (c.y() - a.y()) + c.x() * (a.y() - b.y()))
}
/// Checks if a point is inside a triangle by comparing the summed areas of the sub-triangles;
/// the point is inside the triangle if the areas are equal. An epsilon is used due to floating
/// point error. Todo: barycentric method (a sketch of that variant follows this function).
fn point_in_tri(p: &Vec2, a: &Vec2, b: &Vec2, c: &Vec2) -> bool {
let area = double_tri_area(a, b, c);
let pab = double_tri_area(p, a, b);
let pac = double_tri_area(p, a, c);
let pbc = double_tri_area(p, b, c);
let area_tris = pab + pac + pbc;
let epsilon = 0.00001;
let result: bool = f32::abs(area - area_tris) < epsilon;
/*
if result {
println!("Hit: {:.3} {:.3} {:.3},{:.3} {:.3},{:.3} {:.3},{:.3} ", area, area_tris, a.x(), a.y(), b.x(), b.y(), c.x(), c.y());
} else {
println!("No Hit: {:.3} {:.3} {:.3},{:.3} {:.3},{:.3} {:.3},{:.3} ", area, area_tris, a.x(), a.y(), b.x(), b.y(), c.x(), c.y());
}
*/
result
}
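// Editor's sketch: the barycentric-style alternative mentioned in the Todo above.
// It uses signed doubled areas and avoids the epsilon comparison. Added for
// illustration only; it is not called by the plugin.
#[allow(dead_code)]
fn point_in_tri_barycentric(p: &Vec2, a: &Vec2, b: &Vec2, c: &Vec2) -> bool {
// Signed doubled area of each sub-triangle formed by the point and one edge.
let d1 = (p.x() - b.x()) * (a.y() - b.y()) - (a.x() - b.x()) * (p.y() - b.y());
let d2 = (p.x() - c.x()) * (b.y() - c.y()) - (b.x() - c.x()) * (p.y() - c.y());
let d3 = (p.x() - a.x()) * (c.y() - a.y()) - (c.x() - a.x()) * (p.y() - a.y());
let has_neg = d1 < 0.0 || d2 < 0.0 || d3 < 0.0;
let has_pos = d1 > 0.0 || d2 > 0.0 || d3 > 0.0;
// The point is inside (or on an edge) when the signs do not disagree.
!(has_neg && has_pos)
}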
/// Checks whether a triangle lies entirely behind the camera, i.e. is not visibly pickable in the camera frustum.
fn triangle_behind_cam(triangle: [Vec3; 3]) -> bool {
// Find the maximum signed z value
let max_z = triangle
.iter()
.fold(-1.0, |max, x| if x.z() > max { x.z() } else { max });
// If the maximum z value is less than zero, all vertices are behind the camera
max_z < 0.0
}
| {
&self.topmost_pick
} | identifier_body |
lib.rs | use bevy::{
prelude::*,
render::camera::Camera,
render::color::Color,
render::mesh::{VertexAttribute, VertexAttributeValues},
render::pipeline::PrimitiveTopology,
window::CursorMoved,
};
pub struct PickingPlugin;
impl Plugin for PickingPlugin {
fn build(&self, app: &mut AppBuilder) {
app.init_resource::<PickState>()
.init_resource::<PickHighlightParams>()
.add_system(pick_mesh.system())
.add_system(select_mesh.system())
.add_system(pick_highlighting.system());
}
}
pub struct PickState {
cursor_event_reader: EventReader<CursorMoved>,
ordered_pick_list: Vec<PickIntersection>,
topmost_pick: Option<PickIntersection>,
}
impl PickState {
pub fn list(&self) -> &Vec<PickIntersection> {
&self.ordered_pick_list
}
pub fn top(&self) -> &Option<PickIntersection> {
&self.topmost_pick
}
}
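// Editor's sketch: how downstream code might consume the pick results. The system
// name and logging are illustrative assumptions, not part of the plugin.
#[allow(dead_code)]
fn print_topmost_pick(pick_state: Res<PickState>) {
if let Some(pick) = pick_state.top() {
println!("Topmost pick: {:?} at ndc {:?}", pick.entity, pick.get_pick_coord_ndc());
}
}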
impl Default for PickState {
fn default() -> Self {
PickState {
cursor_event_reader: EventReader::default(),
ordered_pick_list: Vec::new(),
topmost_pick: None,
}
}
}
/// Holds the entity associated with a mesh as well as its computed intersection from a pick ray cast
#[derive(Debug, PartialOrd, PartialEq, Copy, Clone)]
pub struct PickIntersection {
entity: Entity,
pick_coord_ndc: Vec3,
}
impl PickIntersection {
fn new(entity: Entity, pick_coord_ndc: Vec3) -> Self {
PickIntersection {
entity,
pick_coord_ndc,
}
}
pub fn get_pick_coord_ndc(&self) -> Vec3 {
self.pick_coord_ndc
}
pub fn get_pick_coord_world(&self, projection_matrix: Mat4, view_matrix: Mat4) -> Vec3 {
let world_pos: Vec4 = (projection_matrix * view_matrix)
.inverse()
.mul_vec4(self.pick_coord_ndc.extend(1.0));
(world_pos / world_pos.w()).truncate().into()
}
}
#[derive(Debug)]
pub struct PickHighlightParams {
hover_color: Color,
selection_color: Color,
}
impl PickHighlightParams {
pub fn set_hover_color(&mut self, color: Color) {
self.hover_color = color;
}
pub fn set_selection_color(&mut self, color: Color) {
self.selection_color = color;
}
}
impl Default for PickHighlightParams {
fn default() -> Self {
PickHighlightParams {
hover_color: Color::rgb(0.3, 0.5, 0.8),
selection_color: Color::rgb(0.3, 0.8, 0.5),
}
}
}
/// Marks an entity as pickable
#[derive(Debug)]
pub struct PickableMesh {
camera_entity: Entity,
bounding_sphere: Option<BoundSphere>,
pick_coord_ndc: Option<Vec3>,
}
impl PickableMesh {
pub fn new(camera_entity: Entity) -> Self {
PickableMesh {
camera_entity,
bounding_sphere: None,
pick_coord_ndc: None,
}
}
pub fn get_pick_coord_ndc(&self) -> Option<Vec3> {
self.pick_coord_ndc
}
}
/// Meshes with `SelectableMesh` will have selection state managed
#[derive(Debug)]
pub struct SelectablePickMesh {
selected: bool,
}
impl SelectablePickMesh {
pub fn new() -> Self {
SelectablePickMesh { selected: false }
}
pub fn selected(&self) -> bool {
self.selected
}
}
/// Meshes with `HighlightablePickMesh` will be highlighted when hovered over. If the mesh also has
/// the `SelectablePickMesh` component, it will highlight when selected.
#[derive(Debug)]
pub struct HighlightablePickMesh {
// Stores the initial color of the mesh material prior to selecting/hovering
initial_color: Option<Color>,
}
impl HighlightablePickMesh {
pub fn new() -> Self {
HighlightablePickMesh {
initial_color: None,
}
}
}
/// Defines a bounding sphere with a center point coordinate and a radius, used for picking
#[derive(Debug)]
struct BoundSphere {
mesh_radius: f32,
transformed_radius: Option<f32>,
ndc_def: Option<NdcBoundingCircle>,
}
impl From<&Mesh> for BoundSphere {
fn from(mesh: &Mesh) -> Self {
let mut mesh_radius = 0f32;
if mesh.primitive_topology != PrimitiveTopology::TriangleList {
panic!("Non-TriangleList mesh supplied for bounding sphere generation")
}
let mut vertex_positions = Vec::new();
for attribute in mesh.attributes.iter() {
if attribute.name == VertexAttribute::POSITION {
vertex_positions = match &attribute.values {
VertexAttributeValues::Float3(positions) => positions.clone(),
_ => panic!("Unexpected vertex types in VertexAttribute::POSITION"),
};
}
}
if let Some(indices) = &mesh.indices {
for index in indices.iter() {
mesh_radius =
mesh_radius.max(Vec3::from(vertex_positions[*index as usize]).length());
}
}
BoundSphere {
mesh_radius,
transformed_radius: None,
ndc_def: None,
}
}
}
/// Created from a BoundSphere, this represents a circle that bounds the entity's mesh when the
/// bounding sphere is projected onto the screen. Note this is not as simple as transforming the
/// sphere's origin into ndc and copying the radius. Due to rectilinear projection, the sphere
/// will be projected onto the screen as an ellipse if it is not perfectly centered at 0,0 in ndc.
/// Scale ndc circle based on linear function "abs(x(sec(arctan(tan(b/2)))-1)) + 1" where b = FOV
/// All the trig can be simplified to a coeff "c" abs(x*c+1)
#[derive(Debug)]
struct NdcBoundingCircle {
center: Vec2,
radius: f32,
}
/// Given the current selected and hovered meshes and provided materials, update the meshes with the
/// appropriate materials.
fn pick_highlighting(
// Resources
pick_state: Res<PickState>,
mut materials: ResMut<Assets<StandardMaterial>>,
highlight_params: Res<PickHighlightParams>,
// Queries
mut query_picked: Query<(
&mut HighlightablePickMesh,
Changed<PickableMesh>,
&Handle<StandardMaterial>,
Entity,
)>,
mut query_selected: Query<(
&mut HighlightablePickMesh,
Changed<SelectablePickMesh>,
&Handle<StandardMaterial>,
)>,
query_selectables: Query<&SelectablePickMesh>,
) {
// Query selectable entities that have changed
for (mut highlightable, selectable, material_handle) in &mut query_selected.iter() {
let current_color = &mut materials.get_mut(material_handle).unwrap().albedo;
let initial_color = match highlightable.initial_color {
None => {
highlightable.initial_color = Some(*current_color);
*current_color
}
Some(color) => color,
};
if selectable.selected {
*current_color = highlight_params.selection_color;
} else {
*current_color = initial_color;
}
}
// Query highlightable entities that have changed
for (mut highlightable, _pickable, material_handle, entity) in &mut query_picked.iter() {
let current_color = &mut materials.get_mut(material_handle).unwrap().albedo;
let initial_color = match highlightable.initial_color {
None => {
highlightable.initial_color = Some(*current_color);
*current_color
}
Some(color) => color,
};
let mut topmost = false;
if let Some(pick_depth) = pick_state.topmost_pick {
topmost = pick_depth.entity == entity;
}
if topmost {
*current_color = highlight_params.hover_color;
} else {
if let Ok(mut query) = query_selectables.entity(entity) {
if let Some(selectable) = query.get() {
if selectable.selected {
*current_color = highlight_params.selection_color;
} else {
*current_color = initial_color;
}
}
} else {
*current_color = initial_color;
}
}
}
}
/// Given the currently hovered mesh, checks for a user click and if detected, sets the selected
/// field in the entity's component to true.
fn select_mesh(
// Resources
pick_state: Res<PickState>,
mouse_button_inputs: Res<Input<MouseButton>>,
// Queries
mut query: Query<&mut SelectablePickMesh>,
) {
if mouse_button_inputs.just_pressed(MouseButton::Left) {
// Deselect everything
for mut selectable in &mut query.iter() {
selectable.selected = false;
}
if let Some(pick_depth) = pick_state.topmost_pick {
if let Ok(mut top_mesh) = query.get_mut::<SelectablePickMesh>(pick_depth.entity) {
top_mesh.selected = true;
}
}
}
}
/// Casts a ray into the scene from the cursor position, tracking pickable meshes that are hit.
fn pick_mesh(
// Resources
mut pick_state: ResMut<PickState>,
cursor: Res<Events<CursorMoved>>,
meshes: Res<Assets<Mesh>>,
windows: Res<Windows>,
// Queries
mut mesh_query: Query<(&Handle<Mesh>, &Transform, &mut PickableMesh, Entity)>,
mut camera_query: Query<(&Transform, &Camera)>,
) {
// Get the cursor position
let cursor_pos_screen: Vec2 = match pick_state.cursor_event_reader.latest(&cursor) {
Some(cursor_moved) => cursor_moved.position,
None => return,
};
// Get current screen size
let window = windows.get_primary().unwrap();
let screen_size = Vec2::from([window.width as f32, window.height as f32]);
// Normalized device coordinates (NDC) describe the cursor position from (-1, -1) to (1, 1)
let cursor_pos_ndc: Vec2 = (cursor_pos_screen / screen_size) * 2.0 - Vec2::from([1.0, 1.0]);
// Get the view transform and projection matrix from the camera
let mut view_matrix = Mat4::zero();
let mut projection_matrix = Mat4::zero();
for (transform, camera) in &mut camera_query.iter() {
view_matrix = transform.value.inverse();
projection_matrix = camera.projection_matrix;
}
// After initial checks completed, clear the pick list
pick_state.ordered_pick_list.clear();
pick_state.topmost_pick = None;
// Iterate through each pickable mesh in the scene
for (mesh_handle, transform, mut pickable, entity) in &mut mesh_query.iter() {
// Use the mesh handle to get a reference to a mesh asset
if let Some(mesh) = meshes.get(mesh_handle) {
if mesh.primitive_topology != PrimitiveTopology::TriangleList {
continue;
}
// The ray cast can hit the same mesh many times, so we need to track which hit is
// closest to the camera, and record that.
let mut hit_depth = f32::MAX;
// We need to transform the mesh vertices' positions from the mesh space to the world
// space using the mesh's transform, move it to the camera's space using the view
// matrix (camera.inverse), and finally, apply the projection matrix. Because column
// matrices are evaluated right to left, we have to order it correctly:
let mesh_to_cam_transform = view_matrix * transform.value;
// Get the vertex positions from the mesh reference resolved from the mesh handle
let vertex_positions: Vec<[f32; 3]> = mesh
.attributes
.iter()
.filter(|attribute| attribute.name == VertexAttribute::POSITION)
.filter_map(|attribute| match &attribute.values {
VertexAttributeValues::Float3(positions) => Some(positions.clone()),
_ => panic!("Unexpected vertex types in VertexAttribute::POSITION"),
})
.last()
.unwrap();
// We have everything set up, now we can jump into the mesh's list of indices and
// check triangles for cursor intersection.
if let Some(indices) = &mesh.indices {
let mut hit_found = false;
// Now that we're in the vector of vertex indices, we want to look at the vertex
// positions for each triangle, so we'll take indices in chunks of three, where each
// chunk of three indices are references to the three vertices of a triangle.
for index in indices.chunks(3) {
// Make sure this chunk has 3 vertices to avoid a panic.
if index.len() == 3 {
// Set up an empty container for triangle vertices
let mut triangle: [Vec3; 3] = [Vec3::zero(), Vec3::zero(), Vec3::zero()];
// We can now grab the position of each vertex in the triangle using the
// indices pointing into the position vector. These positions are relative
// to the coordinate system of the mesh the vertex/triangle belongs to. To
// test if the triangle is being hovered over, we need to convert this to
// NDC (normalized device coordinates)
for i in 0..3 {
// Get the raw vertex position using the index
let mut vertex_pos = Vec3::from(vertex_positions[index[i] as usize]);
// Transform the vertex to world space with the mesh transform, then
// into camera space with the view transform.
vertex_pos = mesh_to_cam_transform.transform_point3(vertex_pos);
// This next part seems to be a bug with glam - it should do the divide
// by w perspective math for us, instead we have to do it manually.
// `glam` PR https://github.com/bitshifter/glam-rs/pull/75/files
let transformed = projection_matrix.mul_vec4(vertex_pos.extend(1.0));
let w_recip = transformed.w().abs().recip();
triangle[i] = Vec3::from(transformed.truncate() * w_recip);
}
if !triangle_behind_cam(triangle) {
if point_in_tri(
&cursor_pos_ndc,
&Vec2::new(triangle[0].x(), triangle[0].y()),
&Vec2::new(triangle[1].x(), triangle[1].y()),
&Vec2::new(triangle[2].x(), triangle[2].y()),
) {
hit_found = true;
if triangle[0].z() < hit_depth {
hit_depth = triangle[0].z();
}
}
}
}
}
// Finished going through the current mesh, update pick states
let pick_coord_ndc = cursor_pos_ndc.extend(hit_depth);
pickable.pick_coord_ndc = Some(pick_coord_ndc);
if hit_found {
pick_state
.ordered_pick_list
.push(PickIntersection::new(entity, pick_coord_ndc));
}
} else {
// If we get here the mesh doesn't have an index list!
panic!(
"No index matrix found in mesh {:?}\n{:?}",
mesh_handle, mesh
);
}
}
}
// Sort the pick list
pick_state
.ordered_pick_list
.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
// The pick_state resource we have access to is not sorted, so we need to manually grab the
// lowest value;
if !pick_state.ordered_pick_list.is_empty() {
let mut nearest_index = 0usize;
let mut nearest_depth = f32::MAX;
for (index, pick) in pick_state.ordered_pick_list.iter().enumerate() {
let current_depth = pick.pick_coord_ndc.z();
if current_depth < nearest_depth {
nearest_depth = current_depth;
nearest_index = index;
}
}
pick_state.topmost_pick = Some(pick_state.ordered_pick_list[nearest_index]);
}
}
/// Compute twice the area of a triangle given 2D vertex coordinates; the "/2" is omitted to save an operation
fn double_tri_area(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 {
f32::abs(a.x() * (b.y() - c.y()) + b.x() * (c.y() - a.y()) + c.x() * (a.y() - b.y()))
}
/// Checks if a point is inside a triangle by comparing the summed areas of the sub-triangles;
/// the point is inside the triangle if the areas are equal. An epsilon is used due to floating
/// point error. Todo: barycentric method
fn point_in_tri(p: &Vec2, a: &Vec2, b: &Vec2, c: &Vec2) -> bool {
let area = double_tri_area(a, b, c);
let pab = double_tri_area(p, a, b);
let pac = double_tri_area(p, a, c);
let pbc = double_tri_area(p, b, c);
let area_tris = pab + pac + pbc;
let epsilon = 0.00001;
let result: bool = f32::abs(area - area_tris) < epsilon;
/*
if result {
println!("Hit: {:.3} {:.3} {:.3},{:.3} {:.3},{:.3} {:.3},{:.3} ", area, area_tris, a.x(), a.y(), b.x(), b.y(), c.x(), c.y());
} else {
println!("No Hit: {:.3} {:.3} {:.3},{:.3} {:.3},{:.3} {:.3},{:.3} ", area, area_tris, a.x(), a.y(), b.x(), b.y(), c.x(), c.y());
}
*/
result
}
/// Checkes if a triangle is visibly pickable in the camera frustum. | // Find the maximum signed z value
let max_z = triangle
.iter()
.fold(-1.0, |max, x| if x.z() > max { x.z() } else { max });
// If the maximum z value is less than zero, all vertices are behind the camera
max_z < 0.0
} | fn triangle_behind_cam(triangle: [Vec3; 3]) -> bool { | random_line_split |
lib.rs | use bevy::{
prelude::*,
render::camera::Camera,
render::color::Color,
render::mesh::{VertexAttribute, VertexAttributeValues},
render::pipeline::PrimitiveTopology,
window::CursorMoved,
};
pub struct PickingPlugin;
impl Plugin for PickingPlugin {
fn build(&self, app: &mut AppBuilder) {
app.init_resource::<PickState>()
.init_resource::<PickHighlightParams>()
.add_system(pick_mesh.system())
.add_system(select_mesh.system())
.add_system(pick_highlighting.system());
}
}
pub struct PickState {
cursor_event_reader: EventReader<CursorMoved>,
ordered_pick_list: Vec<PickIntersection>,
topmost_pick: Option<PickIntersection>,
}
impl PickState {
pub fn | (&self) -> &Vec<PickIntersection> {
&self.ordered_pick_list
}
pub fn top(&self) -> &Option<PickIntersection> {
&self.topmost_pick
}
}
impl Default for PickState {
fn default() -> Self {
PickState {
cursor_event_reader: EventReader::default(),
ordered_pick_list: Vec::new(),
topmost_pick: None,
}
}
}
/// Holds the entity associated with a mesh as well as its computed intersection from a pick ray cast
#[derive(Debug, PartialOrd, PartialEq, Copy, Clone)]
pub struct PickIntersection {
entity: Entity,
pick_coord_ndc: Vec3,
}
impl PickIntersection {
fn new(entity: Entity, pick_coord_ndc: Vec3) -> Self {
PickIntersection {
entity,
pick_coord_ndc,
}
}
pub fn get_pick_coord_ndc(&self) -> Vec3 {
self.pick_coord_ndc
}
pub fn get_pick_coord_world(&self, projection_matrix: Mat4, view_matrix: Mat4) -> Vec3 {
let world_pos: Vec4 = (projection_matrix * view_matrix)
.inverse()
.mul_vec4(self.pick_coord_ndc.extend(1.0));
(world_pos / world_pos.w()).truncate().into()
}
}
#[derive(Debug)]
pub struct PickHighlightParams {
hover_color: Color,
selection_color: Color,
}
impl PickHighlightParams {
pub fn set_hover_color(&mut self, color: Color) {
self.hover_color = color;
}
pub fn set_selection_color(&mut self, color: Color) {
self.selection_color = color;
}
}
impl Default for PickHighlightParams {
fn default() -> Self {
PickHighlightParams {
hover_color: Color::rgb(0.3, 0.5, 0.8),
selection_color: Color::rgb(0.3, 0.8, 0.5),
}
}
}
/// Marks an entity as pickable
#[derive(Debug)]
pub struct PickableMesh {
camera_entity: Entity,
bounding_sphere: Option<BoundSphere>,
pick_coord_ndc: Option<Vec3>,
}
impl PickableMesh {
pub fn new(camera_entity: Entity) -> Self {
PickableMesh {
camera_entity,
bounding_sphere: None,
pick_coord_ndc: None,
}
}
pub fn get_pick_coord_ndc(&self) -> Option<Vec3> {
self.pick_coord_ndc
}
}
/// Meshes with `SelectableMesh` will have selection state managed
#[derive(Debug)]
pub struct SelectablePickMesh {
selected: bool,
}
impl SelectablePickMesh {
pub fn new() -> Self {
SelectablePickMesh { selected: false }
}
pub fn selected(&self) -> bool {
self.selected
}
}
/// Meshes with `HighlightablePickMesh` will be highlighted when hovered over. If the mesh also has
/// the `SelectablePickMesh` component, it will highlight when selected.
#[derive(Debug)]
pub struct HighlightablePickMesh {
// Stores the initial color of the mesh material prior to selecting/hovering
initial_color: Option<Color>,
}
impl HighlightablePickMesh {
pub fn new() -> Self {
HighlightablePickMesh {
initial_color: None,
}
}
}
/// Defines a bounding sphere with a center point coordinate and a radius, used for picking
#[derive(Debug)]
struct BoundSphere {
mesh_radius: f32,
transformed_radius: Option<f32>,
ndc_def: Option<NdcBoundingCircle>,
}
impl From<&Mesh> for BoundSphere {
fn from(mesh: &Mesh) -> Self {
let mut mesh_radius = 0f32;
if mesh.primitive_topology != PrimitiveTopology::TriangleList {
panic!("Non-TriangleList mesh supplied for bounding sphere generation")
}
let mut vertex_positions = Vec::new();
for attribute in mesh.attributes.iter() {
if attribute.name == VertexAttribute::POSITION {
vertex_positions = match &attribute.values {
VertexAttributeValues::Float3(positions) => positions.clone(),
_ => panic!("Unexpected vertex types in VertexAttribute::POSITION"),
};
}
}
if let Some(indices) = &mesh.indices {
for index in indices.iter() {
mesh_radius =
mesh_radius.max(Vec3::from(vertex_positions[*index as usize]).length());
}
}
BoundSphere {
mesh_radius,
transformed_radius: None,
ndc_def: None,
}
}
}
/// Created from a BoundSphere, this represents a circle that bounds the entity's mesh when the
/// bounding sphere is projected onto the screen. Note this is not as simple as transforming the
/// sphere's origin into ndc and copying the radius. Due to rectilinear projection, the sphere
/// will be projected onto the screen as an ellipse if it is not perfectly centered at 0,0 in ndc.
/// Scale ndc circle based on linear function "abs(x(sec(arctan(tan(b/2)))-1)) + 1" where b = FOV
/// All the trig can be simplified to a coeff "c" abs(x*c+1)
#[derive(Debug)]
struct NdcBoundingCircle {
center: Vec2,
radius: f32,
}
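// Worked example for the scaling above (illustrative, assuming a 90 degree FOV):
// tan(b/2) = 1, arctan(1) = 45 degrees, sec(45 degrees) = sqrt(2) ~ 1.414, so the
// coefficient is c ~ 0.414 and the projected circle's radius grows by abs(x * c) + 1
// as the sphere's center moves off-axis by x in NDC.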
/// Given the current selected and hovered meshes and provided materials, update the meshes with the
/// appropriate materials.
fn pick_highlighting(
// Resources
pick_state: Res<PickState>,
mut materials: ResMut<Assets<StandardMaterial>>,
highlight_params: Res<PickHighlightParams>,
// Queries
mut query_picked: Query<(
&mut HighlightablePickMesh,
Changed<PickableMesh>,
&Handle<StandardMaterial>,
Entity,
)>,
mut query_selected: Query<(
&mut HighlightablePickMesh,
Changed<SelectablePickMesh>,
&Handle<StandardMaterial>,
)>,
query_selectables: Query<&SelectablePickMesh>,
) {
// Query selectable entities that have changed
for (mut highlightable, selectable, material_handle) in &mut query_selected.iter() {
let current_color = &mut materials.get_mut(material_handle).unwrap().albedo;
let initial_color = match highlightable.initial_color {
None => {
highlightable.initial_color = Some(*current_color);
*current_color
}
Some(color) => color,
};
if selectable.selected {
*current_color = highlight_params.selection_color;
} else {
*current_color = initial_color;
}
}
// Query highlightable entities that have changed
for (mut highlightable, _pickable, material_handle, entity) in &mut query_picked.iter() {
let current_color = &mut materials.get_mut(material_handle).unwrap().albedo;
let initial_color = match highlightable.initial_color {
None => {
highlightable.initial_color = Some(*current_color);
*current_color
}
Some(color) => color,
};
let mut topmost = false;
if let Some(pick_depth) = pick_state.topmost_pick {
topmost = pick_depth.entity == entity;
}
if topmost {
*current_color = highlight_params.hover_color;
} else {
if let Ok(mut query) = query_selectables.entity(entity) {
if let Some(selectable) = query.get() {
if selectable.selected {
*current_color = highlight_params.selection_color;
} else {
*current_color = initial_color;
}
}
} else {
*current_color = initial_color;
}
}
}
}
/// Given the currently hovered mesh, checks for a user click and if detected, sets the selected
/// field in the entity's component to true.
fn select_mesh(
// Resources
pick_state: Res<PickState>,
mouse_button_inputs: Res<Input<MouseButton>>,
// Queries
mut query: Query<&mut SelectablePickMesh>,
) {
if mouse_button_inputs.just_pressed(MouseButton::Left) {
// Deselect everything
for mut selectable in &mut query.iter() {
selectable.selected = false;
}
if let Some(pick_depth) = pick_state.topmost_pick {
if let Ok(mut top_mesh) = query.get_mut::<SelectablePickMesh>(pick_depth.entity) {
top_mesh.selected = true;
}
}
}
}
/// Casts a ray into the scene from the cursor position, tracking pickable meshes that are hit.
fn pick_mesh(
// Resources
mut pick_state: ResMut<PickState>,
cursor: Res<Events<CursorMoved>>,
meshes: Res<Assets<Mesh>>,
windows: Res<Windows>,
// Queries
mut mesh_query: Query<(&Handle<Mesh>, &Transform, &mut PickableMesh, Entity)>,
mut camera_query: Query<(&Transform, &Camera)>,
) {
// Get the cursor position
let cursor_pos_screen: Vec2 = match pick_state.cursor_event_reader.latest(&cursor) {
Some(cursor_moved) => cursor_moved.position,
None => return,
};
// Get current screen size
let window = windows.get_primary().unwrap();
let screen_size = Vec2::from([window.width as f32, window.height as f32]);
// Normalized device coordinates (NDC) describes cursor position from (-1, -1) to (1, 1)
let cursor_pos_ndc: Vec2 = (cursor_pos_screen / screen_size) * 2.0 - Vec2::from([1.0, 1.0]);
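// For example, on a hypothetical 1280x720 window: a cursor at (640, 360) maps to (0, 0),
// (0, 0) maps to (-1, -1), and (1280, 720) maps to (1, 1).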
// Get the view transform and projection matrix from the camera
let mut view_matrix = Mat4::zero();
let mut projection_matrix = Mat4::zero();
for (transform, camera) in &mut camera_query.iter() {
view_matrix = transform.value.inverse();
projection_matrix = camera.projection_matrix;
}
// After initial checks completed, clear the pick list
pick_state.ordered_pick_list.clear();
pick_state.topmost_pick = None;
// Iterate through each pickable mesh in the scene
for (mesh_handle, transform, mut pickable, entity) in &mut mesh_query.iter() {
// Use the mesh handle to get a reference to a mesh asset
if let Some(mesh) = meshes.get(mesh_handle) {
if mesh.primitive_topology != PrimitiveTopology::TriangleList {
continue;
}
// The ray cast can hit the same mesh many times, so we need to track which hit is
// closest to the camera, and record that.
let mut hit_depth = f32::MAX;
// We need to transform the mesh vertices' positions from the mesh space to the world
// space using the mesh's transform, move it to the camera's space using the view
// matrix (camera.inverse), and finally, apply the projection matrix. Because column
// matrices are evaluated right to left, we have to order it correctly:
let mesh_to_cam_transform = view_matrix * transform.value;
// Get the vertex positions from the mesh reference resolved from the mesh handle
let vertex_positions: Vec<[f32; 3]> = mesh
.attributes
.iter()
.filter(|attribute| attribute.name == VertexAttribute::POSITION)
.filter_map(|attribute| match &attribute.values {
VertexAttributeValues::Float3(positions) => Some(positions.clone()),
_ => panic!("Unexpected vertex types in VertexAttribute::POSITION"),
})
.last()
.unwrap();
// We have everything set up, now we can jump into the mesh's list of indices and
// check triangles for cursor intersection.
if let Some(indices) = &mesh.indices {
let mut hit_found = false;
// Now that we're in the vector of vertex indices, we want to look at the vertex
// positions for each triangle, so we'll take indices in chunks of three, where each
// chunk of three indices are references to the three vertices of a triangle.
for index in indices.chunks(3) {
// Make sure this chunk has 3 vertices to avoid a panic.
if index.len() == 3 {
// Set up an empty container for triangle vertices
let mut triangle: [Vec3; 3] = [Vec3::zero(), Vec3::zero(), Vec3::zero()];
// We can now grab the position of each vertex in the triangle using the
// indices pointing into the position vector. These positions are relative
// to the coordinate system of the mesh the vertex/triangle belongs to. To
// test if the triangle is being hovered over, we need to convert this to
// NDC (normalized device coordinates)
for i in 0..3 {
// Get the raw vertex position using the index
let mut vertex_pos = Vec3::from(vertex_positions[index[i] as usize]);
// Transform the vertex to world space with the mesh transform, then
// into camera space with the view transform.
vertex_pos = mesh_to_cam_transform.transform_point3(vertex_pos);
// This next part seems to be a bug with glam - it should do the divide
// by w perspective math for us, instead we have to do it manually.
// `glam` PR https://github.com/bitshifter/glam-rs/pull/75/files
let transformed = projection_matrix.mul_vec4(vertex_pos.extend(1.0));
let w_recip = transformed.w().abs().recip();
triangle[i] = Vec3::from(transformed.truncate() * w_recip);
}
if !triangle_behind_cam(triangle) {
if point_in_tri(
&cursor_pos_ndc,
&Vec2::new(triangle[0].x(), triangle[0].y()),
&Vec2::new(triangle[1].x(), triangle[1].y()),
&Vec2::new(triangle[2].x(), triangle[2].y()),
) {
hit_found = true;
if triangle[0].z() < hit_depth {
hit_depth = triangle[0].z();
}
}
}
}
}
// Finished going through the current mesh, update pick states
let pick_coord_ndc = cursor_pos_ndc.extend(hit_depth);
pickable.pick_coord_ndc = Some(pick_coord_ndc);
if hit_found {
pick_state
.ordered_pick_list
.push(PickIntersection::new(entity, pick_coord_ndc));
}
} else {
// If we get here the mesh doesn't have an index list!
panic!(
"No index matrix found in mesh {:?}\n{:?}",
mesh_handle, mesh
);
}
}
}
// Sort the pick list
pick_state
.ordered_pick_list
.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
// The list above is not guaranteed to be ordered by depth, so manually grab the pick with
// the lowest NDC z value.
if !pick_state.ordered_pick_list.is_empty() {
let mut nearest_index = 0usize;
let mut nearest_depth = f32::MAX;
for (index, pick) in pick_state.ordered_pick_list.iter().enumerate() {
let current_depth = pick.pick_coord_ndc.z();
if current_depth < nearest_depth {
nearest_depth = current_depth;
nearest_index = index;
}
}
pick_state.topmost_pick = Some(pick_state.ordered_pick_list[nearest_index]);
}
}
/// Compute the area of a triangle given 2D vertex coordinates, "/2" removed to save an operation
fn double_tri_area(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 {
f32::abs(a.x() * (b.y() - c.y()) + b.x() * (c.y() - a.y()) + c.x() * (a.y() - b.y()))
}
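// For example, a = (0, 0), b = (1, 0), c = (0, 1) gives |0*(0-1) + 1*(1-0) + 0*(0-0)| = 1.0,
// i.e. twice the triangle's true area of 0.5.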
/// Checks if a point is inside a triangle by comparing the summed areas of the triangles, the point
/// is inside the triangle if the areas are equal. An epsilon is used due to floating point error.
/// Todo: barycentric method
fn point_in_tri(p: &Vec2, a: &Vec2, b: &Vec2, c: &Vec2) -> bool {
let area = double_tri_area(a, b, c);
let pab = double_tri_area(p, a, b);
let pac = double_tri_area(p, a, c);
let pbc = double_tri_area(p, b, c);
let area_tris = pab + pac + pbc;
let epsilon = 0.00001;
let result: bool = f32::abs(area - area_tris) < epsilon;
/*
if result {
println!("Hit: {:.3} {:.3} {:.3},{:.3} {:.3},{:.3} {:.3},{:.3} ", area, area_tris, a.x(), a.y(), b.x(), b.y(), c.x(), c.y());
} else {
println!("No Hit: {:.3} {:.3} {:.3},{:.3} {:.3},{:.3} {:.3},{:.3} ", area, area_tris, a.x(), a.y(), b.x(), b.y(), c.x(), c.y());
}
*/
result
}
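// Illustrative test sketch (assumes only the items defined in this file plus the `Vec2`
// type from the bevy prelude): exercises the summed-area rule with a simple right triangle.
#[cfg(test)]
mod point_in_tri_tests {
use super::*;
#[test]
fn summed_areas_detect_inside_and_outside_points() {
let (a, b, c) = (Vec2::new(0.0, 0.0), Vec2::new(1.0, 0.0), Vec2::new(0.0, 1.0));
// Inside: the three sub-triangle areas sum to the full (doubled) area of 1.0.
assert!(point_in_tri(&Vec2::new(0.25, 0.25), &a, &b, &c));
// Outside: the summed areas (3.0) exceed the full area, so the check fails.
assert!(!point_in_tri(&Vec2::new(1.0, 1.0), &a, &b, &c));
}
}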
/// Checks whether a triangle lies entirely behind the camera and is therefore not pickable.
fn triangle_behind_cam(triangle: [Vec3; 3]) -> bool {
// Find the maximum signed z value
let max_z = triangle
.iter()
.fold(-1.0, |max, x| if x.z() > max { x.z() } else { max });
// If the maximum z value is less than zero, all vertices are behind the camera
max_z < 0.0
}
| list | identifier_name |
lib.rs | use bevy::{
prelude::*,
render::camera::Camera,
render::color::Color,
render::mesh::{VertexAttribute, VertexAttributeValues},
render::pipeline::PrimitiveTopology,
window::CursorMoved,
};
pub struct PickingPlugin;
impl Plugin for PickingPlugin {
fn build(&self, app: &mut AppBuilder) {
app.init_resource::<PickState>()
.init_resource::<PickHighlightParams>()
.add_system(pick_mesh.system())
.add_system(select_mesh.system())
.add_system(pick_highlighting.system());
}
}
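// Hypothetical usage sketch (API names taken from this file, not verified against a
// particular bevy version): register `PickingPlugin` on the app, then attach
// `PickableMesh::new(camera_entity)`, `SelectablePickMesh::new()` and
// `HighlightablePickMesh::new()` to entities that should be pickable, selectable
// and highlighted.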
pub struct PickState {
cursor_event_reader: EventReader<CursorMoved>,
ordered_pick_list: Vec<PickIntersection>,
topmost_pick: Option<PickIntersection>,
}
impl PickState {
pub fn list(&self) -> &Vec<PickIntersection> {
&self.ordered_pick_list
}
pub fn top(&self) -> &Option<PickIntersection> {
&self.topmost_pick
}
}
impl Default for PickState {
fn default() -> Self {
PickState {
cursor_event_reader: EventReader::default(),
ordered_pick_list: Vec::new(),
topmost_pick: None,
}
}
}
/// Holds the entity associated with a mesh as well as its computed intersection from a pick ray cast
#[derive(Debug, PartialOrd, PartialEq, Copy, Clone)]
pub struct PickIntersection {
entity: Entity,
pick_coord_ndc: Vec3,
}
impl PickIntersection {
fn new(entity: Entity, pick_coord_ndc: Vec3) -> Self {
PickIntersection {
entity,
pick_coord_ndc,
}
}
pub fn get_pick_coord_ndc(&self) -> Vec3 {
self.pick_coord_ndc
}
pub fn get_pick_coord_world(&self, projection_matrix: Mat4, view_matrix: Mat4) -> Vec3 {
let world_pos: Vec4 = (projection_matrix * view_matrix)
.inverse()
.mul_vec4(self.pick_coord_ndc.extend(1.0));
(world_pos / world_pos.w()).truncate().into()
}
}
#[derive(Debug)]
pub struct PickHighlightParams {
hover_color: Color,
selection_color: Color,
}
impl PickHighlightParams {
pub fn set_hover_color(&mut self, color: Color) {
self.hover_color = color;
}
pub fn set_selection_color(&mut self, color: Color) {
self.selection_color = color;
}
}
impl Default for PickHighlightParams {
fn default() -> Self {
PickHighlightParams {
hover_color: Color::rgb(0.3, 0.5, 0.8),
selection_color: Color::rgb(0.3, 0.8, 0.5),
}
}
}
/// Marks an entity as pickable
#[derive(Debug)]
pub struct PickableMesh {
camera_entity: Entity,
bounding_sphere: Option<BoundSphere>,
pick_coord_ndc: Option<Vec3>,
}
impl PickableMesh {
pub fn new(camera_entity: Entity) -> Self {
PickableMesh {
camera_entity,
bounding_sphere: None,
pick_coord_ndc: None,
}
}
pub fn get_pick_coord_ndc(&self) -> Option<Vec3> {
self.pick_coord_ndc
}
}
/// Meshes with `SelectablePickMesh` will have their selection state managed
#[derive(Debug)]
pub struct SelectablePickMesh {
selected: bool,
}
impl SelectablePickMesh {
pub fn new() -> Self {
SelectablePickMesh { selected: false }
}
pub fn selected(&self) -> bool {
self.selected
}
}
/// Meshes with `HighlightablePickMesh` will be highlighted when hovered over. If the mesh also has
/// the `SelectablePickMesh` component, it will highlight when selected.
#[derive(Debug)]
pub struct HighlightablePickMesh {
// Stores the initial color of the mesh material prior to selecting/hovering
initial_color: Option<Color>,
}
impl HighlightablePickMesh {
pub fn new() -> Self {
HighlightablePickMesh {
initial_color: None,
}
}
}
/// Defines a bounding sphere with a center point coordinate and a radius, used for picking
#[derive(Debug)]
struct BoundSphere {
mesh_radius: f32,
transformed_radius: Option<f32>,
ndc_def: Option<NdcBoundingCircle>,
}
impl From<&Mesh> for BoundSphere {
fn from(mesh: &Mesh) -> Self {
let mut mesh_radius = 0f32;
if mesh.primitive_topology != PrimitiveTopology::TriangleList {
panic!("Non-TriangleList mesh supplied for bounding sphere generation")
}
let mut vertex_positions = Vec::new();
for attribute in mesh.attributes.iter() {
if attribute.name == VertexAttribute::POSITION {
vertex_positions = match &attribute.values {
VertexAttributeValues::Float3(positions) => positions.clone(),
_ => panic!("Unexpected vertex types in VertexAttribute::POSITION"),
};
}
}
if let Some(indices) = &mesh.indices {
for index in indices.iter() {
mesh_radius =
mesh_radius.max(Vec3::from(vertex_positions[*index as usize]).length());
}
}
BoundSphere {
mesh_radius,
transformed_radius: None,
ndc_def: None,
}
}
}
/// Created from a BoundSphere, this represents a circle that bounds the entity's mesh when the
/// bounding sphere is projected onto the screen. Note this is not as simple as transforming the
/// sphere's origin into ndc and copying the radius. Due to rectilinear projection, the sphere
/// will be projected onto the screen as an ellipse if it is not perfectly centered at 0,0 in ndc.
/// Scale the ndc circle based on the linear function "abs(x(sec(arctan(tan(b/2)))-1)) + 1" where b = FOV.
/// All the trig can be simplified to a coefficient "c", giving abs(x*c) + 1.
#[derive(Debug)]
struct NdcBoundingCircle {
center: Vec2,
radius: f32,
}
/// Given the current selected and hovered meshes and provided materials, update the meshes with the
/// appropriate materials.
fn pick_highlighting(
// Resources
pick_state: Res<PickState>,
mut materials: ResMut<Assets<StandardMaterial>>,
highlight_params: Res<PickHighlightParams>,
// Queries
mut query_picked: Query<(
&mut HighlightablePickMesh,
Changed<PickableMesh>,
&Handle<StandardMaterial>,
Entity,
)>,
mut query_selected: Query<(
&mut HighlightablePickMesh,
Changed<SelectablePickMesh>,
&Handle<StandardMaterial>,
)>,
query_selectables: Query<&SelectablePickMesh>,
) {
// Query selectable entities that have changed
for (mut highlightable, selectable, material_handle) in &mut query_selected.iter() {
let current_color = &mut materials.get_mut(material_handle).unwrap().albedo;
let initial_color = match highlightable.initial_color {
None => {
highlightable.initial_color = Some(*current_color);
*current_color
}
Some(color) => color,
};
if selectable.selected | else {
*current_color = initial_color;
}
}
// Query highlightable entities that have changed
for (mut highlightable, _pickable, material_handle, entity) in &mut query_picked.iter() {
let current_color = &mut materials.get_mut(material_handle).unwrap().albedo;
let initial_color = match highlightable.initial_color {
None => {
highlightable.initial_color = Some(*current_color);
*current_color
}
Some(color) => color,
};
let mut topmost = false;
if let Some(pick_depth) = pick_state.topmost_pick {
topmost = pick_depth.entity == entity;
}
if topmost {
*current_color = highlight_params.hover_color;
} else {
if let Ok(mut query) = query_selectables.entity(entity) {
if let Some(selectable) = query.get() {
if selectable.selected {
*current_color = highlight_params.selection_color;
} else {
*current_color = initial_color;
}
}
} else {
*current_color = initial_color;
}
}
}
}
/// Given the currently hovered mesh, checks for a user click and if detected, sets the selected
/// field in the entity's component to true.
fn select_mesh(
// Resources
pick_state: Res<PickState>,
mouse_button_inputs: Res<Input<MouseButton>>,
// Queries
mut query: Query<&mut SelectablePickMesh>,
) {
if mouse_button_inputs.just_pressed(MouseButton::Left) {
// Deselect everything
for mut selectable in &mut query.iter() {
selectable.selected = false;
}
if let Some(pick_depth) = pick_state.topmost_pick {
if let Ok(mut top_mesh) = query.get_mut::<SelectablePickMesh>(pick_depth.entity) {
top_mesh.selected = true;
}
}
}
}
/// Casts a ray into the scene from the cursor position, tracking pickable meshes that are hit.
fn pick_mesh(
// Resources
mut pick_state: ResMut<PickState>,
cursor: Res<Events<CursorMoved>>,
meshes: Res<Assets<Mesh>>,
windows: Res<Windows>,
// Queries
mut mesh_query: Query<(&Handle<Mesh>, &Transform, &mut PickableMesh, Entity)>,
mut camera_query: Query<(&Transform, &Camera)>,
) {
// Get the cursor position
let cursor_pos_screen: Vec2 = match pick_state.cursor_event_reader.latest(&cursor) {
Some(cursor_moved) => cursor_moved.position,
None => return,
};
// Get current screen size
let window = windows.get_primary().unwrap();
let screen_size = Vec2::from([window.width as f32, window.height as f32]);
// Normalized device coordinates (NDC) describes cursor position from (-1, -1) to (1, 1)
let cursor_pos_ndc: Vec2 = (cursor_pos_screen / screen_size) * 2.0 - Vec2::from([1.0, 1.0]);
// Get the view transform and projection matrix from the camera
let mut view_matrix = Mat4::zero();
let mut projection_matrix = Mat4::zero();
for (transform, camera) in &mut camera_query.iter() {
view_matrix = transform.value.inverse();
projection_matrix = camera.projection_matrix;
}
// After initial checks completed, clear the pick list
pick_state.ordered_pick_list.clear();
pick_state.topmost_pick = None;
// Iterate through each pickable mesh in the scene
for (mesh_handle, transform, mut pickable, entity) in &mut mesh_query.iter() {
// Use the mesh handle to get a reference to a mesh asset
if let Some(mesh) = meshes.get(mesh_handle) {
if mesh.primitive_topology != PrimitiveTopology::TriangleList {
continue;
}
// The ray cast can hit the same mesh many times, so we need to track which hit is
// closest to the camera, and record that.
let mut hit_depth = f32::MAX;
// We need to transform the mesh vertices' positions from the mesh space to the world
// space using the mesh's transform, move it to the camera's space using the view
// matrix (camera.inverse), and finally, apply the projection matrix. Because column
// matrices are evaluated right to left, we have to order it correctly:
let mesh_to_cam_transform = view_matrix * transform.value;
// Get the vertex positions from the mesh reference resolved from the mesh handle
let vertex_positions: Vec<[f32; 3]> = mesh
.attributes
.iter()
.filter(|attribute| attribute.name == VertexAttribute::POSITION)
.filter_map(|attribute| match &attribute.values {
VertexAttributeValues::Float3(positions) => Some(positions.clone()),
_ => panic!("Unexpected vertex types in VertexAttribute::POSITION"),
})
.last()
.unwrap();
// We have everything set up, now we can jump into the mesh's list of indices and
// check triangles for cursor intersection.
if let Some(indices) = &mesh.indices {
let mut hit_found = false;
// Now that we're in the vector of vertex indices, we want to look at the vertex
// positions for each triangle, so we'll take indices in chunks of three, where each
// chunk of three indices are references to the three vertices of a triangle.
for index in indices.chunks(3) {
// Make sure this chunk has 3 vertices to avoid a panic.
if index.len() == 3 {
// Set up an empty container for triangle vertices
let mut triangle: [Vec3; 3] = [Vec3::zero(), Vec3::zero(), Vec3::zero()];
// We can now grab the position of each vertex in the triangle using the
// indices pointing into the position vector. These positions are relative
// to the coordinate system of the mesh the vertex/triangle belongs to. To
// test if the triangle is being hovered over, we need to convert this to
// NDC (normalized device coordinates)
for i in 0..3 {
// Get the raw vertex position using the index
let mut vertex_pos = Vec3::from(vertex_positions[index[i] as usize]);
// Transform the vertex to world space with the mesh transform, then
// into camera space with the view transform.
vertex_pos = mesh_to_cam_transform.transform_point3(vertex_pos);
// This next part seems to be a bug with glam - it should do the divide
// by w perspective math for us, instead we have to do it manually.
// `glam` PR https://github.com/bitshifter/glam-rs/pull/75/files
let transformed = projection_matrix.mul_vec4(vertex_pos.extend(1.0));
let w_recip = transformed.w().abs().recip();
triangle[i] = Vec3::from(transformed.truncate() * w_recip);
}
if !triangle_behind_cam(triangle) {
if point_in_tri(
&cursor_pos_ndc,
&Vec2::new(triangle[0].x(), triangle[0].y()),
&Vec2::new(triangle[1].x(), triangle[1].y()),
&Vec2::new(triangle[2].x(), triangle[2].y()),
) {
hit_found = true;
if triangle[0].z() < hit_depth {
hit_depth = triangle[0].z();
}
}
}
}
}
// Finished going through the current mesh, update pick states
let pick_coord_ndc = cursor_pos_ndc.extend(hit_depth);
pickable.pick_coord_ndc = Some(pick_coord_ndc);
if hit_found {
pick_state
.ordered_pick_list
.push(PickIntersection::new(entity, pick_coord_ndc));
}
} else {
// If we get here the mesh doesn't have an index list!
panic!(
"No index matrix found in mesh {:?}\n{:?}",
mesh_handle, mesh
);
}
}
}
// Sort the pick list
pick_state
.ordered_pick_list
.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
// The list above is not guaranteed to be ordered by depth, so manually grab the pick with
// the lowest NDC z value.
if !pick_state.ordered_pick_list.is_empty() {
let mut nearest_index = 0usize;
let mut nearest_depth = f32::MAX;
for (index, pick) in pick_state.ordered_pick_list.iter().enumerate() {
let current_depth = pick.pick_coord_ndc.z();
if current_depth < nearest_depth {
nearest_depth = current_depth;
nearest_index = index;
}
}
pick_state.topmost_pick = Some(pick_state.ordered_pick_list[nearest_index]);
}
}
/// Compute the area of a triangle given 2D vertex coordinates, "/2" removed to save an operation
fn double_tri_area(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 {
f32::abs(a.x() * (b.y() - c.y()) + b.x() * (c.y() - a.y()) + c.x() * (a.y() - b.y()))
}
/// Checks if a point is inside a triangle by comparing the summed areas of the triangles, the point
/// is inside the triangle if the areas are equal. An epsilon is used due to floating point error.
/// Todo: barycentric method
fn point_in_tri(p: &Vec2, a: &Vec2, b: &Vec2, c: &Vec2) -> bool {
let area = double_tri_area(a, b, c);
let pab = double_tri_area(p, a, b);
let pac = double_tri_area(p, a, c);
let pbc = double_tri_area(p, b, c);
let area_tris = pab + pac + pbc;
let epsilon = 0.00001;
let result: bool = f32::abs(area - area_tris) < epsilon;
/*
if result {
println!("Hit: {:.3} {:.3} {:.3},{:.3} {:.3},{:.3} {:.3},{:.3} ", area, area_tris, a.x(), a.y(), b.x(), b.y(), c.x(), c.y());
} else {
println!("No Hit: {:.3} {:.3} {:.3},{:.3} {:.3},{:.3} {:.3},{:.3} ", area, area_tris, a.x(), a.y(), b.x(), b.y(), c.x(), c.y());
}
*/
result
}
/// Checks whether a triangle lies entirely behind the camera and is therefore not pickable.
fn triangle_behind_cam(triangle: [Vec3; 3]) -> bool {
// Find the maximum signed z value
let max_z = triangle
.iter()
.fold(-1.0, |max, x| if x.z() > max { x.z() } else { max });
// If the maximum z value is less than zero, all vertices are behind the camera
max_z < 0.0
}
| {
*current_color = highlight_params.selection_color;
} | conditional_block |
lib.rs | // Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! The provisioner is responsible for assembling a relay chain block
//! from a set of available parachain candidates of its choice.
#![deny(missing_docs, unused_crate_dependencies)]
use bitvec::vec::BitVec;
use futures::{
channel::{mpsc, oneshot},
prelude::*,
};
use polkadot_node_subsystem::{
errors::{ChainApiError, RuntimeApiError}, PerLeafSpan, SubsystemSender, jaeger,
messages::{
CandidateBackingMessage, ChainApiMessage, ProvisionableData, ProvisionerInherentData,
ProvisionerMessage,
},
};
use polkadot_node_subsystem_util::{
self as util, JobSubsystem, JobSender,
request_availability_cores, request_persisted_validation_data, JobTrait, metrics::{self, prometheus},
};
use polkadot_primitives::v1::{
BackedCandidate, BlockNumber, CandidateReceipt, CoreState, Hash, OccupiedCoreAssumption,
SignedAvailabilityBitfield, ValidatorIndex,
};
use std::{pin::Pin, collections::BTreeMap, sync::Arc};
use thiserror::Error;
use futures_timer::Delay;
/// How long to wait before proposing.
const PRE_PROPOSE_TIMEOUT: std::time::Duration = core::time::Duration::from_millis(2000);
const LOG_TARGET: &str = "parachain::provisioner";
enum InherentAfter {
Ready,
Wait(Delay),
}
impl InherentAfter {
fn new_from_now() -> Self {
InherentAfter::Wait(Delay::new(PRE_PROPOSE_TIMEOUT))
}
fn is_ready(&self) -> bool {
match *self {
InherentAfter::Ready => true,
InherentAfter::Wait(_) => false,
}
}
async fn ready(&mut self) |
}
/// A per-relay-parent job for the provisioning subsystem.
pub struct ProvisioningJob {
relay_parent: Hash,
receiver: mpsc::Receiver<ProvisionerMessage>,
backed_candidates: Vec<CandidateReceipt>,
signed_bitfields: Vec<SignedAvailabilityBitfield>,
metrics: Metrics,
inherent_after: InherentAfter,
awaiting_inherent: Vec<oneshot::Sender<ProvisionerInherentData>>
}
/// Errors in the provisioner.
#[derive(Debug, Error)]
#[allow(missing_docs)]
pub enum Error {
#[error(transparent)]
Util(#[from] util::Error),
#[error("failed to get availability cores")]
CanceledAvailabilityCores(#[source] oneshot::Canceled),
#[error("failed to get persisted validation data")]
CanceledPersistedValidationData(#[source] oneshot::Canceled),
#[error("failed to get block number")]
CanceledBlockNumber(#[source] oneshot::Canceled),
#[error("failed to get backed candidates")]
CanceledBackedCandidates(#[source] oneshot::Canceled),
#[error(transparent)]
ChainApi(#[from] ChainApiError),
#[error(transparent)]
Runtime(#[from] RuntimeApiError),
#[error("failed to send message to ChainAPI")]
ChainApiMessageSend(#[source] mpsc::SendError),
#[error("failed to send message to CandidateBacking to get backed candidates")]
GetBackedCandidatesSend(#[source] mpsc::SendError),
#[error("failed to send return message with Inherents")]
InherentDataReturnChannel,
#[error("backed candidate does not correspond to selected candidate; check logic in provisioner")]
BackedCandidateOrderingProblem,
}
impl JobTrait for ProvisioningJob {
type ToJob = ProvisionerMessage;
type Error = Error;
type RunArgs = ();
type Metrics = Metrics;
const NAME: &'static str = "ProvisioningJob";
/// Run a job for the parent block indicated
//
// this function is in charge of creating and executing the job's main loop
#[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))]
fn run<S: SubsystemSender>(
relay_parent: Hash,
span: Arc<jaeger::Span>,
_run_args: Self::RunArgs,
metrics: Self::Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
mut sender: JobSender<S>,
) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
async move {
let job = ProvisioningJob::new(
relay_parent,
metrics,
receiver,
);
job.run_loop(sender.subsystem_sender(), PerLeafSpan::new(span, "provisioner")).await
}
.boxed()
}
}
impl ProvisioningJob {
fn new(
relay_parent: Hash,
metrics: Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
) -> Self {
Self {
relay_parent,
receiver,
backed_candidates: Vec::new(),
signed_bitfields: Vec::new(),
metrics,
inherent_after: InherentAfter::new_from_now(),
awaiting_inherent: Vec::new(),
}
}
async fn run_loop(
mut self,
sender: &mut impl SubsystemSender,
span: PerLeafSpan,
) -> Result<(), Error> {
use ProvisionerMessage::{
ProvisionableData, RequestInherentData,
};
loop {
futures::select! {
msg = self.receiver.next().fuse() => match msg {
Some(RequestInherentData(_, return_sender)) => {
let _span = span.child("req-inherent-data");
let _timer = self.metrics.time_request_inherent_data();
if self.inherent_after.is_ready() {
self.send_inherent_data(sender, vec![return_sender]).await;
} else {
self.awaiting_inherent.push(return_sender);
}
}
Some(ProvisionableData(_, data)) => {
let span = span.child("provisionable-data");
let _timer = self.metrics.time_provisionable_data();
self.note_provisionable_data(&span, data);
}
None => break,
},
_ = self.inherent_after.ready().fuse() => {
let _span = span.child("send-inherent-data");
let return_senders = std::mem::take(&mut self.awaiting_inherent);
if !return_senders.is_empty() {
self.send_inherent_data(sender, return_senders).await;
}
}
}
}
Ok(())
}
async fn send_inherent_data(
&mut self,
sender: &mut impl SubsystemSender,
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
) {
if let Err(err) = send_inherent_data(
self.relay_parent,
&self.signed_bitfields,
&self.backed_candidates,
return_senders,
sender,
)
.await
{
tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data");
self.metrics.on_inherent_data_request(Err(()));
} else {
self.metrics.on_inherent_data_request(Ok(()));
}
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
fn note_provisionable_data(&mut self, span: &jaeger::Span, provisionable_data: ProvisionableData) {
match provisionable_data {
ProvisionableData::Bitfield(_, signed_bitfield) => {
self.signed_bitfields.push(signed_bitfield)
}
ProvisionableData::BackedCandidate(backed_candidate) => {
let _span = span.child("provisionable-backed")
.with_para_id(backed_candidate.descriptor().para_id);
self.backed_candidates.push(backed_candidate)
}
_ => {}
}
}
}
type CoreAvailability = BitVec<bitvec::order::Lsb0, u8>;
/// The provisioner is the subsystem best suited to choosing which specific
/// backed candidates and availability bitfields should be assembled into the
/// block. To engage this functionality, a
/// `ProvisionerMessage::RequestInherentData` is sent; the response is a set of
/// non-conflicting candidates and the appropriate bitfields. Non-conflicting
/// means that there are never two distinct parachain candidates included for
/// the same parachain and that new parachain candidates cannot be included
/// until the previous one either gets declared available or expired.
///
/// The main complication here is going to be around handling
/// occupied-core-assumptions. We might have candidates that are only
/// includable when some bitfields are included. And we might have candidates
/// that are not includable when certain bitfields are included.
///
/// When we're choosing bitfields to include, the rule should be simple:
/// maximize availability. So basically, include all bitfields. And then
/// choose a coherent set of candidates along with that.
#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))]
async fn send_inherent_data(
relay_parent: Hash,
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
from_job: &mut impl SubsystemSender,
) -> Result<(), Error> {
let availability_cores = request_availability_cores(relay_parent, from_job)
.await
.await.map_err(|err| Error::CanceledAvailabilityCores(err))??;
let bitfields = select_availability_bitfields(&availability_cores, bitfields);
let candidates = select_candidates(
&availability_cores,
&bitfields,
candidates,
relay_parent,
from_job,
).await?;
let inherent_data = ProvisionerInherentData {
bitfields,
backed_candidates: candidates,
disputes: Vec::new(), // until disputes are implemented.
};
for return_sender in return_senders {
return_sender.send(inherent_data.clone()).map_err(|_data| Error::InherentDataReturnChannel)?;
}
Ok(())
}
/// In general, we want to pick all the bitfields. However, we have the following constraints:
///
/// - not more than one per validator
/// - each 1 bit must correspond to an occupied core
///
/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing availability,
/// we pick the one with the greatest number of 1 bits.
///
/// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated
/// to the sorting of the input.
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn select_availability_bitfields(
cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
) -> Vec<SignedAvailabilityBitfield> {
let mut selected: BTreeMap<ValidatorIndex, SignedAvailabilityBitfield> = BTreeMap::new();
'a:
for bitfield in bitfields.iter().cloned() {
if bitfield.payload().0.len() != cores.len() {
continue
}
let is_better = selected.get(&bitfield.validator_index())
.map_or(true, |b| b.payload().0.count_ones() < bitfield.payload().0.count_ones());
if !is_better { continue }
for (idx, _) in cores.iter().enumerate().filter(|v| !v.1.is_occupied()) {
// Bit is set for an unoccupied core - invalid
if *bitfield.payload().0.get(idx).as_deref().unwrap_or(&false) {
continue 'a
}
}
let _ = selected.insert(bitfield.validator_index(), bitfield);
}
selected.into_iter().map(|(_, b)| b).collect()
}
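// Illustrative example (assumed numbers): with three cores where only core 1 is occupied,
// a bitfield that sets only bit 1 is kept; a bitfield that also sets bit 0 (an unoccupied
// core) is skipped entirely. If the same validator signs two acceptable bitfields, only the
// one with more ones survives, so the output holds at most one bitfield per validator.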
/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn select_candidates(
availability_cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
relay_parent: Hash,
sender: &mut impl SubsystemSender,
) -> Result<Vec<BackedCandidate>, Error> {
let block_number = get_block_number_under_construction(relay_parent, sender).await?;
let mut selected_candidates =
Vec::with_capacity(candidates.len().min(availability_cores.len()));
for (core_idx, core) in availability_cores.iter().enumerate() {
let (scheduled_core, assumption) = match core {
CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free),
CoreState::Occupied(occupied_core) => {
if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) {
if let Some(ref scheduled_core) = occupied_core.next_up_on_available {
(scheduled_core, OccupiedCoreAssumption::Included)
} else {
continue;
}
} else {
if occupied_core.time_out_at != block_number {
continue;
}
if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out {
(scheduled_core, OccupiedCoreAssumption::TimedOut)
} else {
continue;
}
}
}
CoreState::Free => continue,
};
let validation_data = match request_persisted_validation_data(
relay_parent,
scheduled_core.para_id,
assumption,
sender,
)
.await
.await.map_err(|err| Error::CanceledPersistedValidationData(err))??
{
Some(v) => v,
None => continue,
};
let computed_validation_data_hash = validation_data.hash();
// we arbitrarily pick the first of the backed candidates which match the appropriate selection criteria
if let Some(candidate) = candidates.iter().find(|backed_candidate| {
let descriptor = &backed_candidate.descriptor;
descriptor.para_id == scheduled_core.para_id
&& descriptor.persisted_validation_data_hash == computed_validation_data_hash
}) {
let candidate_hash = candidate.hash();
tracing::trace!(
target: LOG_TARGET,
"Selecting candidate {}. para_id={} core={}",
candidate_hash,
candidate.descriptor.para_id,
core_idx,
);
selected_candidates.push(candidate_hash);
}
}
// now get the backed candidates corresponding to these candidate receipts
let (tx, rx) = oneshot::channel();
sender.send_message(CandidateBackingMessage::GetBackedCandidates(
relay_parent,
selected_candidates.clone(),
tx,
).into()).await;
let mut candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?;
// `selected_candidates` is generated in ascending order by core index, and `GetBackedCandidates`
// _should_ preserve that property, but let's just make sure.
//
// We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected candidate
// maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by checking them
// in order, we can ensure that the backed candidates are also in order.
let mut backed_idx = 0;
for selected in selected_candidates {
if selected == candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() {
backed_idx += 1;
}
}
if candidates.len() != backed_idx {
Err(Error::BackedCandidateOrderingProblem)?;
}
// keep only one candidate with validation code.
let mut with_validation_code = false;
candidates.retain(|c| {
if c.candidate.commitments.new_validation_code.is_some() {
if with_validation_code {
return false
}
with_validation_code = true;
}
true
});
tracing::debug!(
target: LOG_TARGET,
"Selected {} candidates for {} cores",
candidates.len(),
availability_cores.len(),
);
Ok(candidates)
}
/// Produces a block number 1 higher than that of the relay parent.
/// In the event of an invalid `relay_parent`, returns `Ok(0)`.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn get_block_number_under_construction(
relay_parent: Hash,
sender: &mut impl SubsystemSender,
) -> Result<BlockNumber, Error> {
let (tx, rx) = oneshot::channel();
sender
.send_message(ChainApiMessage::BlockNumber(
relay_parent,
tx,
).into())
.await;
match rx.await.map_err(|err| Error::CanceledBlockNumber(err))? {
Ok(Some(n)) => Ok(n + 1),
Ok(None) => Ok(0),
Err(err) => Err(err.into()),
}
}
/// The availability bitfield for a given core is the transpose
/// of a set of signed availability bitfields. It goes like this:
///
/// - construct a transverse slice along `core_idx`
/// - bitwise-or it with the availability slice
/// - count the 1 bits, compare to the total length; true on 2/3+
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn bitfields_indicate_availability(
core_idx: usize,
bitfields: &[SignedAvailabilityBitfield],
availability: &CoreAvailability,
) -> bool {
let mut availability = availability.clone();
let availability_len = availability.len();
for bitfield in bitfields {
let validator_idx = bitfield.validator_index().0 as usize;
match availability.get_mut(validator_idx) {
None => {
// in principle, this function might return a `Result<bool, Error>` so that we can more clearly express this error condition
// however, in practice, that would just push off an error-handling routine which would look a whole lot like this one.
// simpler to just handle the error internally here.
tracing::warn!(
target: LOG_TARGET,
validator_idx = %validator_idx,
availability_len = %availability_len,
"attempted to set a transverse bit at idx {} which is greater than bitfield size {}",
validator_idx,
availability_len,
);
return false;
}
Some(mut bit_mut) => *bit_mut |= bitfield.payload().0[core_idx],
}
}
3 * availability.count_ones() >= 2 * availability.len()
}
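// Worked example (illustrative): with 4 validators, 3 ones in the transposed bitfield give
// 3 * 3 >= 2 * 4, so the core counts as available; with only 2 ones, 3 * 2 < 2 * 4 and it
// does not.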
#[derive(Clone)]
struct MetricsInner {
inherent_data_requests: prometheus::CounterVec<prometheus::U64>,
request_inherent_data: prometheus::Histogram,
provisionable_data: prometheus::Histogram,
}
/// Provisioner metrics.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
impl Metrics {
fn on_inherent_data_request(&self, response: Result<(), ()>) {
if let Some(metrics) = &self.0 {
match response {
Ok(()) => metrics.inherent_data_requests.with_label_values(&["succeeded"]).inc(),
Err(()) => metrics.inherent_data_requests.with_label_values(&["failed"]).inc(),
}
}
}
/// Provide a timer for `request_inherent_data` which observes on drop.
fn time_request_inherent_data(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.request_inherent_data.start_timer())
}
/// Provide a timer for `provisionable_data` which observes on drop.
fn time_provisionable_data(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.provisionable_data.start_timer())
}
}
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
inherent_data_requests: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"parachain_inherent_data_requests_total",
"Number of InherentData requests served by provisioner.",
),
&["success"],
)?,
registry,
)?,
request_inherent_data: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_provisioner_request_inherent_data",
"Time spent within `provisioner::request_inherent_data`",
)
)?,
registry,
)?,
provisionable_data: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_provisioner_provisionable_data",
"Time spent within `provisioner::provisionable_data`",
)
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
}
/// The provisioning subsystem.
pub type ProvisioningSubsystem<Spawner> = JobSubsystem<ProvisioningJob, Spawner>;
#[cfg(test)]
mod tests;
| {
match *self {
InherentAfter::Ready => {
// Make sure we never end the returned future.
// This is required because the `select!` that calls this future will end in a busy loop.
futures::pending!()
},
InherentAfter::Wait(ref mut d) => {
d.await;
*self = InherentAfter::Ready;
},
}
} | identifier_body |
lib.rs | // Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! The provisioner is responsible for assembling a relay chain block
//! from a set of available parachain candidates of its choice.
#![deny(missing_docs, unused_crate_dependencies)]
use bitvec::vec::BitVec;
use futures::{
channel::{mpsc, oneshot},
prelude::*,
};
use polkadot_node_subsystem::{
errors::{ChainApiError, RuntimeApiError}, PerLeafSpan, SubsystemSender, jaeger,
messages::{
CandidateBackingMessage, ChainApiMessage, ProvisionableData, ProvisionerInherentData,
ProvisionerMessage,
},
};
use polkadot_node_subsystem_util::{
self as util, JobSubsystem, JobSender,
request_availability_cores, request_persisted_validation_data, JobTrait, metrics::{self, prometheus},
};
use polkadot_primitives::v1::{
BackedCandidate, BlockNumber, CandidateReceipt, CoreState, Hash, OccupiedCoreAssumption,
SignedAvailabilityBitfield, ValidatorIndex,
};
use std::{pin::Pin, collections::BTreeMap, sync::Arc};
use thiserror::Error;
use futures_timer::Delay;
/// How long to wait before proposing.
const PRE_PROPOSE_TIMEOUT: std::time::Duration = core::time::Duration::from_millis(2000);
const LOG_TARGET: &str = "parachain::provisioner";
enum InherentAfter {
Ready,
Wait(Delay),
}
impl InherentAfter {
fn new_from_now() -> Self {
InherentAfter::Wait(Delay::new(PRE_PROPOSE_TIMEOUT))
}
fn is_ready(&self) -> bool {
match *self {
InherentAfter::Ready => true,
InherentAfter::Wait(_) => false,
}
}
async fn ready(&mut self) {
match *self {
InherentAfter::Ready => {
// Make sure we never end the returned future.
// This is required because the `select!` that calls this future will end in a busy loop.
futures::pending!()
},
InherentAfter::Wait(ref mut d) => {
d.await;
*self = InherentAfter::Ready;
},
}
}
}
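// Note on the pattern above: after the delay fires once, `ready()` parks on
// `futures::pending!()` forever, so the `select!` in `run_loop` is woken only by incoming
// messages rather than spinning on an already-ready branch.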
/// A per-relay-parent job for the provisioning subsystem.
pub struct ProvisioningJob {
relay_parent: Hash,
receiver: mpsc::Receiver<ProvisionerMessage>,
backed_candidates: Vec<CandidateReceipt>,
signed_bitfields: Vec<SignedAvailabilityBitfield>,
metrics: Metrics,
inherent_after: InherentAfter,
awaiting_inherent: Vec<oneshot::Sender<ProvisionerInherentData>>
}
/// Errors in the provisioner.
#[derive(Debug, Error)]
#[allow(missing_docs)]
pub enum Error {
#[error(transparent)]
Util(#[from] util::Error),
#[error("failed to get availability cores")]
CanceledAvailabilityCores(#[source] oneshot::Canceled),
#[error("failed to get persisted validation data")]
CanceledPersistedValidationData(#[source] oneshot::Canceled),
#[error("failed to get block number")]
CanceledBlockNumber(#[source] oneshot::Canceled),
#[error("failed to get backed candidates")]
CanceledBackedCandidates(#[source] oneshot::Canceled),
#[error(transparent)]
ChainApi(#[from] ChainApiError),
#[error(transparent)]
Runtime(#[from] RuntimeApiError),
#[error("failed to send message to ChainAPI")]
ChainApiMessageSend(#[source] mpsc::SendError),
#[error("failed to send message to CandidateBacking to get backed candidates")]
GetBackedCandidatesSend(#[source] mpsc::SendError),
#[error("failed to send return message with Inherents")]
InherentDataReturnChannel,
#[error("backed candidate does not correspond to selected candidate; check logic in provisioner")]
BackedCandidateOrderingProblem,
}
impl JobTrait for ProvisioningJob {
type ToJob = ProvisionerMessage;
type Error = Error;
type RunArgs = ();
type Metrics = Metrics;
const NAME: &'static str = "ProvisioningJob";
/// Run a job for the parent block indicated
//
// this function is in charge of creating and executing the job's main loop
#[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))]
fn run<S: SubsystemSender>(
relay_parent: Hash,
span: Arc<jaeger::Span>,
_run_args: Self::RunArgs,
metrics: Self::Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
mut sender: JobSender<S>,
) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
async move {
let job = ProvisioningJob::new(
relay_parent,
metrics,
receiver,
);
job.run_loop(sender.subsystem_sender(), PerLeafSpan::new(span, "provisioner")).await
}
.boxed()
}
}
impl ProvisioningJob {
fn new(
relay_parent: Hash,
metrics: Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
) -> Self {
Self {
relay_parent,
receiver,
backed_candidates: Vec::new(),
signed_bitfields: Vec::new(),
metrics,
inherent_after: InherentAfter::new_from_now(),
awaiting_inherent: Vec::new(),
}
}
async fn run_loop(
mut self,
sender: &mut impl SubsystemSender,
span: PerLeafSpan,
) -> Result<(), Error> {
use ProvisionerMessage::{
ProvisionableData, RequestInherentData,
};
loop {
futures::select! {
msg = self.receiver.next().fuse() => match msg {
Some(RequestInherentData(_, return_sender)) => {
let _span = span.child("req-inherent-data");
let _timer = self.metrics.time_request_inherent_data();
if self.inherent_after.is_ready() {
self.send_inherent_data(sender, vec![return_sender]).await;
} else {
self.awaiting_inherent.push(return_sender);
}
}
Some(ProvisionableData(_, data)) => {
let span = span.child("provisionable-data");
let _timer = self.metrics.time_provisionable_data();
self.note_provisionable_data(&span, data);
}
None => break,
},
_ = self.inherent_after.ready().fuse() => {
let _span = span.child("send-inherent-data");
let return_senders = std::mem::take(&mut self.awaiting_inherent);
if !return_senders.is_empty() {
self.send_inherent_data(sender, return_senders).await;
}
}
}
}
Ok(())
}
async fn send_inherent_data(
&mut self,
sender: &mut impl SubsystemSender,
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
) {
if let Err(err) = send_inherent_data(
self.relay_parent,
&self.signed_bitfields,
&self.backed_candidates,
return_senders,
sender,
)
.await
{
tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data");
self.metrics.on_inherent_data_request(Err(()));
} else {
self.metrics.on_inherent_data_request(Ok(()));
}
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
fn note_provisionable_data(&mut self, span: &jaeger::Span, provisionable_data: ProvisionableData) {
match provisionable_data {
ProvisionableData::Bitfield(_, signed_bitfield) => {
self.signed_bitfields.push(signed_bitfield)
}
ProvisionableData::BackedCandidate(backed_candidate) => {
let _span = span.child("provisionable-backed")
.with_para_id(backed_candidate.descriptor().para_id);
self.backed_candidates.push(backed_candidate)
}
_ => {}
}
}
}
type CoreAvailability = BitVec<bitvec::order::Lsb0, u8>;
/// The provisioner is the subsystem best suited to choosing which specific
/// backed candidates and availability bitfields should be assembled into the
/// block. To engage this functionality, a
/// `ProvisionerMessage::RequestInherentData` is sent; the response is a set of
/// non-conflicting candidates and the appropriate bitfields. Non-conflicting
/// means that there are never two distinct parachain candidates included for
/// the same parachain and that new parachain candidates cannot be included
/// until the previous one either gets declared available or expired.
///
/// The main complication here is going to be around handling
/// occupied-core-assumptions. We might have candidates that are only
/// includable when some bitfields are included. And we might have candidates
/// that are not includable when certain bitfields are included.
///
/// When we're choosing bitfields to include, the rule should be simple:
/// maximize availability. So basically, include all bitfields. And then
/// choose a coherent set of candidates along with that.
#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))]
async fn send_inherent_data(
relay_parent: Hash,
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
from_job: &mut impl SubsystemSender,
) -> Result<(), Error> {
let availability_cores = request_availability_cores(relay_parent, from_job)
.await
.await.map_err(|err| Error::CanceledAvailabilityCores(err))??;
let bitfields = select_availability_bitfields(&availability_cores, bitfields);
let candidates = select_candidates(
&availability_cores,
&bitfields,
candidates,
relay_parent,
from_job,
).await?;
let inherent_data = ProvisionerInherentData {
bitfields,
backed_candidates: candidates,
disputes: Vec::new(), // until disputes are implemented.
};
for return_sender in return_senders {
return_sender.send(inherent_data.clone()).map_err(|_data| Error::InherentDataReturnChannel)?;
}
Ok(())
}
/// In general, we want to pick all the bitfields. However, we have the following constraints:
///
/// - not more than one per validator
/// - each 1 bit must correspond to an occupied core
///
/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing availability,
/// we pick the one with the greatest number of 1 bits.
///
/// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated
/// to the sorting of the input.
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn select_availability_bitfields(
cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
) -> Vec<SignedAvailabilityBitfield> {
let mut selected: BTreeMap<ValidatorIndex, SignedAvailabilityBitfield> = BTreeMap::new();
'a:
for bitfield in bitfields.iter().cloned() {
if bitfield.payload().0.len() != cores.len() {
continue
}
let is_better = selected.get(&bitfield.validator_index())
.map_or(true, |b| b.payload().0.count_ones() < bitfield.payload().0.count_ones());
if !is_better { continue }
for (idx, _) in cores.iter().enumerate().filter(|v| !v.1.is_occupied()) {
// Bit is set for an unoccupied core - invalid
if *bitfield.payload().0.get(idx).as_deref().unwrap_or(&false) {
continue 'a
}
}
let _ = selected.insert(bitfield.validator_index(), bitfield);
}
selected.into_iter().map(|(_, b)| b).collect()
}
/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn select_candidates(
availability_cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
relay_parent: Hash,
sender: &mut impl SubsystemSender,
) -> Result<Vec<BackedCandidate>, Error> {
let block_number = get_block_number_under_construction(relay_parent, sender).await?;
let mut selected_candidates =
Vec::with_capacity(candidates.len().min(availability_cores.len()));
for (core_idx, core) in availability_cores.iter().enumerate() {
let (scheduled_core, assumption) = match core {
CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free),
CoreState::Occupied(occupied_core) => {
if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) {
if let Some(ref scheduled_core) = occupied_core.next_up_on_available {
(scheduled_core, OccupiedCoreAssumption::Included)
} else {
continue;
}
} else {
if occupied_core.time_out_at != block_number {
continue;
}
if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out {
(scheduled_core, OccupiedCoreAssumption::TimedOut)
} else {
continue;
}
}
}
CoreState::Free => continue,
};
let validation_data = match request_persisted_validation_data(
relay_parent,
scheduled_core.para_id,
assumption,
sender,
)
.await
.await.map_err(|err| Error::CanceledPersistedValidationData(err))??
{
Some(v) => v,
None => continue,
};
let computed_validation_data_hash = validation_data.hash();
// we arbitrarily pick the first of the backed candidates which match the appropriate selection criteria
if let Some(candidate) = candidates.iter().find(|backed_candidate| {
let descriptor = &backed_candidate.descriptor;
descriptor.para_id == scheduled_core.para_id
&& descriptor.persisted_validation_data_hash == computed_validation_data_hash
}) {
let candidate_hash = candidate.hash();
tracing::trace!(
target: LOG_TARGET,
"Selecting candidate {}. para_id={} core={}",
candidate_hash,
candidate.descriptor.para_id,
core_idx,
);
selected_candidates.push(candidate_hash);
}
}
// now get the backed candidates corresponding to these candidate receipts
let (tx, rx) = oneshot::channel();
sender.send_message(CandidateBackingMessage::GetBackedCandidates(
relay_parent,
selected_candidates.clone(),
tx,
).into()).await;
let mut candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?;
// `selected_candidates` is generated in ascending order by core index, and `GetBackedCandidates`
// _should_ preserve that property, but let's just make sure.
//
// We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected candidate
// maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by checking them
// in order, we can ensure that the backed candidates are also in order.
let mut backed_idx = 0;
for selected in selected_candidates {
if selected == candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() {
backed_idx += 1;
}
}
if candidates.len() != backed_idx {
Err(Error::BackedCandidateOrderingProblem)?;
}
// keep only one candidate with validation code.
let mut with_validation_code = false;
candidates.retain(|c| {
if c.candidate.commitments.new_validation_code.is_some() {
if with_validation_code {
return false
}
with_validation_code = true;
}
true
});
tracing::debug!(
target: LOG_TARGET,
"Selected {} candidates for {} cores",
candidates.len(),
availability_cores.len(),
);
Ok(candidates)
}
/// Produces a block number 1 higher than that of the relay parent.
/// In the event of an invalid `relay_parent`, returns `Ok(0)`.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn get_block_number_under_construction(
relay_parent: Hash,
sender: &mut impl SubsystemSender,
) -> Result<BlockNumber, Error> {
let (tx, rx) = oneshot::channel();
sender
.send_message(ChainApiMessage::BlockNumber(
relay_parent,
tx,
).into())
.await;
match rx.await.map_err(|err| Error::CanceledBlockNumber(err))? {
Ok(Some(n)) => Ok(n + 1),
Ok(None) => Ok(0),
Err(err) => Err(err.into()),
}
}
/// The availability bitfield for a given core is the transpose
/// of a set of signed availability bitfields. It goes like this:
///
/// - construct a transverse slice along `core_idx`
/// - bitwise-or it with the availability slice
/// - count the 1 bits, compare to the total length; true on 2/3+
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn bitfields_indicate_availability(
core_idx: usize,
bitfields: &[SignedAvailabilityBitfield],
availability: &CoreAvailability, | for bitfield in bitfields {
let validator_idx = bitfield.validator_index().0 as usize;
match availability.get_mut(validator_idx) {
None => {
// in principle, this function might return a `Result<bool, Error>` so that we can more clearly express this error condition
// however, in practice, that would just push off an error-handling routine which would look a whole lot like this one.
// simpler to just handle the error internally here.
tracing::warn!(
target: LOG_TARGET,
validator_idx = %validator_idx,
availability_len = %availability_len,
"attempted to set a transverse bit at idx {} which is greater than bitfield size {}",
validator_idx,
availability_len,
);
return false;
}
Some(mut bit_mut) => *bit_mut |= bitfield.payload().0[core_idx],
}
}
3 * availability.count_ones() >= 2 * availability.len()
}
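// Hedged sketch of the transpose-and-threshold rule implemented just above,
// over plain `Vec<bool>` bitfields and ignoring the pre-existing per-core
// availability that the real function ORs in. Names are hypothetical.
#[cfg(test)]
mod two_thirds_sketch {
    fn two_thirds_available(core_idx: usize, validator_bitfields: &[Vec<bool>]) -> bool {
        let n_validators = validator_bitfields.len();
        // count the validators whose bitfield has the `core_idx`-th bit set
        let votes = validator_bitfields
            .iter()
            .filter(|bits| bits.get(core_idx).copied().unwrap_or(false))
            .count();
        3 * votes >= 2 * n_validators
    }

    #[test]
    fn two_of_three_validators_suffice() {
        let voted = vec![true];
        let missing = vec![false];
        assert!(two_thirds_available(0, &[voted.clone(), voted, missing]));
        assert!(!two_thirds_available(0, &[vec![true], vec![false], vec![false]]));
    }
}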
#[derive(Clone)]
struct MetricsInner {
inherent_data_requests: prometheus::CounterVec<prometheus::U64>,
request_inherent_data: prometheus::Histogram,
provisionable_data: prometheus::Histogram,
}
/// Provisioner metrics.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
impl Metrics {
fn on_inherent_data_request(&self, response: Result<(), ()>) {
if let Some(metrics) = &self.0 {
match response {
Ok(()) => metrics.inherent_data_requests.with_label_values(&["succeeded"]).inc(),
Err(()) => metrics.inherent_data_requests.with_label_values(&["failed"]).inc(),
}
}
}
/// Provide a timer for `request_inherent_data` which observes on drop.
fn time_request_inherent_data(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.request_inherent_data.start_timer())
}
/// Provide a timer for `provisionable_data` which observes on drop.
fn time_provisionable_data(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.provisionable_data.start_timer())
}
}
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
inherent_data_requests: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"parachain_inherent_data_requests_total",
"Number of InherentData requests served by provisioner.",
),
&["success"],
)?,
registry,
)?,
request_inherent_data: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_provisioner_request_inherent_data",
"Time spent within `provisioner::request_inherent_data`",
)
)?,
registry,
)?,
provisionable_data: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_provisioner_provisionable_data",
"Time spent within `provisioner::provisionable_data`",
)
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
}
/// The provisioning subsystem.
pub type ProvisioningSubsystem<Spawner> = JobSubsystem<ProvisioningJob, Spawner>;
#[cfg(test)]
mod tests; | ) -> bool {
let mut availability = availability.clone();
let availability_len = availability.len();
| random_line_split |
lib.rs | // Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! The provisioner is responsible for assembling a relay chain block
//! from a set of available parachain candidates of its choice.
#![deny(missing_docs, unused_crate_dependencies)]
use bitvec::vec::BitVec;
use futures::{
channel::{mpsc, oneshot},
prelude::*,
};
use polkadot_node_subsystem::{
errors::{ChainApiError, RuntimeApiError}, PerLeafSpan, SubsystemSender, jaeger,
messages::{
CandidateBackingMessage, ChainApiMessage, ProvisionableData, ProvisionerInherentData,
ProvisionerMessage,
},
};
use polkadot_node_subsystem_util::{
self as util, JobSubsystem, JobSender,
request_availability_cores, request_persisted_validation_data, JobTrait, metrics::{self, prometheus},
};
use polkadot_primitives::v1::{
BackedCandidate, BlockNumber, CandidateReceipt, CoreState, Hash, OccupiedCoreAssumption,
SignedAvailabilityBitfield, ValidatorIndex,
};
use std::{pin::Pin, collections::BTreeMap, sync::Arc};
use thiserror::Error;
use futures_timer::Delay;
/// How long to wait before proposing.
const PRE_PROPOSE_TIMEOUT: std::time::Duration = core::time::Duration::from_millis(2000);
const LOG_TARGET: &str = "parachain::provisioner";
enum InherentAfter {
Ready,
Wait(Delay),
}
impl InherentAfter {
fn new_from_now() -> Self {
InherentAfter::Wait(Delay::new(PRE_PROPOSE_TIMEOUT))
}
fn is_ready(&self) -> bool {
match *self {
InherentAfter::Ready => true,
InherentAfter::Wait(_) => false,
}
}
async fn ready(&mut self) {
match *self {
InherentAfter::Ready => {
// Make sure we never end the returned future.
// This is required because the `select!` that calls this future will end in a busy loop.
futures::pending!()
},
InherentAfter::Wait(ref mut d) => {
d.await;
*self = InherentAfter::Ready;
},
}
}
}
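// Minimal sketch (hypothetical, not part of the subsystem) of the
// "fire once, then never signal again" behaviour that `InherentAfter` gives
// the `select!` loop below, with a tick counter standing in for the timer.
#[cfg(test)]
mod inherent_after_sketch {
    enum OnceReady {
        Waiting(u32),
        Ready,
    }

    impl OnceReady {
        // Returns true exactly once, on the tick where the wait elapses.
        fn tick(&mut self) -> bool {
            match *self {
                OnceReady::Ready => false,
                OnceReady::Waiting(0) => {
                    *self = OnceReady::Ready;
                    true
                }
                OnceReady::Waiting(ref mut n) => {
                    *n -= 1;
                    false
                }
            }
        }
    }

    #[test]
    fn fires_exactly_once() {
        let mut state = OnceReady::Waiting(2);
        let fired: Vec<bool> = (0..5).map(|_| state.tick()).collect();
        assert_eq!(fired, [false, false, true, false, false]);
    }
}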
/// A per-relay-parent job for the provisioning subsystem.
pub struct ProvisioningJob {
relay_parent: Hash,
receiver: mpsc::Receiver<ProvisionerMessage>,
backed_candidates: Vec<CandidateReceipt>,
signed_bitfields: Vec<SignedAvailabilityBitfield>,
metrics: Metrics,
inherent_after: InherentAfter,
awaiting_inherent: Vec<oneshot::Sender<ProvisionerInherentData>>
}
/// Errors in the provisioner.
#[derive(Debug, Error)]
#[allow(missing_docs)]
pub enum Error {
#[error(transparent)]
Util(#[from] util::Error),
#[error("failed to get availability cores")]
CanceledAvailabilityCores(#[source] oneshot::Canceled),
#[error("failed to get persisted validation data")]
CanceledPersistedValidationData(#[source] oneshot::Canceled),
#[error("failed to get block number")]
CanceledBlockNumber(#[source] oneshot::Canceled),
#[error("failed to get backed candidates")]
CanceledBackedCandidates(#[source] oneshot::Canceled),
#[error(transparent)]
ChainApi(#[from] ChainApiError),
#[error(transparent)]
Runtime(#[from] RuntimeApiError),
#[error("failed to send message to ChainAPI")]
ChainApiMessageSend(#[source] mpsc::SendError),
#[error("failed to send message to CandidateBacking to get backed candidates")]
GetBackedCandidatesSend(#[source] mpsc::SendError),
#[error("failed to send return message with Inherents")]
InherentDataReturnChannel,
#[error("backed candidate does not correspond to selected candidate; check logic in provisioner")]
BackedCandidateOrderingProblem,
}
impl JobTrait for ProvisioningJob {
type ToJob = ProvisionerMessage;
type Error = Error;
type RunArgs = ();
type Metrics = Metrics;
const NAME: &'static str = "ProvisioningJob";
/// Run a job for the parent block indicated
//
// this function is in charge of creating and executing the job's main loop
#[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))]
fn run<S: SubsystemSender>(
relay_parent: Hash,
span: Arc<jaeger::Span>,
_run_args: Self::RunArgs,
metrics: Self::Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
mut sender: JobSender<S>,
) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
async move {
let job = ProvisioningJob::new(
relay_parent,
metrics,
receiver,
);
job.run_loop(sender.subsystem_sender(), PerLeafSpan::new(span, "provisioner")).await
}
.boxed()
}
}
impl ProvisioningJob {
fn | (
relay_parent: Hash,
metrics: Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
) -> Self {
Self {
relay_parent,
receiver,
backed_candidates: Vec::new(),
signed_bitfields: Vec::new(),
metrics,
inherent_after: InherentAfter::new_from_now(),
awaiting_inherent: Vec::new(),
}
}
async fn run_loop(
mut self,
sender: &mut impl SubsystemSender,
span: PerLeafSpan,
) -> Result<(), Error> {
use ProvisionerMessage::{
ProvisionableData, RequestInherentData,
};
loop {
futures::select! {
msg = self.receiver.next().fuse() => match msg {
Some(RequestInherentData(_, return_sender)) => {
let _span = span.child("req-inherent-data");
let _timer = self.metrics.time_request_inherent_data();
if self.inherent_after.is_ready() {
self.send_inherent_data(sender, vec![return_sender]).await;
} else {
self.awaiting_inherent.push(return_sender);
}
}
Some(ProvisionableData(_, data)) => {
let span = span.child("provisionable-data");
let _timer = self.metrics.time_provisionable_data();
self.note_provisionable_data(&span, data);
}
None => break,
},
_ = self.inherent_after.ready().fuse() => {
let _span = span.child("send-inherent-data");
let return_senders = std::mem::take(&mut self.awaiting_inherent);
if !return_senders.is_empty() {
self.send_inherent_data(sender, return_senders).await;
}
}
}
}
Ok(())
}
async fn send_inherent_data(
&mut self,
sender: &mut impl SubsystemSender,
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
) {
if let Err(err) = send_inherent_data(
self.relay_parent,
&self.signed_bitfields,
&self.backed_candidates,
return_senders,
sender,
)
.await
{
tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data");
self.metrics.on_inherent_data_request(Err(()));
} else {
self.metrics.on_inherent_data_request(Ok(()));
}
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
fn note_provisionable_data(&mut self, span: &jaeger::Span, provisionable_data: ProvisionableData) {
match provisionable_data {
ProvisionableData::Bitfield(_, signed_bitfield) => {
self.signed_bitfields.push(signed_bitfield)
}
ProvisionableData::BackedCandidate(backed_candidate) => {
let _span = span.child("provisionable-backed")
.with_para_id(backed_candidate.descriptor().para_id);
self.backed_candidates.push(backed_candidate)
}
_ => {}
}
}
}
type CoreAvailability = BitVec<bitvec::order::Lsb0, u8>;
/// The provisioner is the subsystem best suited to choosing which specific
/// backed candidates and availability bitfields should be assembled into the
/// block. To engage this functionality, a
/// `ProvisionerMessage::RequestInherentData` is sent; the response is a set of
/// non-conflicting candidates and the appropriate bitfields. Non-conflicting
/// means that there are never two distinct parachain candidates included for
/// the same parachain and that new parachain candidates cannot be included
/// until the previous one either gets declared available or expired.
///
/// The main complication here is going to be around handling
/// occupied-core-assumptions. We might have candidates that are only
/// includable when some bitfields are included. And we might have candidates
/// that are not includable when certain bitfields are included.
///
/// When we're choosing bitfields to include, the rule should be simple:
/// maximize availability. So basically, include all bitfields. And then
/// choose a coherent set of candidates along with that.
#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))]
async fn send_inherent_data(
relay_parent: Hash,
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
from_job: &mut impl SubsystemSender,
) -> Result<(), Error> {
let availability_cores = request_availability_cores(relay_parent, from_job)
.await
.await.map_err(|err| Error::CanceledAvailabilityCores(err))??;
let bitfields = select_availability_bitfields(&availability_cores, bitfields);
let candidates = select_candidates(
&availability_cores,
&bitfields,
candidates,
relay_parent,
from_job,
).await?;
let inherent_data = ProvisionerInherentData {
bitfields,
backed_candidates: candidates,
disputes: Vec::new(), // until disputes are implemented.
};
for return_sender in return_senders {
return_sender.send(inherent_data.clone()).map_err(|_data| Error::InherentDataReturnChannel)?;
}
Ok(())
}
/// In general, we want to pick all the bitfields. However, we have the following constraints:
///
/// - not more than one per validator
/// - each 1 bit must correspond to an occupied core
///
/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing availability,
/// we pick the one with the greatest number of 1 bits.
///
/// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated
/// to the sorting of the input.
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn select_availability_bitfields(
cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
) -> Vec<SignedAvailabilityBitfield> {
let mut selected: BTreeMap<ValidatorIndex, SignedAvailabilityBitfield> = BTreeMap::new();
'a:
for bitfield in bitfields.iter().cloned() {
if bitfield.payload().0.len() != cores.len() {
continue
}
let is_better = selected.get(&bitfield.validator_index())
.map_or(true, |b| b.payload().0.count_ones() < bitfield.payload().0.count_ones());
if !is_better { continue }
for (idx, _) in cores.iter().enumerate().filter(|v| !v.1.is_occupied()) {
// Bit is set for an unoccupied core - invalid
if *bitfield.payload().0.get(idx).as_deref().unwrap_or(&false) {
continue 'a
}
}
let _ = selected.insert(bitfield.validator_index(), bitfield);
}
selected.into_iter().map(|(_, b)| b).collect()
}
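// Hedged sketch (hypothetical names) of the per-validator selection rule
// above, over plain `(validator_index, Vec<bool>)` pairs. It keeps at most one
// payload per validator, preferring the one with the most set bits, and skips
// the length and unoccupied-core checks that the real function performs.
#[cfg(test)]
mod bitfield_selection_sketch {
    use std::collections::BTreeMap;

    fn select_bitfields(bitfields: &[(u32, Vec<bool>)]) -> Vec<(u32, Vec<bool>)> {
        let mut selected: BTreeMap<u32, Vec<bool>> = BTreeMap::new();
        for (validator, payload) in bitfields.iter().cloned() {
            let ones = payload.iter().filter(|b| **b).count();
            // replace a previous entry only if this payload has more set bits
            let is_better = selected
                .get(&validator)
                .map_or(true, |old| old.iter().filter(|b| **b).count() < ones);
            if is_better {
                selected.insert(validator, payload);
            }
        }
        selected.into_iter().collect()
    }

    #[test]
    fn keeps_the_densest_bitfield_per_validator() {
        let picked = select_bitfields(&[
            (0, vec![true, false]),
            (0, vec![true, true]),
            (1, vec![false, true]),
        ]);
        assert_eq!(picked, [(0, vec![true, true]), (1, vec![false, true])]);
    }
}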
/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn select_candidates(
availability_cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
relay_parent: Hash,
sender: &mut impl SubsystemSender,
) -> Result<Vec<BackedCandidate>, Error> {
let block_number = get_block_number_under_construction(relay_parent, sender).await?;
let mut selected_candidates =
Vec::with_capacity(candidates.len().min(availability_cores.len()));
for (core_idx, core) in availability_cores.iter().enumerate() {
let (scheduled_core, assumption) = match core {
CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free),
CoreState::Occupied(occupied_core) => {
if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) {
if let Some(ref scheduled_core) = occupied_core.next_up_on_available {
(scheduled_core, OccupiedCoreAssumption::Included)
} else {
continue;
}
} else {
if occupied_core.time_out_at != block_number {
continue;
}
if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out {
(scheduled_core, OccupiedCoreAssumption::TimedOut)
} else {
continue;
}
}
}
CoreState::Free => continue,
};
let validation_data = match request_persisted_validation_data(
relay_parent,
scheduled_core.para_id,
assumption,
sender,
)
.await
.await.map_err(|err| Error::CanceledPersistedValidationData(err))??
{
Some(v) => v,
None => continue,
};
let computed_validation_data_hash = validation_data.hash();
// we arbitrarily pick the first of the backed candidates which match the appropriate selection criteria
if let Some(candidate) = candidates.iter().find(|backed_candidate| {
let descriptor = &backed_candidate.descriptor;
descriptor.para_id == scheduled_core.para_id
&& descriptor.persisted_validation_data_hash == computed_validation_data_hash
}) {
let candidate_hash = candidate.hash();
tracing::trace!(
target: LOG_TARGET,
"Selecting candidate {}. para_id={} core={}",
candidate_hash,
candidate.descriptor.para_id,
core_idx,
);
selected_candidates.push(candidate_hash);
}
}
// now get the backed candidates corresponding to these candidate receipts
let (tx, rx) = oneshot::channel();
sender.send_message(CandidateBackingMessage::GetBackedCandidates(
relay_parent,
selected_candidates.clone(),
tx,
).into()).await;
let mut candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?;
// `selected_candidates` is generated in ascending order by core index, and `GetBackedCandidates`
// _should_ preserve that property, but let's just make sure.
//
// We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected candidate
// maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by checking them
// in order, we can ensure that the backed candidates are also in order.
let mut backed_idx = 0;
for selected in selected_candidates {
if selected == candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() {
backed_idx += 1;
}
}
if candidates.len() != backed_idx {
Err(Error::BackedCandidateOrderingProblem)?;
}
// keep only one candidate with validation code.
let mut with_validation_code = false;
candidates.retain(|c| {
if c.candidate.commitments.new_validation_code.is_some() {
if with_validation_code {
return false
}
with_validation_code = true;
}
true
});
tracing::debug!(
target: LOG_TARGET,
"Selected {} candidates for {} cores",
candidates.len(),
availability_cores.len(),
);
Ok(candidates)
}
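// Sketch of the idea behind the ordering check a few lines above, over plain
// hash stand-ins: every backed entry must be matched, in order, by some
// selected entry, so walking both lists with one index detects reordering.
// Purely illustrative; it does not reproduce the original's early error when
// the backed list is exhausted mid-loop.
#[cfg(test)]
mod ordering_check_sketch {
    fn backed_preserves_selection_order(selected: &[u64], backed: &[u64]) -> bool {
        let mut backed_idx = 0;
        for selected_hash in selected {
            if backed.get(backed_idx) == Some(selected_hash) {
                backed_idx += 1;
            }
        }
        // every backed entry was consumed, in order
        backed_idx == backed.len()
    }

    #[test]
    fn detects_reordered_backed_candidates() {
        assert!(backed_preserves_selection_order(&[1, 2, 3], &[1, 3]));
        assert!(!backed_preserves_selection_order(&[1, 2, 3], &[3, 1]));
    }
}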
/// Produces a block number 1 higher than that of the relay parent.
/// In the event of an invalid `relay_parent`, returns `Ok(0)`.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn get_block_number_under_construction(
relay_parent: Hash,
sender: &mut impl SubsystemSender,
) -> Result<BlockNumber, Error> {
let (tx, rx) = oneshot::channel();
sender
.send_message(ChainApiMessage::BlockNumber(
relay_parent,
tx,
).into())
.await;
match rx.await.map_err(|err| Error::CanceledBlockNumber(err))? {
Ok(Some(n)) => Ok(n + 1),
Ok(None) => Ok(0),
Err(err) => Err(err.into()),
}
}
/// The availability bitfield for a given core is the transpose
/// of a set of signed availability bitfields. It goes like this:
///
/// - construct a transverse slice along `core_idx`
/// - bitwise-or it with the availability slice
/// - count the 1 bits, compare to the total length; true on 2/3+
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn bitfields_indicate_availability(
core_idx: usize,
bitfields: &[SignedAvailabilityBitfield],
availability: &CoreAvailability,
) -> bool {
let mut availability = availability.clone();
let availability_len = availability.len();
for bitfield in bitfields {
let validator_idx = bitfield.validator_index().0 as usize;
match availability.get_mut(validator_idx) {
None => {
// in principle, this function might return a `Result<bool, Error>` so that we can more clearly express this error condition
// however, in practice, that would just push off an error-handling routine which would look a whole lot like this one.
// simpler to just handle the error internally here.
tracing::warn!(
target: LOG_TARGET,
validator_idx = %validator_idx,
availability_len = %availability_len,
"attempted to set a transverse bit at idx {} which is greater than bitfield size {}",
validator_idx,
availability_len,
);
return false;
}
Some(mut bit_mut) => *bit_mut |= bitfield.payload().0[core_idx],
}
}
3 * availability.count_ones() >= 2 * availability.len()
}
#[derive(Clone)]
struct MetricsInner {
inherent_data_requests: prometheus::CounterVec<prometheus::U64>,
request_inherent_data: prometheus::Histogram,
provisionable_data: prometheus::Histogram,
}
/// Provisioner metrics.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
impl Metrics {
fn on_inherent_data_request(&self, response: Result<(), ()>) {
if let Some(metrics) = &self.0 {
match response {
Ok(()) => metrics.inherent_data_requests.with_label_values(&["succeeded"]).inc(),
Err(()) => metrics.inherent_data_requests.with_label_values(&["failed"]).inc(),
}
}
}
/// Provide a timer for `request_inherent_data` which observes on drop.
fn time_request_inherent_data(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.request_inherent_data.start_timer())
}
/// Provide a timer for `provisionable_data` which observes on drop.
fn time_provisionable_data(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.provisionable_data.start_timer())
}
}
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
inherent_data_requests: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"parachain_inherent_data_requests_total",
"Number of InherentData requests served by provisioner.",
),
&["success"],
)?,
registry,
)?,
request_inherent_data: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_provisioner_request_inherent_data",
"Time spent within `provisioner::request_inherent_data`",
)
)?,
registry,
)?,
provisionable_data: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_provisioner_provisionable_data",
"Time spent within `provisioner::provisionable_data`",
)
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
}
/// The provisioning subsystem.
pub type ProvisioningSubsystem<Spawner> = JobSubsystem<ProvisioningJob, Spawner>;
#[cfg(test)]
mod tests;
| new | identifier_name |
lib.rs | // Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! The provisioner is responsible for assembling a relay chain block
//! from a set of available parachain candidates of its choice.
#![deny(missing_docs, unused_crate_dependencies)]
use bitvec::vec::BitVec;
use futures::{
channel::{mpsc, oneshot},
prelude::*,
};
use polkadot_node_subsystem::{
errors::{ChainApiError, RuntimeApiError}, PerLeafSpan, SubsystemSender, jaeger,
messages::{
CandidateBackingMessage, ChainApiMessage, ProvisionableData, ProvisionerInherentData,
ProvisionerMessage,
},
};
use polkadot_node_subsystem_util::{
self as util, JobSubsystem, JobSender,
request_availability_cores, request_persisted_validation_data, JobTrait, metrics::{self, prometheus},
};
use polkadot_primitives::v1::{
BackedCandidate, BlockNumber, CandidateReceipt, CoreState, Hash, OccupiedCoreAssumption,
SignedAvailabilityBitfield, ValidatorIndex,
};
use std::{pin::Pin, collections::BTreeMap, sync::Arc};
use thiserror::Error;
use futures_timer::Delay;
/// How long to wait before proposing.
const PRE_PROPOSE_TIMEOUT: std::time::Duration = core::time::Duration::from_millis(2000);
const LOG_TARGET: &str = "parachain::provisioner";
enum InherentAfter {
Ready,
Wait(Delay),
}
impl InherentAfter {
fn new_from_now() -> Self {
InherentAfter::Wait(Delay::new(PRE_PROPOSE_TIMEOUT))
}
fn is_ready(&self) -> bool {
match *self {
InherentAfter::Ready => true,
InherentAfter::Wait(_) => false,
}
}
async fn ready(&mut self) {
match *self {
InherentAfter::Ready => {
// Make sure we never end the returned future.
// This is required because the `select!` that calls this future will end in a busy loop.
futures::pending!()
},
InherentAfter::Wait(ref mut d) => {
d.await;
*self = InherentAfter::Ready;
},
}
}
}
/// A per-relay-parent job for the provisioning subsystem.
pub struct ProvisioningJob {
relay_parent: Hash,
receiver: mpsc::Receiver<ProvisionerMessage>,
backed_candidates: Vec<CandidateReceipt>,
signed_bitfields: Vec<SignedAvailabilityBitfield>,
metrics: Metrics,
inherent_after: InherentAfter,
awaiting_inherent: Vec<oneshot::Sender<ProvisionerInherentData>>
}
/// Errors in the provisioner.
#[derive(Debug, Error)]
#[allow(missing_docs)]
pub enum Error {
#[error(transparent)]
Util(#[from] util::Error),
#[error("failed to get availability cores")]
CanceledAvailabilityCores(#[source] oneshot::Canceled),
#[error("failed to get persisted validation data")]
CanceledPersistedValidationData(#[source] oneshot::Canceled),
#[error("failed to get block number")]
CanceledBlockNumber(#[source] oneshot::Canceled),
#[error("failed to get backed candidates")]
CanceledBackedCandidates(#[source] oneshot::Canceled),
#[error(transparent)]
ChainApi(#[from] ChainApiError),
#[error(transparent)]
Runtime(#[from] RuntimeApiError),
#[error("failed to send message to ChainAPI")]
ChainApiMessageSend(#[source] mpsc::SendError),
#[error("failed to send message to CandidateBacking to get backed candidates")]
GetBackedCandidatesSend(#[source] mpsc::SendError),
#[error("failed to send return message with Inherents")]
InherentDataReturnChannel,
#[error("backed candidate does not correspond to selected candidate; check logic in provisioner")]
BackedCandidateOrderingProblem,
}
impl JobTrait for ProvisioningJob {
type ToJob = ProvisionerMessage;
type Error = Error;
type RunArgs = ();
type Metrics = Metrics;
const NAME: &'static str = "ProvisioningJob";
/// Run a job for the parent block indicated
//
// this function is in charge of creating and executing the job's main loop
#[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))]
fn run<S: SubsystemSender>(
relay_parent: Hash,
span: Arc<jaeger::Span>,
_run_args: Self::RunArgs,
metrics: Self::Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
mut sender: JobSender<S>,
) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
async move {
let job = ProvisioningJob::new(
relay_parent,
metrics,
receiver,
);
job.run_loop(sender.subsystem_sender(), PerLeafSpan::new(span, "provisioner")).await
}
.boxed()
}
}
impl ProvisioningJob {
fn new(
relay_parent: Hash,
metrics: Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
) -> Self {
Self {
relay_parent,
receiver,
backed_candidates: Vec::new(),
signed_bitfields: Vec::new(),
metrics,
inherent_after: InherentAfter::new_from_now(),
awaiting_inherent: Vec::new(),
}
}
async fn run_loop(
mut self,
sender: &mut impl SubsystemSender,
span: PerLeafSpan,
) -> Result<(), Error> {
use ProvisionerMessage::{
ProvisionableData, RequestInherentData,
};
loop {
futures::select! {
msg = self.receiver.next().fuse() => match msg {
Some(RequestInherentData(_, return_sender)) => {
let _span = span.child("req-inherent-data");
let _timer = self.metrics.time_request_inherent_data();
if self.inherent_after.is_ready() {
self.send_inherent_data(sender, vec![return_sender]).await;
} else {
self.awaiting_inherent.push(return_sender);
}
}
Some(ProvisionableData(_, data)) => {
let span = span.child("provisionable-data");
let _timer = self.metrics.time_provisionable_data();
self.note_provisionable_data(&span, data);
}
None => break,
},
_ = self.inherent_after.ready().fuse() => {
let _span = span.child("send-inherent-data");
let return_senders = std::mem::take(&mut self.awaiting_inherent);
if !return_senders.is_empty() {
self.send_inherent_data(sender, return_senders).await;
}
}
}
}
Ok(())
}
async fn send_inherent_data(
&mut self,
sender: &mut impl SubsystemSender,
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
) {
if let Err(err) = send_inherent_data(
self.relay_parent,
&self.signed_bitfields,
&self.backed_candidates,
return_senders,
sender,
)
.await
{
tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data");
self.metrics.on_inherent_data_request(Err(()));
} else |
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
fn note_provisionable_data(&mut self, span: &jaeger::Span, provisionable_data: ProvisionableData) {
match provisionable_data {
ProvisionableData::Bitfield(_, signed_bitfield) => {
self.signed_bitfields.push(signed_bitfield)
}
ProvisionableData::BackedCandidate(backed_candidate) => {
let _span = span.child("provisionable-backed")
.with_para_id(backed_candidate.descriptor().para_id);
self.backed_candidates.push(backed_candidate)
}
_ => {}
}
}
}
type CoreAvailability = BitVec<bitvec::order::Lsb0, u8>;
/// The provisioner is the subsystem best suited to choosing which specific
/// backed candidates and availability bitfields should be assembled into the
/// block. To engage this functionality, a
/// `ProvisionerMessage::RequestInherentData` is sent; the response is a set of
/// non-conflicting candidates and the appropriate bitfields. Non-conflicting
/// means that there are never two distinct parachain candidates included for
/// the same parachain and that new parachain candidates cannot be included
/// until the previous one either gets declared available or expired.
///
/// The main complication here is going to be around handling
/// occupied-core-assumptions. We might have candidates that are only
/// includable when some bitfields are included. And we might have candidates
/// that are not includable when certain bitfields are included.
///
/// When we're choosing bitfields to include, the rule should be simple:
/// maximize availability. So basically, include all bitfields. And then
/// choose a coherent set of candidates along with that.
#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))]
async fn send_inherent_data(
relay_parent: Hash,
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
from_job: &mut impl SubsystemSender,
) -> Result<(), Error> {
let availability_cores = request_availability_cores(relay_parent, from_job)
.await
.await.map_err(|err| Error::CanceledAvailabilityCores(err))??;
let bitfields = select_availability_bitfields(&availability_cores, bitfields);
let candidates = select_candidates(
&availability_cores,
&bitfields,
candidates,
relay_parent,
from_job,
).await?;
let inherent_data = ProvisionerInherentData {
bitfields,
backed_candidates: candidates,
disputes: Vec::new(), // until disputes are implemented.
};
for return_sender in return_senders {
return_sender.send(inherent_data.clone()).map_err(|_data| Error::InherentDataReturnChannel)?;
}
Ok(())
}
/// In general, we want to pick all the bitfields. However, we have the following constraints:
///
/// - not more than one per validator
/// - each 1 bit must correspond to an occupied core
///
/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing availability,
/// we pick the one with the greatest number of 1 bits.
///
/// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated
/// to the sorting of the input.
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn select_availability_bitfields(
cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
) -> Vec<SignedAvailabilityBitfield> {
let mut selected: BTreeMap<ValidatorIndex, SignedAvailabilityBitfield> = BTreeMap::new();
'a:
for bitfield in bitfields.iter().cloned() {
if bitfield.payload().0.len() != cores.len() {
continue
}
let is_better = selected.get(&bitfield.validator_index())
.map_or(true, |b| b.payload().0.count_ones() < bitfield.payload().0.count_ones());
if !is_better { continue }
for (idx, _) in cores.iter().enumerate().filter(|v| !v.1.is_occupied()) {
// Bit is set for an unoccupied core - invalid
if *bitfield.payload().0.get(idx).as_deref().unwrap_or(&false) {
continue 'a
}
}
let _ = selected.insert(bitfield.validator_index(), bitfield);
}
selected.into_iter().map(|(_, b)| b).collect()
}
/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn select_candidates(
availability_cores: &[CoreState],
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
relay_parent: Hash,
sender: &mut impl SubsystemSender,
) -> Result<Vec<BackedCandidate>, Error> {
let block_number = get_block_number_under_construction(relay_parent, sender).await?;
let mut selected_candidates =
Vec::with_capacity(candidates.len().min(availability_cores.len()));
for (core_idx, core) in availability_cores.iter().enumerate() {
let (scheduled_core, assumption) = match core {
CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free),
CoreState::Occupied(occupied_core) => {
if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) {
if let Some(ref scheduled_core) = occupied_core.next_up_on_available {
(scheduled_core, OccupiedCoreAssumption::Included)
} else {
continue;
}
} else {
if occupied_core.time_out_at != block_number {
continue;
}
if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out {
(scheduled_core, OccupiedCoreAssumption::TimedOut)
} else {
continue;
}
}
}
CoreState::Free => continue,
};
let validation_data = match request_persisted_validation_data(
relay_parent,
scheduled_core.para_id,
assumption,
sender,
)
.await
.await.map_err(|err| Error::CanceledPersistedValidationData(err))??
{
Some(v) => v,
None => continue,
};
let computed_validation_data_hash = validation_data.hash();
// we arbitrarily pick the first of the backed candidates which match the appropriate selection criteria
if let Some(candidate) = candidates.iter().find(|backed_candidate| {
let descriptor = &backed_candidate.descriptor;
descriptor.para_id == scheduled_core.para_id
&& descriptor.persisted_validation_data_hash == computed_validation_data_hash
}) {
let candidate_hash = candidate.hash();
tracing::trace!(
target: LOG_TARGET,
"Selecting candidate {}. para_id={} core={}",
candidate_hash,
candidate.descriptor.para_id,
core_idx,
);
selected_candidates.push(candidate_hash);
}
}
// now get the backed candidates corresponding to these candidate receipts
let (tx, rx) = oneshot::channel();
sender.send_message(CandidateBackingMessage::GetBackedCandidates(
relay_parent,
selected_candidates.clone(),
tx,
).into()).await;
let mut candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?;
// `selected_candidates` is generated in ascending order by core index, and `GetBackedCandidates`
// _should_ preserve that property, but let's just make sure.
//
// We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected candidate
// maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by checking them
// in order, we can ensure that the backed candidates are also in order.
let mut backed_idx = 0;
for selected in selected_candidates {
if selected == candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() {
backed_idx += 1;
}
}
if candidates.len() != backed_idx {
Err(Error::BackedCandidateOrderingProblem)?;
}
// keep only one candidate with validation code.
let mut with_validation_code = false;
candidates.retain(|c| {
if c.candidate.commitments.new_validation_code.is_some() {
if with_validation_code {
return false
}
with_validation_code = true;
}
true
});
tracing::debug!(
target: LOG_TARGET,
"Selected {} candidates for {} cores",
candidates.len(),
availability_cores.len(),
);
Ok(candidates)
}
/// Produces a block number 1 higher than that of the relay parent.
/// In the event of an invalid `relay_parent`, returns `Ok(0)`.
#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
async fn get_block_number_under_construction(
relay_parent: Hash,
sender: &mut impl SubsystemSender,
) -> Result<BlockNumber, Error> {
let (tx, rx) = oneshot::channel();
sender
.send_message(ChainApiMessage::BlockNumber(
relay_parent,
tx,
).into())
.await;
match rx.await.map_err(|err| Error::CanceledBlockNumber(err))? {
Ok(Some(n)) => Ok(n + 1),
Ok(None) => Ok(0),
Err(err) => Err(err.into()),
}
}
/// The availability bitfield for a given core is the transpose
/// of a set of signed availability bitfields. It goes like this:
///
/// - construct a transverse slice along `core_idx`
/// - bitwise-or it with the availability slice
/// - count the 1 bits, compare to the total length; true on 2/3+
#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
fn bitfields_indicate_availability(
core_idx: usize,
bitfields: &[SignedAvailabilityBitfield],
availability: &CoreAvailability,
) -> bool {
let mut availability = availability.clone();
let availability_len = availability.len();
for bitfield in bitfields {
let validator_idx = bitfield.validator_index().0 as usize;
match availability.get_mut(validator_idx) {
None => {
// in principle, this function might return a `Result<bool, Error>` so that we can more clearly express this error condition
// however, in practice, that would just push off an error-handling routine which would look a whole lot like this one.
// simpler to just handle the error internally here.
tracing::warn!(
target: LOG_TARGET,
validator_idx = %validator_idx,
availability_len = %availability_len,
"attempted to set a transverse bit at idx {} which is greater than bitfield size {}",
validator_idx,
availability_len,
);
return false;
}
Some(mut bit_mut) => *bit_mut |= bitfield.payload().0[core_idx],
}
}
3 * availability.count_ones() >= 2 * availability.len()
}
#[derive(Clone)]
struct MetricsInner {
inherent_data_requests: prometheus::CounterVec<prometheus::U64>,
request_inherent_data: prometheus::Histogram,
provisionable_data: prometheus::Histogram,
}
/// Provisioner metrics.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
impl Metrics {
fn on_inherent_data_request(&self, response: Result<(), ()>) {
if let Some(metrics) = &self.0 {
match response {
Ok(()) => metrics.inherent_data_requests.with_label_values(&["succeeded"]).inc(),
Err(()) => metrics.inherent_data_requests.with_label_values(&["failed"]).inc(),
}
}
}
/// Provide a timer for `request_inherent_data` which observes on drop.
fn time_request_inherent_data(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.request_inherent_data.start_timer())
}
/// Provide a timer for `provisionable_data` which observes on drop.
fn time_provisionable_data(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.provisionable_data.start_timer())
}
}
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
inherent_data_requests: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"parachain_inherent_data_requests_total",
"Number of InherentData requests served by provisioner.",
),
&["success"],
)?,
registry,
)?,
request_inherent_data: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_provisioner_request_inherent_data",
"Time spent within `provisioner::request_inherent_data`",
)
)?,
registry,
)?,
provisionable_data: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"parachain_provisioner_provisionable_data",
"Time spent within `provisioner::provisionable_data`",
)
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
}
/// The provisioning subsystem.
pub type ProvisioningSubsystem<Spawner> = JobSubsystem<ProvisioningJob, Spawner>;
#[cfg(test)]
mod tests;
| {
self.metrics.on_inherent_data_request(Ok(()));
} | conditional_block |
types.rs | use crate::{encode_section, Encode, Section, SectionId};
/// Represents a subtype of possible other types in a WebAssembly module.
#[derive(Debug, Clone)]
pub struct SubType {
/// Is the subtype final.
pub is_final: bool, | pub structural_type: StructuralType,
}
/// Represents a structural type in a WebAssembly module.
#[derive(Debug, Clone)]
pub enum StructuralType {
/// The type is for a function.
Func(FuncType),
/// The type is for an array.
Array(ArrayType),
/// The type is for a struct.
Struct(StructType),
}
/// Represents a type of a function in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct FuncType {
/// The combined parameters and result types.
params_results: Box<[ValType]>,
/// The number of parameter types.
len_params: usize,
}
/// Represents a type of an array in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct ArrayType(pub FieldType);
/// Represents a type of a struct in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct StructType {
/// Struct fields.
pub fields: Box<[FieldType]>,
}
/// Field type in structural types (structs, arrays).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub struct FieldType {
/// Storage type of the field.
pub element_type: StorageType,
/// Is the field mutable.
pub mutable: bool,
}
/// Storage type for structural type fields.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum StorageType {
/// The `i8` type.
I8,
/// The `i16` type.
I16,
/// A value type.
Val(ValType),
}
/// The type of a core WebAssembly value.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum ValType {
/// The `i32` type.
I32,
/// The `i64` type.
I64,
/// The `f32` type.
F32,
/// The `f64` type.
F64,
/// The `v128` type.
///
/// Part of the SIMD proposal.
V128,
/// A reference type.
///
/// The `funcref` and `externref` type fall into this category and the full
/// generalization here is due to the implementation of the
/// function-references proposal.
Ref(RefType),
}
impl FuncType {
/// Creates a new [`FuncType`] from the given `params` and `results`.
pub fn new<P, R>(params: P, results: R) -> Self
where
P: IntoIterator<Item = ValType>,
R: IntoIterator<Item = ValType>,
{
let mut buffer = params.into_iter().collect::<Vec<_>>();
let len_params = buffer.len();
buffer.extend(results);
Self {
params_results: buffer.into(),
len_params,
}
}
/// Returns a shared slice to the parameter types of the [`FuncType`].
#[inline]
pub fn params(&self) -> &[ValType] {
&self.params_results[..self.len_params]
}
/// Returns a shared slice to the result types of the [`FuncType`].
#[inline]
pub fn results(&self) -> &[ValType] {
&self.params_results[self.len_params..]
}
}
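// Usage sketch for the combined storage above: the params and results of an
// `(i32, i32) -> i64` signature come back as the two halves of the single
// boxed slice. Module name is hypothetical.
#[cfg(test)]
mod func_type_usage_sketch {
    use super::{FuncType, ValType};

    #[test]
    fn params_and_results_are_split_views() {
        let ty = FuncType::new([ValType::I32, ValType::I32], [ValType::I64]);
        assert_eq!(ty.params(), &[ValType::I32, ValType::I32]);
        assert_eq!(ty.results(), &[ValType::I64]);
    }
}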
impl ValType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: ValType = ValType::Ref(RefType::FUNCREF);
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: ValType = ValType::Ref(RefType::EXTERNREF);
}
impl Encode for StorageType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
StorageType::I8 => sink.push(0x7A),
StorageType::I16 => sink.push(0x79),
StorageType::Val(vt) => vt.encode(sink),
}
}
}
impl Encode for ValType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
ValType::I32 => sink.push(0x7F),
ValType::I64 => sink.push(0x7E),
ValType::F32 => sink.push(0x7D),
ValType::F64 => sink.push(0x7C),
ValType::V128 => sink.push(0x7B),
ValType::Ref(rt) => rt.encode(sink),
}
}
}
/// A reference type.
///
/// This is largely part of the function references proposal for WebAssembly but
/// additionally is used by the `funcref` and `externref` types. The full
/// generality of this type is only exercised with function-references.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
#[allow(missing_docs)]
pub struct RefType {
pub nullable: bool,
pub heap_type: HeapType,
}
impl RefType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Func,
};
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Extern,
};
}
impl Encode for RefType {
fn encode(&self, sink: &mut Vec<u8>) {
if self.nullable {
// Favor the original encodings of `funcref` and `externref` where
// possible
match self.heap_type {
HeapType::Func => return sink.push(0x70),
HeapType::Extern => return sink.push(0x6f),
_ => {}
}
}
if self.nullable {
sink.push(0x6C);
} else {
sink.push(0x6B);
}
self.heap_type.encode(sink);
}
}
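// Quick check of the shorthand branch above: a nullable `funcref` keeps its
// classic single-byte encoding (0x70) rather than the general
// nullability-prefixed form. Illustrative only.
#[cfg(test)]
mod ref_type_encoding_sketch {
    use super::{Encode, RefType};

    #[test]
    fn funcref_encodes_to_the_classic_byte() {
        let mut sink = Vec::new();
        RefType::FUNCREF.encode(&mut sink);
        assert_eq!(sink, [0x70]);
    }
}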
impl From<RefType> for ValType {
fn from(ty: RefType) -> ValType {
ValType::Ref(ty)
}
}
/// Part of the function references proposal.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum HeapType {
/// Untyped (any) function.
Func,
/// External heap type.
Extern,
/// The `any` heap type. The common supertype (a.k.a. top) of all internal types.
Any,
/// The `none` heap type. The common subtype (a.k.a. bottom) of all internal types.
None,
/// The `noextern` heap type. The common subtype (a.k.a. bottom) of all external types.
NoExtern,
/// The `nofunc` heap type. The common subtype (a.k.a. bottom) of all function types.
NoFunc,
/// The `eq` heap type. The common supertype of all referenceable types on which comparison
/// (ref.eq) is allowed.
Eq,
/// The `struct` heap type. The common supertype of all struct types.
Struct,
/// The `array` heap type. The common supertype of all array types.
Array,
/// The i31 heap type.
I31,
/// User defined type at the given index.
Indexed(u32),
}
impl Encode for HeapType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
HeapType::Func => sink.push(0x70),
HeapType::Extern => sink.push(0x6F),
HeapType::Any => sink.push(0x6E),
HeapType::None => sink.push(0x65),
HeapType::NoExtern => sink.push(0x69),
HeapType::NoFunc => sink.push(0x68),
HeapType::Eq => sink.push(0x6D),
HeapType::Struct => sink.push(0x67),
HeapType::Array => sink.push(0x66),
HeapType::I31 => sink.push(0x6A),
// Note that this is encoded as a signed type rather than unsigned
// as it's decoded as an s33
HeapType::Indexed(i) => i64::from(*i).encode(sink),
}
}
}
/// An encoder for the type section of WebAssembly modules.
///
/// # Example
///
/// ```rust
/// use wasm_encoder::{Module, TypeSection, ValType};
///
/// let mut types = TypeSection::new();
///
/// types.function([ValType::I32, ValType::I32], [ValType::I64]);
///
/// let mut module = Module::new();
/// module.section(&types);
///
/// let bytes = module.finish();
/// ```
#[derive(Clone, Debug, Default)]
pub struct TypeSection {
bytes: Vec<u8>,
num_added: u32,
}
impl TypeSection {
/// Create a new module type section encoder.
pub fn new() -> Self {
Self::default()
}
/// The number of types in the section.
pub fn len(&self) -> u32 {
self.num_added
}
/// Determines if the section is empty.
pub fn is_empty(&self) -> bool {
self.num_added == 0
}
/// Define a function type in this type section.
pub fn function<P, R>(&mut self, params: P, results: R) -> &mut Self
where
P: IntoIterator<Item = ValType>,
P::IntoIter: ExactSizeIterator,
R: IntoIterator<Item = ValType>,
R::IntoIter: ExactSizeIterator,
{
let params = params.into_iter();
let results = results.into_iter();
self.bytes.push(0x60);
params.len().encode(&mut self.bytes);
params.for_each(|p| p.encode(&mut self.bytes));
results.len().encode(&mut self.bytes);
results.for_each(|p| p.encode(&mut self.bytes));
self.num_added += 1;
self
}
/// Define an array type in this type section.
pub fn array(&mut self, ty: &StorageType, mutable: bool) -> &mut Self {
self.bytes.push(0x5e);
self.field(ty, mutable);
self.num_added += 1;
self
}
fn field(&mut self, ty: &StorageType, mutable: bool) -> &mut Self {
ty.encode(&mut self.bytes);
self.bytes.push(mutable as u8);
self
}
/// Define a struct type in this type section.
pub fn struct_(&mut self, fields: Vec<FieldType>) -> &mut Self {
self.bytes.push(0x5f);
fields.len().encode(&mut self.bytes);
for f in fields.iter() {
self.field(&f.element_type, f.mutable);
}
self.num_added += 1;
self
}
/// Define an explicit subtype in this type section.
pub fn subtype(&mut self, ty: &SubType) -> &mut Self {
// In the GC spec, supertypes is a vector, not an option.
let st = match ty.supertype_idx {
Some(idx) => vec![idx],
None => vec![],
};
if ty.is_final {
self.bytes.push(0x4e);
st.encode(&mut self.bytes);
} else if !st.is_empty() {
self.bytes.push(0x50);
st.encode(&mut self.bytes);
}
match &ty.structural_type {
StructuralType::Func(ty) => {
self.function(ty.params().iter().copied(), ty.results().iter().copied());
}
StructuralType::Array(ArrayType(ty)) => {
self.array(&ty.element_type, ty.mutable);
}
StructuralType::Struct(ty) => {
self.struct_(ty.fields.to_vec());
}
}
self
}
}
impl Encode for TypeSection {
fn encode(&self, sink: &mut Vec<u8>) {
encode_section(sink, self.num_added, &self.bytes);
}
}
impl Section for TypeSection {
fn id(&self) -> u8 {
SectionId::Type.into()
}
} | /// The list of supertype indexes. As of GC MVP, there can be at most one supertype.
pub supertype_idx: Option<u32>,
/// The structural type of the subtype. | random_line_split |
types.rs | use crate::{encode_section, Encode, Section, SectionId};
/// Represents a subtype of possible other types in a WebAssembly module.
#[derive(Debug, Clone)]
pub struct SubType {
/// Is the subtype final.
pub is_final: bool,
/// The list of supertype indexes. As of GC MVP, there can be at most one supertype.
pub supertype_idx: Option<u32>,
/// The structural type of the subtype.
pub structural_type: StructuralType,
}
/// Represents a structural type in a WebAssembly module.
#[derive(Debug, Clone)]
pub enum StructuralType {
/// The type is for a function.
Func(FuncType),
/// The type is for an array.
Array(ArrayType),
/// The type is for a struct.
Struct(StructType),
}
/// Represents a type of a function in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct FuncType {
/// The combined parameters and result types.
params_results: Box<[ValType]>,
/// The number of parameter types.
len_params: usize,
}
/// Represents a type of an array in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct ArrayType(pub FieldType);
/// Represents a type of a struct in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct StructType {
/// Struct fields.
pub fields: Box<[FieldType]>,
}
/// Field type in structural types (structs, arrays).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub struct FieldType {
/// Storage type of the field.
pub element_type: StorageType,
/// Is the field mutable.
pub mutable: bool,
}
/// Storage type for structural type fields.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum | {
/// The `i8` type.
I8,
/// The `i16` type.
I16,
/// A value type.
Val(ValType),
}
/// The type of a core WebAssembly value.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum ValType {
/// The `i32` type.
I32,
/// The `i64` type.
I64,
/// The `f32` type.
F32,
/// The `f64` type.
F64,
/// The `v128` type.
///
/// Part of the SIMD proposal.
V128,
/// A reference type.
///
/// The `funcref` and `externref` type fall into this category and the full
/// generalization here is due to the implementation of the
/// function-references proposal.
Ref(RefType),
}
impl FuncType {
/// Creates a new [`FuncType`] from the given `params` and `results`.
pub fn new<P, R>(params: P, results: R) -> Self
where
P: IntoIterator<Item = ValType>,
R: IntoIterator<Item = ValType>,
{
let mut buffer = params.into_iter().collect::<Vec<_>>();
let len_params = buffer.len();
buffer.extend(results);
Self {
params_results: buffer.into(),
len_params,
}
}
/// Returns a shared slice to the parameter types of the [`FuncType`].
#[inline]
pub fn params(&self) -> &[ValType] {
&self.params_results[..self.len_params]
}
/// Returns a shared slice to the result types of the [`FuncType`].
#[inline]
pub fn results(&self) -> &[ValType] {
&self.params_results[self.len_params..]
}
}
impl ValType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: ValType = ValType::Ref(RefType::FUNCREF);
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: ValType = ValType::Ref(RefType::EXTERNREF);
}
impl Encode for StorageType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
StorageType::I8 => sink.push(0x7A),
StorageType::I16 => sink.push(0x79),
StorageType::Val(vt) => vt.encode(sink),
}
}
}
impl Encode for ValType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
ValType::I32 => sink.push(0x7F),
ValType::I64 => sink.push(0x7E),
ValType::F32 => sink.push(0x7D),
ValType::F64 => sink.push(0x7C),
ValType::V128 => sink.push(0x7B),
ValType::Ref(rt) => rt.encode(sink),
}
}
}
/// A reference type.
///
/// This is largely part of the function references proposal for WebAssembly but
/// additionally is used by the `funcref` and `externref` types. The full
/// generality of this type is only exercised with function-references.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
#[allow(missing_docs)]
pub struct RefType {
pub nullable: bool,
pub heap_type: HeapType,
}
impl RefType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Func,
};
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Extern,
};
}
impl Encode for RefType {
fn encode(&self, sink: &mut Vec<u8>) {
if self.nullable {
// Favor the original encodings of `funcref` and `externref` where
// possible
match self.heap_type {
HeapType::Func => return sink.push(0x70),
HeapType::Extern => return sink.push(0x6f),
_ => {}
}
}
if self.nullable {
sink.push(0x6C);
} else {
sink.push(0x6B);
}
self.heap_type.encode(sink);
}
}
impl From<RefType> for ValType {
fn from(ty: RefType) -> ValType {
ValType::Ref(ty)
}
}
/// Part of the function references proposal.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum HeapType {
/// Untyped (any) function.
Func,
/// External heap type.
Extern,
/// The `any` heap type. The common supertype (a.k.a. top) of all internal types.
Any,
/// The `none` heap type. The common subtype (a.k.a. bottom) of all internal types.
None,
/// The `noextern` heap type. The common subtype (a.k.a. bottom) of all external types.
NoExtern,
/// The `nofunc` heap type. The common subtype (a.k.a. bottom) of all function types.
NoFunc,
/// The `eq` heap type. The common supertype of all referenceable types on which comparison
/// (ref.eq) is allowed.
Eq,
/// The `struct` heap type. The common supertype of all struct types.
Struct,
/// The `array` heap type. The common supertype of all array types.
Array,
/// The i31 heap type.
I31,
/// User defined type at the given index.
Indexed(u32),
}
impl Encode for HeapType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
HeapType::Func => sink.push(0x70),
HeapType::Extern => sink.push(0x6F),
HeapType::Any => sink.push(0x6E),
HeapType::None => sink.push(0x65),
HeapType::NoExtern => sink.push(0x69),
HeapType::NoFunc => sink.push(0x68),
HeapType::Eq => sink.push(0x6D),
HeapType::Struct => sink.push(0x67),
HeapType::Array => sink.push(0x66),
HeapType::I31 => sink.push(0x6A),
// Note that this is encoded as a signed type rather than unsigned
// as it's decoded as an s33
HeapType::Indexed(i) => i64::from(*i).encode(sink),
}
}
}
/// An encoder for the type section of WebAssembly modules.
///
/// # Example
///
/// ```rust
/// use wasm_encoder::{Module, TypeSection, ValType};
///
/// let mut types = TypeSection::new();
///
/// types.function([ValType::I32, ValType::I32], [ValType::I64]);
///
/// let mut module = Module::new();
/// module.section(&types);
///
/// let bytes = module.finish();
/// ```
#[derive(Clone, Debug, Default)]
pub struct TypeSection {
bytes: Vec<u8>,
num_added: u32,
}
impl TypeSection {
/// Create a new module type section encoder.
pub fn new() -> Self {
Self::default()
}
/// The number of types in the section.
pub fn len(&self) -> u32 {
self.num_added
}
/// Determines if the section is empty.
pub fn is_empty(&self) -> bool {
self.num_added == 0
}
/// Define a function type in this type section.
pub fn function<P, R>(&mut self, params: P, results: R) -> &mut Self
where
P: IntoIterator<Item = ValType>,
P::IntoIter: ExactSizeIterator,
R: IntoIterator<Item = ValType>,
R::IntoIter: ExactSizeIterator,
{
let params = params.into_iter();
let results = results.into_iter();
self.bytes.push(0x60);
params.len().encode(&mut self.bytes);
params.for_each(|p| p.encode(&mut self.bytes));
results.len().encode(&mut self.bytes);
results.for_each(|p| p.encode(&mut self.bytes));
self.num_added += 1;
self
}
/// Define an array type in this type section.
pub fn array(&mut self, ty: &StorageType, mutable: bool) -> &mut Self {
self.bytes.push(0x5e);
self.field(ty, mutable);
self.num_added += 1;
self
}
fn field(&mut self, ty: &StorageType, mutable: bool) -> &mut Self {
ty.encode(&mut self.bytes);
self.bytes.push(mutable as u8);
self
}
/// Define a struct type in this type section.
pub fn struct_(&mut self, fields: Vec<FieldType>) -> &mut Self {
self.bytes.push(0x5f);
fields.len().encode(&mut self.bytes);
for f in fields.iter() {
self.field(&f.element_type, f.mutable);
}
self.num_added += 1;
self
}
/// Define an explicit subtype in this type section.
pub fn subtype(&mut self, ty: &SubType) -> &mut Self {
// In the GC spec, supertypes is a vector, not an option.
let st = match ty.supertype_idx {
Some(idx) => vec![idx],
None => vec![],
};
if ty.is_final {
self.bytes.push(0x4e);
st.encode(&mut self.bytes);
} else if !st.is_empty() {
self.bytes.push(0x50);
st.encode(&mut self.bytes);
}
match &ty.structural_type {
StructuralType::Func(ty) => {
self.function(ty.params().iter().copied(), ty.results().iter().copied());
}
StructuralType::Array(ArrayType(ty)) => {
self.array(&ty.element_type, ty.mutable);
}
StructuralType::Struct(ty) => {
self.struct_(ty.fields.to_vec());
}
}
self
}
}
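// Illustrative sketch (not part of the original source): using `subtype` to
// register a final struct type with a single mutable i32 field and no
// supertype. The type and field names follow the definitions in this module.
#[cfg(test)]
mod subtype_usage_sketch {
    use super::*;

    #[test]
    fn final_struct_subtype_is_counted() {
        let mut types = TypeSection::new();
        types.subtype(&SubType {
            is_final: true,
            supertype_idx: None,
            structural_type: StructuralType::Struct(StructType {
                fields: vec![FieldType {
                    element_type: StorageType::Val(ValType::I32),
                    mutable: true,
                }]
                .into(),
            }),
        });
        assert_eq!(types.len(), 1);
    }
}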
impl Encode for TypeSection {
fn encode(&self, sink: &mut Vec<u8>) {
encode_section(sink, self.num_added, &self.bytes);
}
}
impl Section for TypeSection {
fn id(&self) -> u8 {
SectionId::Type.into()
}
}
| StorageType | identifier_name |
types.rs | use crate::{encode_section, Encode, Section, SectionId};
/// Represents a subtype of possible other types in a WebAssembly module.
#[derive(Debug, Clone)]
pub struct SubType {
/// Is the subtype final.
pub is_final: bool,
/// The list of supertype indexes. As of GC MVP, there can be at most one supertype.
pub supertype_idx: Option<u32>,
/// The structural type of the subtype.
pub structural_type: StructuralType,
}
/// Represents a structural type in a WebAssembly module.
#[derive(Debug, Clone)]
pub enum StructuralType {
/// The type is for a function.
Func(FuncType),
/// The type is for an array.
Array(ArrayType),
/// The type is for a struct.
Struct(StructType),
}
/// Represents a type of a function in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct FuncType {
/// The combined parameters and result types.
params_results: Box<[ValType]>,
/// The number of parameter types.
len_params: usize,
}
/// Represents a type of an array in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct ArrayType(pub FieldType);
/// Represents a type of a struct in a WebAssembly module.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct StructType {
/// Struct fields.
pub fields: Box<[FieldType]>,
}
/// Field type in structural types (structs, arrays).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub struct FieldType {
/// Storage type of the field.
pub element_type: StorageType,
/// Is the field mutable.
pub mutable: bool,
}
/// Storage type for structural type fields.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum StorageType {
/// The `i8` type.
I8,
/// The `i16` type.
I16,
/// A value type.
Val(ValType),
}
/// The type of a core WebAssembly value.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum ValType {
/// The `i32` type.
I32,
/// The `i64` type.
I64,
/// The `f32` type.
F32,
/// The `f64` type.
F64,
/// The `v128` type.
///
/// Part of the SIMD proposal.
V128,
/// A reference type.
///
/// The `funcref` and `externref` type fall into this category and the full
/// generalization here is due to the implementation of the
/// function-references proposal.
Ref(RefType),
}
impl FuncType {
/// Creates a new [`FuncType`] from the given `params` and `results`.
pub fn new<P, R>(params: P, results: R) -> Self
where
P: IntoIterator<Item = ValType>,
R: IntoIterator<Item = ValType>,
{
let mut buffer = params.into_iter().collect::<Vec<_>>();
let len_params = buffer.len();
buffer.extend(results);
Self {
params_results: buffer.into(),
len_params,
}
}
/// Returns a shared slice to the parameter types of the [`FuncType`].
#[inline]
pub fn params(&self) -> &[ValType] {
&self.params_results[..self.len_params]
}
/// Returns a shared slice to the result types of the [`FuncType`].
#[inline]
pub fn results(&self) -> &[ValType] {
&self.params_results[self.len_params..]
}
}
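// Illustrative sketch (not part of the original source): `FuncType` keeps the
// parameter and result types in one boxed slice and splits it at `len_params`,
// so `params()` and `results()` are two views into the same allocation.
#[cfg(test)]
mod func_type_sketch {
    use super::*;

    #[test]
    fn params_and_results_are_split_views() {
        let ty = FuncType::new([ValType::I32, ValType::I64], [ValType::F32]);
        assert_eq!(ty.params(), &[ValType::I32, ValType::I64]);
        assert_eq!(ty.results(), &[ValType::F32]);
    }
}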
impl ValType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: ValType = ValType::Ref(RefType::FUNCREF);
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: ValType = ValType::Ref(RefType::EXTERNREF);
}
impl Encode for StorageType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
StorageType::I8 => sink.push(0x7A),
StorageType::I16 => sink.push(0x79),
StorageType::Val(vt) => vt.encode(sink),
}
}
}
impl Encode for ValType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
ValType::I32 => sink.push(0x7F),
ValType::I64 => sink.push(0x7E),
ValType::F32 => sink.push(0x7D),
ValType::F64 => sink.push(0x7C),
ValType::V128 => sink.push(0x7B),
ValType::Ref(rt) => rt.encode(sink),
}
}
}
/// A reference type.
///
/// This is largely part of the function references proposal for WebAssembly but
/// additionally is used by the `funcref` and `externref` types. The full
/// generality of this type is only exercised with function-references.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
#[allow(missing_docs)]
pub struct RefType {
pub nullable: bool,
pub heap_type: HeapType,
}
impl RefType {
/// Alias for the `funcref` type in WebAssembly
pub const FUNCREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Func,
};
/// Alias for the `externref` type in WebAssembly
pub const EXTERNREF: RefType = RefType {
nullable: true,
heap_type: HeapType::Extern,
};
}
impl Encode for RefType {
fn encode(&self, sink: &mut Vec<u8>) {
if self.nullable {
// Favor the original encodings of `funcref` and `externref` where
// possible
match self.heap_type {
HeapType::Func => return sink.push(0x70),
HeapType::Extern => return sink.push(0x6f),
_ => {}
}
}
if self.nullable {
sink.push(0x6C);
} else {
sink.push(0x6B);
}
self.heap_type.encode(sink);
}
}
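// Illustrative sketch (not part of the original source): nullable func/extern
// references keep their one-byte legacy encodings, while every other reference
// type gets a nullability prefix (0x6C nullable, 0x6B non-nullable) followed by
// its heap type, as `Encode for RefType` above spells out. The indexed heap
// type byte assumes the signed-LEB128 integer encoding used by this crate.
#[cfg(test)]
mod ref_type_encoding_sketch {
    use super::*;

    #[test]
    fn funcref_keeps_its_legacy_byte() {
        let mut sink = Vec::new();
        RefType::FUNCREF.encode(&mut sink);
        assert_eq!(sink, [0x70]);
    }

    #[test]
    fn non_nullable_indexed_ref_gets_a_prefix() {
        let mut sink = Vec::new();
        RefType { nullable: false, heap_type: HeapType::Indexed(2) }.encode(&mut sink);
        assert_eq!(sink, [0x6B, 0x02]);
    }
}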
impl From<RefType> for ValType {
fn from(ty: RefType) -> ValType {
ValType::Ref(ty)
}
}
/// Part of the function references proposal.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum HeapType {
/// Untyped (any) function.
Func,
/// External heap type.
Extern,
/// The `any` heap type. The common supertype (a.k.a. top) of all internal types.
Any,
/// The `none` heap type. The common subtype (a.k.a. bottom) of all internal types.
None,
/// The `noextern` heap type. The common subtype (a.k.a. bottom) of all external types.
NoExtern,
/// The `nofunc` heap type. The common subtype (a.k.a. bottom) of all function types.
NoFunc,
/// The `eq` heap type. The common supertype of all referenceable types on which comparison
/// (ref.eq) is allowed.
Eq,
/// The `struct` heap type. The common supertype of all struct types.
Struct,
/// The `array` heap type. The common supertype of all array types.
Array,
/// The i31 heap type.
I31,
/// User defined type at the given index.
Indexed(u32),
}
impl Encode for HeapType {
fn encode(&self, sink: &mut Vec<u8>) {
match self {
HeapType::Func => sink.push(0x70),
HeapType::Extern => sink.push(0x6F),
HeapType::Any => sink.push(0x6E),
HeapType::None => sink.push(0x65),
HeapType::NoExtern => sink.push(0x69),
HeapType::NoFunc => sink.push(0x68),
HeapType::Eq => sink.push(0x6D),
HeapType::Struct => sink.push(0x67),
HeapType::Array => sink.push(0x66),
HeapType::I31 => sink.push(0x6A),
// Note that this is encoded as a signed type rather than unsigned
// as it's decoded as an s33
HeapType::Indexed(i) => i64::from(*i).encode(sink),
}
}
}
/// An encoder for the type section of WebAssembly modules.
///
/// # Example
///
/// ```rust
/// use wasm_encoder::{Module, TypeSection, ValType};
///
/// let mut types = TypeSection::new();
///
/// types.function([ValType::I32, ValType::I32], [ValType::I64]);
///
/// let mut module = Module::new();
/// module.section(&types);
///
/// let bytes = module.finish();
/// ```
#[derive(Clone, Debug, Default)]
pub struct TypeSection {
bytes: Vec<u8>,
num_added: u32,
}
impl TypeSection {
/// Create a new module type section encoder.
pub fn new() -> Self {
Self::default()
}
/// The number of types in the section.
pub fn len(&self) -> u32 {
self.num_added
}
/// Determines if the section is empty.
pub fn is_empty(&self) -> bool {
self.num_added == 0
}
/// Define a function type in this type section.
pub fn function<P, R>(&mut self, params: P, results: R) -> &mut Self
where
P: IntoIterator<Item = ValType>,
P::IntoIter: ExactSizeIterator,
R: IntoIterator<Item = ValType>,
R::IntoIter: ExactSizeIterator,
{
let params = params.into_iter();
let results = results.into_iter();
self.bytes.push(0x60);
params.len().encode(&mut self.bytes);
params.for_each(|p| p.encode(&mut self.bytes));
results.len().encode(&mut self.bytes);
results.for_each(|p| p.encode(&mut self.bytes));
self.num_added += 1;
self
}
/// Define an array type in this type section.
pub fn array(&mut self, ty: &StorageType, mutable: bool) -> &mut Self {
self.bytes.push(0x5e);
self.field(ty, mutable);
self.num_added += 1;
self
}
fn field(&mut self, ty: &StorageType, mutable: bool) -> &mut Self {
ty.encode(&mut self.bytes);
self.bytes.push(mutable as u8);
self
}
/// Define a struct type in this type section.
pub fn struct_(&mut self, fields: Vec<FieldType>) -> &mut Self |
/// Define an explicit subtype in this type section.
pub fn subtype(&mut self, ty: &SubType) -> &mut Self {
// In the GC spec, supertypes is a vector, not an option.
let st = match ty.supertype_idx {
Some(idx) => vec![idx],
None => vec![],
};
if ty.is_final {
self.bytes.push(0x4e);
st.encode(&mut self.bytes);
} else if !st.is_empty() {
self.bytes.push(0x50);
st.encode(&mut self.bytes);
}
match &ty.structural_type {
StructuralType::Func(ty) => {
self.function(ty.params().iter().copied(), ty.results().iter().copied());
}
StructuralType::Array(ArrayType(ty)) => {
self.array(&ty.element_type, ty.mutable);
}
StructuralType::Struct(ty) => {
self.struct_(ty.fields.to_vec());
}
}
self
}
}
impl Encode for TypeSection {
fn encode(&self, sink: &mut Vec<u8>) {
encode_section(sink, self.num_added, &self.bytes);
}
}
impl Section for TypeSection {
fn id(&self) -> u8 {
SectionId::Type.into()
}
}
| {
self.bytes.push(0x5f);
fields.len().encode(&mut self.bytes);
for f in fields.iter() {
self.field(&f.element_type, f.mutable);
}
self.num_added += 1;
self
} | identifier_body |
bid.rs | //! Auctions and bidding during the first phase of the deal.
use std::fmt;
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use strum_macros::EnumIter;
use super::cards;
use super::deal;
use super::pos;
/// Goal set by a contract.
///
/// Determines the winning conditions and the score on success.
#[derive(EnumIter, PartialEq, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
pub enum Target {
Prise,
Garde,
GardeSans,
GardeContre,
}
impl Target {
/// Returns the score this target would give on success.
pub fn multiplier(self) -> i32 {
match self {
Target::Prise => 1,
Target::Garde => 2,
Target::GardeSans => 4,
Target::GardeContre => 6,
}
}
pub fn to_str(self) -> &'static str {
match self {
Target::Prise => "prise",
Target::Garde => "garde",
Target::GardeSans => "garde sans",
Target::GardeContre => "garde contre",
}
}
}
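// Illustrative sketch (not part of the original source): `can_bid` further down
// compares multipliers with `<=`, so a contract can only be overbid by a target
// with a strictly higher multiplier.
#[cfg(test)]
mod target_multiplier_sketch {
    use super::*;

    #[test]
    fn only_strictly_higher_targets_can_overbid() {
        assert!(Target::Garde.multiplier() <= Target::Garde.multiplier());
        assert!(Target::GardeSans.multiplier() > Target::Garde.multiplier());
    }
}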
impl FromStr for Target {
type Err = String;
fn from_str(s: &str) -> Result<Self, String> {
match s {
"prise" => Ok(Target::Prise),
"garde" => Ok(Target::Garde),
"garde sans" => Ok(Target::GardeSans),
"garde contre" => Ok(Target::GardeContre),
_ => Err(format!("invalid target: {}", s)),
}
}
}
impl ToString for Target {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// Contract taken
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct Contract {
/// Initial author of the contract.
pub author: pos::PlayerPos,
/// Target for the contract.
pub target: Target,
/// Slam asked?
pub slam: bool,
}
impl Contract {
fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self {
Contract {
author,
target,
slam,
}
}
}
impl ToString for Contract {
fn to_string(&self) -> String {
let str_slam = if self.slam { " SLAM" } else { "" };
format!("{}{}", self.target.to_str(), str_slam)
}
}
/// Current state of an auction
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum AuctionState {
/// Players are still bidding for the highest contract
Bidding,
/// Auction is over, deal will begin
Over,
/// No contract was taken, a new deal will start
Cancelled,
}
/// Bidding status for a player
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum BidStatus {
Todo,
Passed,
Bid,
}
/// Represents the entire auction process.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Auction {
contract: Option<Contract>,
players_status: Vec<BidStatus>,
first: pos::PlayerPos,
state: AuctionState,
players: Vec<cards::Hand>,
dog: cards::Hand,
}
/// Possible error occurring during an Auction.
#[derive(PartialEq, Debug)]
pub enum BidError {
/// The auction was closed and does not accept more contracts.
AuctionClosed,
/// A player tried bidding before his turn.
TurnError,
/// The given bid was not higher than the previous one.
NonRaisedTarget,
/// Cannot complete the auction when it is still running.
AuctionRunning,
/// No contract was offered during the auction, it cannot complete.
NoContract,
}
impl fmt::Display for BidError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
BidError::AuctionClosed => write!(f, "auctions are closed"),
BidError::TurnError => write!(f, "invalid turn order"),
BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"),
BidError::AuctionRunning => write!(f, "the auction is still running"),
BidError::NoContract => write!(f, "no contract was offered"),
}
}
}
impl Auction {
/// Starts a new auction, starting with the player `first`.
pub fn new(first: pos::PlayerPos) -> Self {
let count = first.count as usize;
let (hands, dog) = super::deal_hands(count);
Auction {
contract: None,
players_status: vec![BidStatus::Todo; count],
state: AuctionState::Bidding,
first,
players: hands,
dog
}
}
/// Override Auction hands (for tests)
pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) {
self.players = hands;
self.dog = dog;
}
/// Returns the current state of the auctions.
pub fn get_state(&self) -> AuctionState {
self.state
}
fn can_bid(&self, target: Target) -> Result<(), BidError> {
if self.state != AuctionState::Bidding {
return Err(BidError::AuctionClosed);
}
if let Some(contract) = self.contract.clone() {
if target.multiplier() <= contract.target.multiplier() {
return Err(BidError::NonRaisedTarget);
}
}
Ok(())
}
fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus {
self.players_status[pos.to_n()]
}
fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) {
self.players_status[pos.to_n()] = status;
}
/// Returns the player that is expected to bid next.
pub fn next_player(&self) -> pos::PlayerPos {
let pos_init = if let Some(contract) = self.contract.clone() {
contract.author.next()
} else {
self.first
};
let mut next_pos = pos_init;
while self.get_player_status(next_pos) != BidStatus::Todo {
next_pos = next_pos.next();
if next_pos == pos_init {
panic!("all players have talked")
}
}
next_pos
}
/// Check if there are still players waiting for bidding
fn no_player_left(&self) -> bool {
!self.players_status.contains(&BidStatus::Todo)
}
/// Bid a new, higher contract.
pub fn bid(
&mut self,
pos: pos::PlayerPos,
target: Target,
slam: bool,
) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.can_bid(target)?;
// Reset previous bidder status
if let Some(contract) = self.contract.clone() {
self.set_player_status(contract.author, BidStatus::Todo);
}
let contract = Contract::new(pos, target, slam);
self.contract = Some(contract);
self.set_player_status(pos, BidStatus::Bid);
// If we're all the way to the top, there's nowhere else to go
if self.no_player_left() || target == Target::GardeContre {
self.state = AuctionState::Over;
}
Ok(self.state)
}
/// Look at the last offered contract.
///
/// Returns `None` if no contract was offered yet.
pub fn current_contract(&self) -> Option<&Contract> {
self.contract.as_ref()
}
/// Returns the players cards.
pub fn | (&self) -> &Vec<cards::Hand> {
&self.players
}
/// The current player passes his turn.
///
/// Returns the new auction state:
///
/// * `AuctionState::Cancelled` if every player passed without any bid
/// * `AuctionState::Over` if all remaining players passed after a bid
/// * The previous state otherwise
pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.set_player_status(pos, BidStatus::Passed);
if self.no_player_left() {
self.state = if self.contract.is_some() {
AuctionState::Over
} else {
AuctionState::Cancelled
}
}
Ok(self.state)
}
/// Consumes a complete auction to enter the second deal phase.
///
/// If the auction was ready, returns `Ok<DealState>`
pub fn complete(&self) -> Result<deal::DealState, BidError> {
if self.state != AuctionState::Over {
Err(BidError::AuctionRunning)
// } else if self.contract.is_none() {
} else {
if let Some(contract) = self.contract.clone() {
Ok(deal::DealState::new(
self.first,
self.players.clone(),
self.dog,
contract,
pos::PlayerPos::from_n(0,5), //XXX placeholder
))
} else {
Err(BidError::NoContract)
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::pos;
#[test]
fn test_auction() {
let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
assert!(auction.state == AuctionState::Bidding);
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Err(BidError::TurnError));
assert_eq!(auction.pass(pos::PlayerPos::from_n(2, 5)), Ok(AuctionState::Bidding));
// Someone bids.
assert_eq!(
auction.bid(pos::PlayerPos::from_n(3, 5), Target::Garde, false),
Ok(AuctionState::Bidding)
);
assert_eq!(
auction.bid(pos::PlayerPos::from_n(4, 5), Target::Garde, false).err(),
Some(BidError::NonRaisedTarget)
);
// Overbid
assert_eq!(
auction.bid(pos::PlayerPos::from_n(4, 5), Target::GardeSans, false),
Ok(AuctionState::Bidding)
);
// Already passed
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Err(BidError::TurnError));
// Last to pass
assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Ok(AuctionState::Over));
assert!(auction.state == AuctionState::Over);
match auction.complete() {
Err(_) => assert!(false),
_ => {}
}
}
}
| hands | identifier_name |
bid.rs | //! Auctions and bidding during the first phase of the deal.
use std::fmt;
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use strum_macros::EnumIter;
use super::cards;
use super::deal;
use super::pos;
/// Goal set by a contract.
///
/// Determines the winning conditions and the score on success.
#[derive(EnumIter, PartialEq, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
pub enum Target {
Prise,
Garde,
GardeSans,
GardeContre,
}
impl Target {
/// Returns the score this target would give on success.
pub fn multiplier(self) -> i32 {
match self {
Target::Prise => 1,
Target::Garde => 2,
Target::GardeSans => 4,
Target::GardeContre => 6,
}
}
pub fn to_str(self) -> &'static str {
match self {
Target::Prise => "prise",
Target::Garde => "garde",
Target::GardeSans => "garde sans",
Target::GardeContre => "garde contre",
}
}
}
impl FromStr for Target {
type Err = String;
fn from_str(s: &str) -> Result<Self, String> {
match s {
"prise" => Ok(Target::Prise),
"garde" => Ok(Target::Garde),
"garde sans" => Ok(Target::GardeSans),
"garde contre" => Ok(Target::GardeContre),
_ => Err(format!("invalid target: {}", s)),
}
}
}
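// Illustrative sketch (not part of the original source): `FromStr` accepts the
// lowercase names produced by `to_str` and rejects anything else, so
// `str::parse` round-trips the four targets.
#[cfg(test)]
mod target_from_str_sketch {
    use super::*;

    #[test]
    fn parses_known_names_and_rejects_unknown_ones() {
        assert_eq!("garde sans".parse::<Target>(), Ok(Target::GardeSans));
        assert!("petite".parse::<Target>().is_err());
    }
}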
impl ToString for Target {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// Contract taken
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct Contract {
/// Initial author of the contract.
pub author: pos::PlayerPos,
/// Target for the contract.
pub target: Target,
/// Slam asked?
pub slam: bool,
}
impl Contract {
fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self {
Contract {
author,
target,
slam,
}
}
}
impl ToString for Contract {
fn to_string(&self) -> String {
let str_slam = if self.slam { " SLAM" } else { "" };
format!("{}{}", self.target.to_str(), str_slam)
}
}
/// Current state of an auction
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum AuctionState {
/// Players are still bidding for the highest contract
Bidding,
/// Auction is over, deal will begin
Over,
/// No contract was taken, a new deal will start
Cancelled,
}
/// Bidding status for a player
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum BidStatus {
Todo,
Passed,
Bid,
}
/// Represents the entire auction process.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Auction {
contract: Option<Contract>,
players_status: Vec<BidStatus>,
first: pos::PlayerPos,
state: AuctionState,
players: Vec<cards::Hand>,
dog: cards::Hand,
}
/// Possible error occurring during an Auction.
#[derive(PartialEq, Debug)]
pub enum BidError {
/// The auction was closed and does not accept more contracts.
AuctionClosed,
/// A player tried bidding before his turn.
TurnError,
/// The given bid was not higher than the previous one.
NonRaisedTarget,
/// Cannot complete the auction when it is still running.
AuctionRunning,
/// No contract was offered during the auction, it cannot complete.
NoContract,
}
impl fmt::Display for BidError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
BidError::AuctionClosed => write!(f, "auctions are closed"),
BidError::TurnError => write!(f, "invalid turn order"),
BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"),
BidError::AuctionRunning => write!(f, "the auction is still running"),
BidError::NoContract => write!(f, "no contract was offered"),
}
}
}
impl Auction {
/// Starts a new auction, starting with the player `first`.
pub fn new(first: pos::PlayerPos) -> Self {
let count = first.count as usize;
let (hands, dog) = super::deal_hands(count);
Auction {
contract: None,
players_status: vec![BidStatus::Todo; count],
state: AuctionState::Bidding,
first,
players: hands,
dog
}
}
/// Override Auction hands (for tests)
pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) {
self.players = hands;
self.dog = dog;
}
/// Returns the current state of the auctions.
pub fn get_state(&self) -> AuctionState {
self.state
}
fn can_bid(&self, target: Target) -> Result<(), BidError> {
if self.state != AuctionState::Bidding {
return Err(BidError::AuctionClosed);
}
if let Some(contract) = self.contract.clone() |
Ok(())
}
fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus {
self.players_status[pos.to_n()]
}
fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) {
self.players_status[pos.to_n()] = status;
}
/// Returns the player that is expected to bid next.
pub fn next_player(&self) -> pos::PlayerPos {
let pos_init = if let Some(contract) = self.contract.clone() {
contract.author.next()
} else {
self.first
};
let mut next_pos = pos_init;
while self.get_player_status(next_pos) != BidStatus::Todo {
next_pos = next_pos.next();
if next_pos == pos_init {
panic!("all players have talked")
}
}
next_pos
}
/// Check if there are still players waiting for bidding
fn no_player_left(&self) -> bool {
!self.players_status.contains(&BidStatus::Todo)
}
/// Bid a new, higher contract.
pub fn bid(
&mut self,
pos: pos::PlayerPos,
target: Target,
slam: bool,
) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.can_bid(target)?;
// Reset previous bidder status
if let Some(contract) = self.contract.clone() {
self.set_player_status(contract.author, BidStatus::Todo);
}
let contract = Contract::new(pos, target, slam);
self.contract = Some(contract);
self.set_player_status(pos, BidStatus::Bid);
// If we're all the way to the top, there's nowhere else to go
if self.no_player_left() || target == Target::GardeContre {
self.state = AuctionState::Over;
}
Ok(self.state)
}
/// Look at the last offered contract.
///
/// Returns `None` if no contract was offered yet.
pub fn current_contract(&self) -> Option<&Contract> {
self.contract.as_ref()
}
/// Returns the players cards.
pub fn hands(&self) -> &Vec<cards::Hand> {
&self.players
}
/// The current player passes his turn.
///
/// Returns the new auction state:
///
/// * `AuctionState::Cancelled` if every player passed without any bid
/// * `AuctionState::Over` if all remaining players passed after a bid
/// * The previous state otherwise
pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.set_player_status(pos, BidStatus::Passed);
if self.no_player_left() {
self.state = if self.contract.is_some() {
AuctionState::Over
} else {
AuctionState::Cancelled
}
}
Ok(self.state)
}
/// Consumes a complete auction to enter the second deal phase.
///
/// If the auction was ready, returns `Ok<DealState>`
pub fn complete(&self) -> Result<deal::DealState, BidError> {
if self.state != AuctionState::Over {
Err(BidError::AuctionRunning)
// } else if self.contract.is_none() {
} else {
if let Some(contract) = self.contract.clone() {
Ok(deal::DealState::new(
self.first,
self.players.clone(),
self.dog,
contract,
pos::PlayerPos::from_n(0,5), //XXX placeholder
))
} else {
Err(BidError::NoContract)
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::pos;
#[test]
fn test_auction() {
let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
assert!(auction.state == AuctionState::Bidding);
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Err(BidError::TurnError));
assert_eq!(auction.pass(pos::PlayerPos::from_n(2, 5)), Ok(AuctionState::Bidding));
// Someone bids.
assert_eq!(
auction.bid(pos::PlayerPos::from_n(3, 5), Target::Garde, false),
Ok(AuctionState::Bidding)
);
assert_eq!(
auction.bid(pos::PlayerPos::from_n(4, 5), Target::Garde, false).err(),
Some(BidError::NonRaisedTarget)
);
// Overbid
assert_eq!(
auction.bid(pos::PlayerPos::from_n(4, 5), Target::GardeSans, false),
Ok(AuctionState::Bidding)
);
// Already passed
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Err(BidError::TurnError));
// Last to pass
assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Ok(AuctionState::Over));
assert!(auction.state == AuctionState::Over);
match auction.complete() {
Err(_) => assert!(false),
_ => {}
}
}
}
| {
if target.multiplier() <= contract.target.multiplier() {
return Err(BidError::NonRaisedTarget);
}
} | conditional_block |
bid.rs | //! Auctions and bidding during the first phase of the deal.
use std::fmt;
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use strum_macros::EnumIter;
use super::cards;
use super::deal;
use super::pos;
/// Goal set by a contract.
///
/// Determines the winning conditions and the score on success.
#[derive(EnumIter, PartialEq, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
pub enum Target {
Prise,
Garde,
GardeSans,
GardeContre,
}
impl Target {
/// Returns the score this target would give on success.
pub fn multiplier(self) -> i32 {
match self {
Target::Prise => 1,
Target::Garde => 2,
Target::GardeSans => 4,
Target::GardeContre => 6,
}
}
pub fn to_str(self) -> &'static str {
match self {
Target::Prise => "prise",
Target::Garde => "garde",
Target::GardeSans => "garde sans",
Target::GardeContre => "garde contre",
}
}
}
impl FromStr for Target {
type Err = String;
fn from_str(s: &str) -> Result<Self, String> {
match s {
"prise" => Ok(Target::Prise),
"garde" => Ok(Target::Garde),
"garde sans" => Ok(Target::GardeSans),
"garde contre" => Ok(Target::GardeContre),
_ => Err(format!("invalid target: {}", s)),
}
}
}
impl ToString for Target {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// Contract taken
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct Contract {
/// Initial author of the contract.
pub author: pos::PlayerPos,
/// Target for the contract.
pub target: Target,
/// Slam asked?
pub slam: bool,
}
impl Contract {
fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self {
Contract {
author,
target,
slam,
}
}
}
impl ToString for Contract {
fn to_string(&self) -> String {
let str_slam = if self.slam { " SLAM" } else { "" };
format!("{}{}", self.target.to_str(), str_slam)
}
}
/// Current state of an auction
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum AuctionState {
/// Players are still bidding for the highest contract
Bidding,
/// Auction is over, deal will begin
Over,
/// No contract was taken, a new deal will start
Cancelled,
}
/// Bidding status for a player
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum BidStatus {
Todo,
Passed,
Bid,
}
/// Represents the entire auction process.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Auction {
contract: Option<Contract>,
players_status: Vec<BidStatus>,
first: pos::PlayerPos,
state: AuctionState,
players: Vec<cards::Hand>,
dog: cards::Hand,
}
/// Possible error occurring during an Auction.
#[derive(PartialEq, Debug)]
pub enum BidError {
/// The auction was closed and does not accept more contracts.
AuctionClosed,
/// A player tried bidding before his turn.
TurnError,
/// The given bid was not higher than the previous one.
NonRaisedTarget,
/// Cannot complete the auction when it is still running.
AuctionRunning,
/// No contract was offered during the auction, it cannot complete.
NoContract,
}
impl fmt::Display for BidError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result |
}
impl Auction {
/// Starts a new auction, starting with the player `first`.
pub fn new(first: pos::PlayerPos) -> Self {
let count = first.count as usize;
let (hands, dog) = super::deal_hands(count);
Auction {
contract: None,
players_status: vec![BidStatus::Todo; count],
state: AuctionState::Bidding,
first,
players: hands,
dog
}
}
/// Override Auction hands (for tests)
pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) {
self.players = hands;
self.dog = dog;
}
/// Returns the current state of the auctions.
pub fn get_state(&self) -> AuctionState {
self.state
}
fn can_bid(&self, target: Target) -> Result<(), BidError> {
if self.state != AuctionState::Bidding {
return Err(BidError::AuctionClosed);
}
if let Some(contract) = self.contract.clone() {
if target.multiplier() <= contract.target.multiplier() {
return Err(BidError::NonRaisedTarget);
}
}
Ok(())
}
fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus {
self.players_status[pos.to_n()]
}
fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) {
self.players_status[pos.to_n()] = status;
}
/// Returns the player that is expected to bid next.
pub fn next_player(&self) -> pos::PlayerPos {
let pos_init = if let Some(contract) = self.contract.clone() {
contract.author.next()
} else {
self.first
};
let mut next_pos = pos_init;
while self.get_player_status(next_pos) != BidStatus::Todo {
next_pos = next_pos.next();
if next_pos == pos_init {
panic!("all players have talked")
}
}
next_pos
}
/// Check if there are still players waiting for bidding
fn no_player_left(&self) -> bool {
!self.players_status.contains(&BidStatus::Todo)
}
/// Bid a new, higher contract.
pub fn bid(
&mut self,
pos: pos::PlayerPos,
target: Target,
slam: bool,
) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.can_bid(target)?;
// Reset previous bidder status
if let Some(contract) = self.contract.clone() {
self.set_player_status(contract.author, BidStatus::Todo);
}
let contract = Contract::new(pos, target, slam);
self.contract = Some(contract);
self.set_player_status(pos, BidStatus::Bid);
// If we're all the way to the top, there's nowhere else to go
if self.no_player_left() || target == Target::GardeContre {
self.state = AuctionState::Over;
}
Ok(self.state)
}
/// Look at the last offered contract.
///
/// Returns `None` if no contract was offered yet.
pub fn current_contract(&self) -> Option<&Contract> {
self.contract.as_ref()
}
/// Returns the players cards.
pub fn hands(&self) -> &Vec<cards::Hand> {
&self.players
}
/// The current player passes his turn.
///
/// Returns the new auction state:
///
/// * `AuctionState::Cancelled` if every player passed without any bid
/// * `AuctionState::Over` if all remaining players passed after a bid
/// * The previous state otherwise
pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.set_player_status(pos, BidStatus::Passed);
if self.no_player_left() {
self.state = if self.contract.is_some() {
AuctionState::Over
} else {
AuctionState::Cancelled
}
}
Ok(self.state)
}
/// Consumes a complete auction to enter the second deal phase.
///
/// If the auction was ready, returns `Ok<DealState>`
pub fn complete(&self) -> Result<deal::DealState, BidError> {
if self.state != AuctionState::Over {
Err(BidError::AuctionRunning)
// } else if self.contract.is_none() {
} else {
if let Some(contract) = self.contract.clone() {
Ok(deal::DealState::new(
self.first,
self.players.clone(),
self.dog,
contract,
pos::PlayerPos::from_n(0,5), //XXX placeholder
))
} else {
Err(BidError::NoContract)
}
}
}
}
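// Illustrative sketch (not part of the original source): when every seat passes
// without a contract, the auction ends up `Cancelled` and `complete` refuses to
// build a deal state. Mirrors the setup of the test module below.
#[cfg(test)]
mod cancelled_auction_sketch {
    use super::*;
    use crate::pos;

    #[test]
    fn all_passes_cancel_the_auction() {
        let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
        for p in 0..5 {
            auction.pass(pos::PlayerPos::from_n(p, 5)).unwrap();
        }
        assert_eq!(auction.get_state(), AuctionState::Cancelled);
        assert!(auction.complete().is_err());
    }
}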
#[cfg(test)]
mod tests {
use super::*;
use crate::pos;
#[test]
fn test_auction() {
let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
assert!(auction.state == AuctionState::Bidding);
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Err(BidError::TurnError));
assert_eq!(auction.pass(pos::PlayerPos::from_n(2, 5)), Ok(AuctionState::Bidding));
// Someone bids.
assert_eq!(
auction.bid(pos::PlayerPos::from_n(3, 5), Target::Garde, false),
Ok(AuctionState::Bidding)
);
assert_eq!(
auction.bid(pos::PlayerPos::from_n(4, 5), Target::Garde, false).err(),
Some(BidError::NonRaisedTarget)
);
// Overbid
assert_eq!(
auction.bid(pos::PlayerPos::from_n(4, 5), Target::GardeSans, false),
Ok(AuctionState::Bidding)
);
// Already passed
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Err(BidError::TurnError));
// Last to pass
assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Ok(AuctionState::Over));
assert!(auction.state == AuctionState::Over);
match auction.complete() {
Err(_) => assert!(false),
_ => {}
}
}
}
| {
match *self {
BidError::AuctionClosed => write!(f, "auctions are closed"),
BidError::TurnError => write!(f, "invalid turn order"),
BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"),
BidError::AuctionRunning => write!(f, "the auction is still running"),
BidError::NoContract => write!(f, "no contract was offered"),
}
} | identifier_body |
bid.rs | //! Auctions and bidding during the first phase of the deal.
use std::fmt;
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use strum_macros::EnumIter;
use super::cards;
use super::deal;
use super::pos;
/// Goal set by a contract.
///
/// Determines the winning conditions and the score on success.
#[derive(EnumIter, PartialEq, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
pub enum Target {
Prise,
Garde,
GardeSans,
GardeContre,
}
impl Target {
/// Returns the score this target would give on success.
pub fn multiplier(self) -> i32 {
match self {
Target::Prise => 1,
Target::Garde => 2,
Target::GardeSans => 4,
Target::GardeContre => 6,
}
}
pub fn to_str(self) -> &'static str {
match self {
Target::Prise => "prise",
Target::Garde => "garde",
Target::GardeSans => "garde sans",
Target::GardeContre => "garde contre",
}
}
}
impl FromStr for Target {
type Err = String;
fn from_str(s: &str) -> Result<Self, String> {
match s {
"prise" => Ok(Target::Prise),
"garde" => Ok(Target::Garde),
"garde sans" => Ok(Target::GardeSans),
"garde contre" => Ok(Target::GardeContre),
_ => Err(format!("invalid target: {}", s)),
}
}
}
impl ToString for Target {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// Contract taken
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct Contract {
/// Initial author of the contract.
pub author: pos::PlayerPos,
/// Target for the contract.
pub target: Target,
/// Slam asked?
pub slam: bool,
}
impl Contract {
fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self {
Contract {
author,
target,
slam,
}
}
}
impl ToString for Contract {
fn to_string(&self) -> String {
let str_slam = if self.slam { " SLAM" } else { "" };
format!("{}{}", self.target.to_str(), str_slam)
}
}
/// Current state of an auction
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum AuctionState {
/// Players are still bidding for the highest contract
Bidding,
/// Auction is over, deal will begin
Over,
/// No contract was taken, a new deal will start
Cancelled,
}
/// Bidding status for a player
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub enum BidStatus {
Todo,
Passed,
Bid,
}
/// Represents the entire auction process.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Auction {
contract: Option<Contract>,
players_status: Vec<BidStatus>,
first: pos::PlayerPos,
state: AuctionState,
players: Vec<cards::Hand>,
dog: cards::Hand,
}
/// Possible error occurring during an Auction.
#[derive(PartialEq, Debug)]
pub enum BidError {
/// The auction was closed and does not accept more contracts.
AuctionClosed,
/// A player tried bidding before his turn.
TurnError,
/// The given bid was not higher than the previous one.
NonRaisedTarget,
/// Cannot complete the auction when it is still running.
AuctionRunning,
/// No contract was offered during the auction, it cannot complete.
NoContract,
}
impl fmt::Display for BidError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
BidError::AuctionClosed => write!(f, "auctions are closed"),
BidError::TurnError => write!(f, "invalid turn order"),
BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"),
BidError::AuctionRunning => write!(f, "the auction is still running"),
BidError::NoContract => write!(f, "no contract was offered"),
}
}
}
impl Auction {
/// Starts a new auction, starting with the player `first`.
pub fn new(first: pos::PlayerPos) -> Self {
let count = first.count as usize;
let (hands, dog) = super::deal_hands(count);
Auction {
contract: None,
players_status: vec![BidStatus::Todo; count],
state: AuctionState::Bidding,
first,
players: hands,
dog
}
}
/// Override Auction hands (for tests)
pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) {
self.players = hands;
self.dog = dog;
}
/// Returns the current state of the auctions.
pub fn get_state(&self) -> AuctionState {
self.state
}
fn can_bid(&self, target: Target) -> Result<(), BidError> {
if self.state != AuctionState::Bidding {
return Err(BidError::AuctionClosed);
}
if let Some(contract) = self.contract.clone() {
if target.multiplier() <= contract.target.multiplier() {
return Err(BidError::NonRaisedTarget);
}
}
Ok(())
}
fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus {
self.players_status[pos.to_n()]
}
fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) {
self.players_status[pos.to_n()] = status;
}
/// Returns the player that is expected to bid next.
pub fn next_player(&self) -> pos::PlayerPos {
let pos_init = if let Some(contract) = self.contract.clone() {
contract.author.next()
} else {
self.first
};
let mut next_pos = pos_init;
while self.get_player_status(next_pos) != BidStatus::Todo {
next_pos = next_pos.next();
if next_pos == pos_init {
panic!("all players have talked")
}
}
next_pos
}
/// Check if there are still players waiting for bidding
fn no_player_left(&self) -> bool {
!self.players_status.contains(&BidStatus::Todo)
}
/// Bid a new, higher contract.
pub fn bid(
&mut self,
pos: pos::PlayerPos,
target: Target,
slam: bool,
) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.can_bid(target)?; |
let contract = Contract::new(pos, target, slam);
self.contract = Some(contract);
self.set_player_status(pos, BidStatus::Bid);
// If we're all the way to the top, there's nowhere else to go
if self.no_player_left() || target == Target::GardeContre {
self.state = AuctionState::Over;
}
Ok(self.state)
}
/// Look at the last offered contract.
///
/// Returns `None` if no contract was offered yet.
pub fn current_contract(&self) -> Option<&Contract> {
self.contract.as_ref()
}
/// Returns the players cards.
pub fn hands(&self) -> &Vec<cards::Hand> {
&self.players
}
/// The current player passes his turn.
///
/// Returns the new auction state:
///
/// * `AuctionState::Cancelled` if every player passed without any bid
/// * `AuctionState::Over` if all remaining players passed after a bid
/// * The previous state otherwise
pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> {
if pos != self.next_player() {
return Err(BidError::TurnError);
}
self.set_player_status(pos, BidStatus::Passed);
if self.no_player_left() {
self.state = if self.contract.is_some() {
AuctionState::Over
} else {
AuctionState::Cancelled
}
}
Ok(self.state)
}
/// Consumes a complete auction to enter the second deal phase.
///
/// If the auction was ready, returns `Ok<DealState>`
pub fn complete(&self) -> Result<deal::DealState, BidError> {
if self.state != AuctionState::Over {
Err(BidError::AuctionRunning)
// } else if self.contract.is_none() {
} else {
if let Some(contract) = self.contract.clone() {
Ok(deal::DealState::new(
self.first,
self.players.clone(),
self.dog,
contract,
pos::PlayerPos::from_n(0,5), //XXX placeholder
))
} else {
Err(BidError::NoContract)
}
}
}
}
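// Illustrative sketch (not part of the original source): `GardeContre` is the
// highest target, so `bid` closes the auction immediately instead of waiting
// for the remaining seats to speak.
#[cfg(test)]
mod garde_contre_sketch {
    use super::*;
    use crate::pos;

    #[test]
    fn garde_contre_ends_the_auction_at_once() {
        let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
        assert_eq!(
            auction.bid(pos::PlayerPos::from_n(0, 5), Target::GardeContre, false),
            Ok(AuctionState::Over)
        );
        assert!(auction.complete().is_ok());
    }
}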
#[cfg(test)]
mod tests {
use super::*;
use crate::pos;
#[test]
fn test_auction() {
let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
assert!(auction.state == AuctionState::Bidding);
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding));
assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Err(BidError::TurnError));
assert_eq!(auction.pass(pos::PlayerPos::from_n(2, 5)), Ok(AuctionState::Bidding));
// Someone bids.
assert_eq!(
auction.bid(pos::PlayerPos::from_n(3, 5), Target::Garde, false),
Ok(AuctionState::Bidding)
);
assert_eq!(
auction.bid(pos::PlayerPos::from_n(4, 5), Target::Garde, false).err(),
Some(BidError::NonRaisedTarget)
);
// Overbid
assert_eq!(
auction.bid(pos::PlayerPos::from_n(4, 5), Target::GardeSans, false),
Ok(AuctionState::Bidding)
);
// Already passed
assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Err(BidError::TurnError));
// Last to pass
assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Ok(AuctionState::Over));
assert!(auction.state == AuctionState::Over);
match auction.complete() {
Err(_) => assert!(false),
_ => {}
}
}
} |
// Reset previous bidder status
if let Some(contract) = self.contract.clone() {
self.set_player_status(contract.author, BidStatus::Todo);
} | random_line_split |
mod.rs | caller
/// cannot know ahead of time which type to use, `peek_message_type` can be used
/// to peek at the header first to figure out which static type should be used
/// in a subsequent call to `parse`.
///
/// Note that `peek_message_type` only inspects certain fields in the header,
/// and so `peek_message_type` succeeding does not guarantee that a subsequent
/// call to `parse` will also succeed.
pub fn peek_message_type<MessageType: TryFrom<u8>>(bytes: &[u8]) -> ParseResult<MessageType> {
let (hdr_pfx, _) = LayoutVerified::<_, HeaderPrefix>::new_unaligned_from_prefix(bytes)
.ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?;
MessageType::try_from(hdr_pfx.msg_type).map_err(|_| {
debug_err!(ParseError::NotSupported, "unrecognized message type: {:x}", hdr_pfx.msg_type,)
})
}
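// Illustrative sketch (not part of the original source): a caller that cannot
// know the message type ahead of time peeks at the header before choosing a
// concrete `IcmpMessage` type for the real parse. `Icmpv4MessageType` is the
// ICMPv4 implementation of `IcmpMessageType` defined elsewhere in this crate.
#[cfg(test)]
#[allow(dead_code)]
fn peek_before_parse_sketch(bytes: &[u8]) -> ParseResult<Icmpv4MessageType> {
    // A successful peek only validates the type byte; a later full parse can
    // still fail on the code, length, or checksum checks.
    peek_message_type::<Icmpv4MessageType>(bytes)
}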
/// An extension trait adding ICMP-related associated types to `Ipv4` and `Ipv6`.
///
/// This trait is kept separate from `IcmpIpExt` to not require a type parameter
/// that implements `ByteSlice`.
pub trait IcmpIpTypes: Ip {
/// The type of an ICMP parameter problem code.
///
/// For `Ipv4`, this is `Icmpv4ParameterProblemCode`, and for `Ipv6` this
/// is `Icmpv6ParameterProblemCode`.
type ParameterProblemCode: PartialEq + Send + Sync + Debug;
/// The type of an ICMP parameter problem pointer.
///
/// For `Ipv4`, this is `u8`, and for `Ipv6` this is `u32`.
type ParameterProblemPointer: PartialEq + Send + Sync + Debug;
/// The type of an ICMP parameter header length.
///
/// For `Ipv4`, this is `usize`, and for `Ipv6` this is `()`.
type HeaderLen: PartialEq + Send + Sync + Debug;
}
// A default implementation for any I: Ip. This is to convince the Rust compiler
// that, given an I: Ip, it's guaranteed to implement IcmpIpTypes. We humans know
// that Ipv4 and Ipv6 are the only types implementing Ip and so, since we
// implement IcmpIpTypes for both of these types, this is fine. The compiler isn't
// so smart. This implementation should never actually be used.
impl<I: Ip> IcmpIpTypes for I {
default type ParameterProblemCode = Never;
default type ParameterProblemPointer = Never;
default type HeaderLen = Never;
}
impl IcmpIpTypes for Ipv4 {
type ParameterProblemCode = Icmpv4ParameterProblemCode;
type ParameterProblemPointer = u8;
type HeaderLen = usize;
}
impl IcmpIpTypes for Ipv6 {
type ParameterProblemCode = Icmpv6ParameterProblemCode;
type ParameterProblemPointer = u32;
type HeaderLen = ();
}
/// An extension trait adding ICMP-related functionality to `Ipv4` and `Ipv6`.
pub trait IcmpIpExt: Ip {
/// The type of ICMP messages.
///
/// For `Ipv4`, this is `Icmpv4MessageType`, and for `Ipv6`, this is
/// `Icmpv6MessageType`.
type IcmpMessageType: IcmpMessageType;
/// The identifier for this ICMP version.
///
/// This value will be found in an IPv4 packet's Protocol field (for ICMPv4
/// packets) or an IPv6 fixed header's or last extension header's Next
/// Header field (for ICMPv6 packets).
const ICMP_IP_PROTO: IpProto;
/// Compute the length of the header of the packet prefix stored in `bytes`.
///
/// Given the prefix of a packet stored in `bytes`, compute the length of
/// the header of that packet, or `bytes.len()` if `bytes` does not contain
/// the entire header. If the version is IPv6, the returned length should
/// include all extension headers.
fn header_len(bytes: &[u8]) -> usize;
}
impl IcmpIpExt for Ipv4 {
type IcmpMessageType = Icmpv4MessageType;
const ICMP_IP_PROTO: IpProto = IpProto::Icmp;
fn header_len(bytes: &[u8]) -> usize {
if bytes.len() < ipv4::IPV4_MIN_HDR_LEN {
return bytes.len();
}
let (header_prefix, _) =
LayoutVerified::<_, ipv4::HeaderPrefix>::new_unaligned_from_prefix(bytes).unwrap();
cmp::min(header_prefix.ihl() as usize * 4, bytes.len())
}
}
impl IcmpIpExt for Ipv6 {
type IcmpMessageType = Icmpv6MessageType;
const ICMP_IP_PROTO: IpProto = IpProto::Icmpv6;
// TODO: Re-implement this in terms of partial parsing, and then get rid of
// the `header_len` method.
fn header_len(_bytes: &[u8]) -> usize {
// NOTE: We panic here rather than doing log_unimplemented! because
// there's no sane default value for this function. If it's called, it
// doesn't make sense for the program to continue executing; if we did,
// it would cause bugs in the caller.
unimplemented!()
}
}
/// An ICMP or ICMPv6 packet
///
/// 'IcmpPacketType' is implemented by `Icmpv4Packet` and `Icmpv6Packet`
pub trait IcmpPacketType<B: ByteSlice, I: Ip>:
Sized + ParsablePacket<B, IcmpParseArgs<I::Addr>, Error = ParseError>
{
}
impl<B: ByteSlice> IcmpPacketType<B, Ipv4> for Icmpv4Packet<B> {}
impl<B: ByteSlice> IcmpPacketType<B, Ipv6> for Icmpv6Packet<B> {}
// TODO(joshlf): Once we have generic associated types, refactor this so that we
// don't have to bind B ahead of time. Removing that requirement would make some
// APIs (in particular, IcmpPacketBuilder) simpler by removing the B parameter
// from them as well.
/// `MessageBody` represents the parsed body of the ICMP packet.
///
/// - For messages that expect no body, the `MessageBody` is of type `()`.
/// - For NDP messages, the `MessageBody` is of the type `ndp::Options`.
/// - For all other messages, the `MessageBody` will be of the type
/// `OriginalPacket`, which is a thin wrapper around `B`.
pub trait MessageBody<B>: Sized {
/// Whether or not a message body is expected in an ICMP packet.
const EXPECTS_BODY: bool = true;
/// Parse the MessageBody from the provided bytes.
fn parse(bytes: B) -> ParseResult<Self>
where
B: ByteSlice;
/// The length of the underlying buffer.
fn len(&self) -> usize
where
B: ByteSlice;
/// Is the body empty?
///
/// `b.is_empty()` is equivalent to `b.len() == 0`.
fn is_empty(&self) -> bool
where
B: ByteSlice,
{
self.len() == 0
}
/// Return the underlying bytes.
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>;
}
impl<B> MessageBody<B> for () {
const EXPECTS_BODY: bool = false;
fn parse(bytes: B) -> ParseResult<()>
where
B: ByteSlice,
{
if !bytes.is_empty() {
return debug_err!(Err(ParseError::Format), "unexpected message body");
}
Ok(())
}
fn len(&self) -> usize {
0
}
fn bytes(&self) -> &[u8] {
&[]
}
}
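// Illustrative sketch (not part of the original source): `()` is the body type
// for messages that carry no payload, so its parse only succeeds on an empty
// byte slice.
#[cfg(test)]
mod unit_body_sketch {
    use super::*;

    #[test]
    fn unit_body_only_accepts_empty_slices() {
        let empty: &[u8] = &[];
        let trailing: &[u8] = &[1, 2, 3];
        assert!(<() as MessageBody<&[u8]>>::parse(empty).is_ok());
        assert!(<() as MessageBody<&[u8]>>::parse(trailing).is_err());
    }
}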
/// A thin wrapper around B which implements `MessageBody`.
#[derive(Debug)]
pub struct OriginalPacket<B>(B);
impl<B: ByteSlice + Deref<Target = [u8]>> OriginalPacket<B> {
/// Returns the the body of the original packet.
pub fn body<I: IcmpIpExt>(&self) -> &[u8] {
// TODO(joshlf): Can these debug_asserts be triggered by external input?
let header_len = I::header_len(&self.0);
debug_assert!(header_len <= self.0.len());
debug_assert!(I::VERSION.is_v6() || self.0.len() - header_len == 8);
&self.0[header_len..]
}
}
impl<B> MessageBody<B> for OriginalPacket<B> {
fn parse(bytes: B) -> ParseResult<OriginalPacket<B>> {
Ok(OriginalPacket(bytes))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.0.len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
&self.0
}
}
impl<B, O: for<'a> OptionsImpl<'a>> MessageBody<B> for Options<B, O> {
fn parse(bytes: B) -> ParseResult<Options<B, O>>
where
B: ByteSlice,
{
Self::parse(bytes).map_err(|_e| debug_err!(ParseError::Format, "unable to parse options"))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.bytes().len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
self.bytes()
}
}
/// An ICMP message.
pub trait IcmpMessage<I: IcmpIpExt, B: ByteSlice>:
Sized + Copy + FromBytes + AsBytes + Unaligned
{
/// The type of codes used with this message.
///
/// The ICMP header includes an 8-bit "code" field. For a given message
/// type, different values of this field carry different meanings. Not all
/// code values are used - some may be invalid. This type represents a
/// parsed code. For example, for TODO, it is the TODO type.
type Code: Into<u8> + Copy + Debug;
/// The type of the body used with this message.
type Body: MessageBody<B>;
/// The type corresponding to this message type.
///
/// The value of the "type" field in the ICMP header corresponding to
/// messages of this type.
const TYPE: I::IcmpMessageType;
/// Parse a `Code` from an 8-bit number.
///
/// Parse a `Code` from the 8-bit "code" field in the ICMP header. Not all
/// values for this field are valid. If an invalid value is passed,
/// `code_from_u8` returns `None`.
fn code_from_u8(code: u8) -> Option<Self::Code>;
}
/// The type of an ICMP message.
///
/// `IcmpMessageType` is implemented by `Icmpv4MessageType` and
/// `Icmpv6MessageType`.
pub trait IcmpMessageType: TryFrom<u8> + Into<u8> + Copy {
/// Is this an error message?
///
/// For ICMP, this is true for the Destination Unreachable, Redirect, Source
/// Quench, Time Exceeded, and Parameter Problem message types. For ICMPv6,
/// this is true for the Destination Unreachable, Packet Too Big, Time
/// Exceeded, and Parameter Problem message types.
fn is_err(self) -> bool;
}
#[derive(Copy, Clone, Debug, FromBytes, Unaligned)]
#[repr(C)]
struct Header<M> {
prefix: HeaderPrefix,
message: M,
}
// So long as `M: Unaligned`, there will be no padding between the
// `HeaderPrefix` and `M`. Since `HeaderPrefix` itself is `Unaligned`, the
// alignment of `Header<M>` will be 1, meaning that no post-padding will need to
// be added to get to a multiple of the alignment. Since there is no padding,
// then so long as `M: AsBytes`, all of `Header<M>: AsBytes`.
unsafe impl<M: AsBytes + Unaligned> AsBytes for Header<M> {
// We're doing a bad thing, but it's necessary until derive(AsBytes)
// supports type parameters.
fn only_derive_is_allowed_to_implement_this_trait() {}
}
/// A partially parsed and not yet validated ICMP packet.
///
/// An `IcmpPacketRaw` provides minimal parsing of an ICMP packet. Namely, it
/// only requires that the header and message (in ICMPv6, these are both
/// considered part of the header) are present, and that the header has the
/// expected message type. The body may be missing (or an unexpected body may be
/// present). Other than the message type, no header, message, or body field
/// values will be validated.
///
/// [`IcmpPacket`] provides a [`FromRaw`] implementation that can be used to
/// validate an [`IcmpPacketRaw`].
#[derive(Debug)]
pub struct IcmpPacketRaw<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: B,
_marker: PhantomData<I>,
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketRaw<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
}
/// An ICMP packet.
///
/// An `IcmpPacket` shares its underlying memory with the byte slice it was
/// parsed from, meaning that no copying or extra allocation is necessary.
#[derive(Debug)]
pub struct IcmpPacket<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: M::Body,
_marker: PhantomData<I>,
}
/// Arguments required to parse an ICMP packet.
pub struct IcmpParseArgs<A: IpAddress> {
src_ip: A,
dst_ip: A,
}
impl<A: IpAddress> IcmpParseArgs<A> {
/// Construct a new `IcmpParseArgs`.
pub fn new<S: Into<A>, D: Into<A>>(src_ip: S, dst_ip: D) -> IcmpParseArgs<A> {
IcmpParseArgs { src_ip: src_ip.into(), dst_ip: dst_ip.into() }
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, ()>
for IcmpPacketRaw<I, B, M>
{
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata {
ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0)
}
fn parse<BV: BufferView<B>>(mut buffer: BV, _args: ()) -> ParseResult<Self> {
let header = buffer
.take_obj_front::<Header<M>>()
.ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?;
let message_body = buffer.into_rest();
if header.prefix.msg_type != M::TYPE.into() {
return debug_err!(Err(ParseError::NotExpected), "unexpected message type");
}
Ok(IcmpPacketRaw { header, message_body, _marker: PhantomData })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>>
FromRaw<IcmpPacketRaw<I, B, M>, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M>
{
type Error = ParseError;
fn try_from_raw_with(
raw: IcmpPacketRaw<I, B, M>,
args: IcmpParseArgs<I::Addr>,
) -> ParseResult<Self> {
let IcmpPacketRaw { header, message_body, _marker } = raw;
if !M::Body::EXPECTS_BODY && !message_body.is_empty() {
return debug_err!(Err(ParseError::Format), "unexpected message body");
}
let _: M::Code = M::code_from_u8(header.prefix.code).ok_or_else(debug_err_fn!(
ParseError::Format,
"unrecognized code: {}",
header.prefix.code
))?;
let checksum = Self::compute_checksum(&header, &message_body, args.src_ip, args.dst_ip)
.ok_or_else(debug_err_fn!(ParseError::Format, "packet too large"))?;
if checksum != [0, 0] {
return debug_err!(Err(ParseError::Checksum), "invalid checksum");
}
let message_body = M::Body::parse(message_body)?;
Ok(IcmpPacket { header, message_body, _marker })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, IcmpParseArgs<I::Addr>>
for IcmpPacket<I, B, M>
{
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata {
ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0)
}
fn parse<BV: BufferView<B>>(buffer: BV, args: IcmpParseArgs<I::Addr>) -> ParseResult<Self> {
IcmpPacketRaw::parse(buffer, ()).and_then(|p| IcmpPacket::try_from_raw_with(p, args))
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
/// Get the ICMP body.
pub fn body(&self) -> &M::Body {
&self.message_body
}
/// Get the ICMP message code.
///
/// The code provides extra details about the message. Each message type has
/// its own set of codes that are allowed.
pub fn code(&self) -> M::Code {
// infallible since it was validated in parse
M::code_from_u8(self.header.prefix.code).unwrap()
}
/// Construct a builder with the same contents as this packet.
pub fn builder(&self, src_ip: I::Addr, dst_ip: I::Addr) -> IcmpPacketBuilder<I, B, M> {
IcmpPacketBuilder { src_ip, dst_ip, code: self.code(), msg: *self.message() }
}
}
fn compute_checksum_fragmented<
I: IcmpIpExt,
B: ByteSlice,
BB: packet::Fragment,
M: IcmpMessage<I, B>,
>(
header: &Header<M>,
message_body: &FragmentedByteSlice<'_, BB>,
src_ip: I::Addr,
dst_ip: I::Addr,
) -> Option<[u8; 2]> {
let mut c = Checksum::new();
if I::VERSION.is_v6() |
c.add_bytes(&[header.prefix.msg_type, header.prefix.code]);
c.add_bytes(&header.prefix.checksum);
c.add_bytes(header.message.as_bytes());
for p in message_body.iter_fragments() {
c.add_bytes(p);
}
Some(c.checksum())
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> {
/// Compute the checksum, including the checksum field itself.
///
/// `compute_checksum` returns `None` if the version is IPv6 and the total
/// ICMP packet length overflows a u32.
fn compute_checksum(
header: &Header<M>,
message_body: &[u8],
src_ip: I::Addr,
dst_ip: I::Addr,
) -> Option<[u8; 2]> {
let mut body = [message_body];
compute_checksum_fragmented(header, &body.as_fragmented_byte_slice(), src_ip, dst_ip)
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = OriginalPacket<B>>>
IcmpPacket<I, B, M>
{
/// Get the body of the packet that caused this ICMP message.
///
/// This ICMP message contains some of the bytes of the packet that caused
/// this message to be emitted. `original_packet_body` returns as much of
/// the body of that packet as is contained in this message. For IPv4, this
/// is guaranteed to be 8 bytes. For IPv6, there are no guarantees about the
/// length.
pub fn original_packet_body(&self) -> &[u8] {
self.message_body.body::<I>()
}
/// Returns the original packet that caused this ICMP message.
///
/// This ICMP message contains some of the bytes of the packet that caused
/// this message to be emitted. `original_packet` returns as much of the
/// body of that packet as is contained in this message. For IPv4, this is
/// guaranteed to be 8 bytes. For IPv6, there are no guarantees about the
/// length.
pub fn original_packet(&self) -> &OriginalPacket<B> {
&self.message_body
}
}
impl<B: ByteSlice, M: IcmpMessage<Ipv4, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv4, B, M> {
/// Attempt to partially parse the original packet as an IPv4 packet.
///
/// `f` will be invoked on the result of calling `Ipv4PacketRaw::parse` on
/// the original packet.
pub fn with_original_packet<O, F: FnOnce(Result<Ipv4PacketRaw<&[u8]>, &[u8]>) -> O>(
&self,
f: F,
) -> O {
let mut bv = self.message_body.0.deref();
f(Ipv4PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref()))
}
}
impl<B: ByteSlice, M: IcmpMessage<Ipv6, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv6, B, M> {
/// Attempt to partially parse the original packet as an IPv6 packet.
///
/// `f` will be invoked on the result of calling `Ipv6PacketRaw::parse` on
/// the original packet.
pub fn with_original_packet<O, F: FnOnce(Result<Ipv6PacketRaw<&[u8]>, &[u8]>) -> O>(
&self,
f: F,
) -> O {
let mut bv = self.message_body.0.deref();
f(Ipv6PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref()))
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = ndp::Options<B>>> IcmpPacket<I, B, M> {
/// Get the parsed list of NDP options from the ICMP message.
pub fn ndp_options(&self) -> &ndp::Options<B> {
&self.message_body
}
}
/// A builder for ICMP packets.
#[derive(Debug)]
pub struct IcmpPacketBuilder<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
src_ip: I::Addr,
dst_ip: I::Addr,
code: M::Code,
msg: M,
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketBuilder<I, B, M> {
/// Construct a new `IcmpPacketBuilder`.
pub fn new<S: Into<I::Addr>, D: Into<I::Addr>>(
src_ip: S,
dst_ip: D,
code: M::Code,
msg: M,
) -> IcmpPacketBuilder<I, B, M> {
IcmpPacketBuilder { src_ip: src_ip.into(), dst_ip: dst_ip.into(), code, msg }
}
}
// TODO(joshlf): Figure out a way to split body and non-body message types by
// trait and implement PacketBuilder for some and InnerPacketBuilder for others.
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> PacketBuilder
for IcmpPacketBuilder<I, B, M>
{
fn constraints(&self) -> PacketConstraints {
// The maximum body length constraint ensures that the body length
// doesn't overflow the 32-bit length field in the pseudo-header used
// for calculating the checksum.
//
// Note that, for messages that don't take bodies, it's important that
// we don't just set this to 0. Trying to serialize a body in a message
// type which doesn't take bodies is a programmer error, so we should
// panic in that case. Setting the max_body_len to 0 would surface the
// issue as an MTU error, which would hide the underlying problem.
// Instead, we assert in serialize. Eventually, we will hopefully figure
// out a way to implement InnerPacketBuilder (rather than PacketBuilder)
// for these message types, and this won't be an issue anymore.
PacketConstraints::new(mem::size_of::<Header<M>>(), 0, 0, core::u32::MAX as usize)
}
fn serialize(&self, buffer: &mut SerializeBuffer<'_>) {
use packet::BufferViewMut;
let (mut prefix, message_body, _) = buffer.parts();
// implements BufferViewMut, giving us take_obj_xxx_zero methods
let mut prefix = &mut prefix;
assert!(
M::Body::EXPECTS_BODY || message_body.is_empty(),
"body provided for message that doesn't take a body"
);
// SECURITY: | {
c.add_bytes(src_ip.bytes());
c.add_bytes(dst_ip.bytes());
let icmpv6_len = mem::size_of::<Header<M>>() + message_body.len();
let mut len_bytes = [0; 4];
NetworkEndian::write_u32(&mut len_bytes, icmpv6_len.try_into().ok()?);
c.add_bytes(&len_bytes[..]);
c.add_bytes(&[0, 0, 0]);
c.add_bytes(&[IpProto::Icmpv6.into()]);
} | conditional_block |
mod.rs | caller
/// cannot know ahead of time which type to use, `peek_message_type` can be used
/// to peek at the header first to figure out which static type should be used
/// in a subsequent call to `parse`.
///
/// Note that `peek_message_type` only inspects certain fields in the header,
/// and so `peek_message_type` succeeding does not guarantee that a subsequent
/// call to `parse` will also succeed.
pub fn peek_message_type<MessageType: TryFrom<u8>>(bytes: &[u8]) -> ParseResult<MessageType> {
let (hdr_pfx, _) = LayoutVerified::<_, HeaderPrefix>::new_unaligned_from_prefix(bytes)
.ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?;
MessageType::try_from(hdr_pfx.msg_type).map_err(|_| {
debug_err!(ParseError::NotSupported, "unrecognized message type: {:x}", hdr_pfx.msg_type,)
})
}
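// Editor's illustrative sketch (not part of the original source): peeking at the
// message type to pick a static type before calling `parse`. The
// `Icmpv4MessageType::EchoRequest` variant named below is an assumption for
// illustration; only `peek_message_type` itself comes from the code above.
//
// match peek_message_type::<Icmpv4MessageType>(bytes)? {
//     Icmpv4MessageType::EchoRequest => { /* parse as IcmpPacket<Ipv4, _, IcmpEchoRequest> */ }
//     other_type => { /* dispatch the remaining message types */ }
// }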
/// An extension trait adding ICMP-related associated types to `Ipv4` and `Ipv6`.
///
/// This trait is kept separate from `IcmpIpExt` to not require a type parameter
/// that implements `ByteSlice`.
pub trait IcmpIpTypes: Ip {
/// The type of an ICMP parameter problem code.
///
/// For `Ipv4`, this is `Icmpv4ParameterProblemCode`, and for `Ipv6` this
/// is `Icmpv6ParameterProblemCode`.
type ParameterProblemCode: PartialEq + Send + Sync + Debug;
/// The type of an ICMP parameter problem pointer.
///
/// For `Ipv4`, this is `u8`, and for `Ipv6` this is `u32`.
type ParameterProblemPointer: PartialEq + Send + Sync + Debug;
/// The type of an ICMP parameter header length.
///
/// For `Ipv4`, this is `usize`, and for `Ipv6` this is `()`.
type HeaderLen: PartialEq + Send + Sync + Debug;
}
// A default implementation for any I: Ip. This is to convince the Rust compiler
// that, given an I: Ip, it's guaranteed to implement IcmpIpTypes. We humans know
// that Ipv4 and Ipv6 are the only types implementing Ip and so, since we
// implement IcmpIpTypes for both of these types, this is fine. The compiler isn't
// so smart. This implementation should never actually be used.
impl<I: Ip> IcmpIpTypes for I {
default type ParameterProblemCode = Never;
default type ParameterProblemPointer = Never;
default type HeaderLen = Never;
}
impl IcmpIpTypes for Ipv4 {
type ParameterProblemCode = Icmpv4ParameterProblemCode;
type ParameterProblemPointer = u8;
type HeaderLen = usize;
}
impl IcmpIpTypes for Ipv6 {
type ParameterProblemCode = Icmpv6ParameterProblemCode;
type ParameterProblemPointer = u32;
type HeaderLen = ();
}
/// An extension trait adding ICMP-related functionality to `Ipv4` and `Ipv6`.
pub trait IcmpIpExt: Ip {
/// The type of ICMP messages.
///
/// For `Ipv4`, this is `Icmpv4MessageType`, and for `Ipv6`, this is
/// `Icmpv6MessageType`.
type IcmpMessageType: IcmpMessageType;
/// The identifier for this ICMP version.
///
/// This value will be found in an IPv4 packet's Protocol field (for ICMPv4
/// packets) or an IPv6 fixed header's or last extension header's Next
/// Header field (for ICMPv6 packets).
const ICMP_IP_PROTO: IpProto;
/// Compute the length of the header of the packet prefix stored in `bytes`.
///
/// Given the prefix of a packet stored in `bytes`, compute the length of
/// the header of that packet, or `bytes.len()` if `bytes` does not contain
/// the entire header. If the version is IPv6, the returned length should
/// include all extension headers.
fn header_len(bytes: &[u8]) -> usize;
}
impl IcmpIpExt for Ipv4 {
type IcmpMessageType = Icmpv4MessageType;
const ICMP_IP_PROTO: IpProto = IpProto::Icmp;
fn header_len(bytes: &[u8]) -> usize {
if bytes.len() < ipv4::IPV4_MIN_HDR_LEN {
return bytes.len();
}
let (header_prefix, _) =
LayoutVerified::<_, ipv4::HeaderPrefix>::new_unaligned_from_prefix(bytes).unwrap();
cmp::min(header_prefix.ihl() as usize * 4, bytes.len())
}
}
impl IcmpIpExt for Ipv6 {
type IcmpMessageType = Icmpv6MessageType;
const ICMP_IP_PROTO: IpProto = IpProto::Icmpv6;
// TODO: Re-implement this in terms of partial parsing, and then get rid of
// the `header_len` method.
fn header_len(_bytes: &[u8]) -> usize {
// NOTE: We panic here rather than doing log_unimplemented! because
// there's no sane default value for this function. If it's called, it
// doesn't make sense for the program to continue executing; if we did,
// it would cause bugs in the caller.
unimplemented!()
}
}
/// An ICMP or ICMPv6 packet
///
/// 'IcmpPacketType' is implemented by `Icmpv4Packet` and `Icmpv6Packet`
pub trait IcmpPacketType<B: ByteSlice, I: Ip>:
Sized + ParsablePacket<B, IcmpParseArgs<I::Addr>, Error = ParseError>
{
}
impl<B: ByteSlice> IcmpPacketType<B, Ipv4> for Icmpv4Packet<B> {}
impl<B: ByteSlice> IcmpPacketType<B, Ipv6> for Icmpv6Packet<B> {}
// TODO(joshlf): Once we have generic associated types, refactor this so that we
// don't have to bind B ahead of time. Removing that requirement would make some
// APIs (in particular, IcmpPacketBuilder) simpler by removing the B parameter
// from them as well.
/// `MessageBody` represents the parsed body of the ICMP packet.
///
/// - For messages that expect no body, the `MessageBody` is of type `()`.
/// - For NDP messages, the `MessageBody` is of the type `ndp::Options`.
/// - For all other messages, the `MessageBody` will be of the type
/// `OriginalPacket`, which is a thin wrapper around `B`.
pub trait MessageBody<B>: Sized {
/// Whether or not a message body is expected in an ICMP packet.
const EXPECTS_BODY: bool = true;
/// Parse the MessageBody from the provided bytes.
fn parse(bytes: B) -> ParseResult<Self>
where
B: ByteSlice;
/// The length of the underlying buffer.
fn len(&self) -> usize
where
B: ByteSlice;
/// Is the body empty?
///
/// `b.is_empty()` is equivalent to `b.len() == 0`.
fn is_empty(&self) -> bool
where
B: ByteSlice,
{
self.len() == 0
}
/// Return the underlying bytes.
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>;
}
impl<B> MessageBody<B> for () {
const EXPECTS_BODY: bool = false;
fn parse(bytes: B) -> ParseResult<()>
where
B: ByteSlice,
{
if !bytes.is_empty() {
return debug_err!(Err(ParseError::Format), "unexpected message body");
}
Ok(())
}
fn len(&self) -> usize {
0
}
fn bytes(&self) -> &[u8] {
&[]
}
}
/// A thin wrapper around B which implements `MessageBody`.
#[derive(Debug)]
pub struct OriginalPacket<B>(B);
impl<B: ByteSlice + Deref<Target = [u8]>> OriginalPacket<B> {
/// Returns the body of the original packet.
pub fn body<I: IcmpIpExt>(&self) -> &[u8] {
// TODO(joshlf): Can these debug_asserts be triggered by external input?
let header_len = I::header_len(&self.0);
debug_assert!(header_len <= self.0.len());
debug_assert!(I::VERSION.is_v6() || self.0.len() - header_len == 8);
&self.0[header_len..]
}
}
impl<B> MessageBody<B> for OriginalPacket<B> {
fn parse(bytes: B) -> ParseResult<OriginalPacket<B>> {
Ok(OriginalPacket(bytes))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.0.len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
&self.0
}
}
impl<B, O: for<'a> OptionsImpl<'a>> MessageBody<B> for Options<B, O> {
fn parse(bytes: B) -> ParseResult<Options<B, O>>
where
B: ByteSlice,
{
Self::parse(bytes).map_err(|_e| debug_err!(ParseError::Format, "unable to parse options"))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.bytes().len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
self.bytes()
}
}
/// An ICMP message.
pub trait IcmpMessage<I: IcmpIpExt, B: ByteSlice>:
Sized + Copy + FromBytes + AsBytes + Unaligned
{
/// The type of codes used with this message.
///
/// The ICMP header includes an 8-bit "code" field. For a given message
/// type, different values of this field carry different meanings. Not all
/// code values are used - some may be invalid. This type represents a
/// parsed code. For example, for TODO, it is the TODO type.
type Code: Into<u8> + Copy + Debug;
/// The type of the body used with this message.
type Body: MessageBody<B>;
/// The type corresponding to this message type.
///
/// The value of the "type" field in the ICMP header corresponding to
/// messages of this type.
const TYPE: I::IcmpMessageType;
/// Parse a `Code` from an 8-bit number.
///
/// Parse a `Code` from the 8-bit "code" field in the ICMP header. Not all
/// values for this field are valid. If an invalid value is passed,
/// `code_from_u8` returns `None`.
fn code_from_u8(code: u8) -> Option<Self::Code>;
}
/// The type of an ICMP message.
///
/// `IcmpMessageType` is implemented by `Icmpv4MessageType` and
/// `Icmpv6MessageType`.
pub trait IcmpMessageType: TryFrom<u8> + Into<u8> + Copy {
/// Is this an error message?
///
/// For ICMP, this is true for the Destination Unreachable, Redirect, Source
/// Quench, Time Exceeded, and Parameter Problem message types. For ICMPv6,
/// this is true for the Destination Unreachable, Packet Too Big, Time
/// Exceeded, and Parameter Problem message types.
fn is_err(self) -> bool;
}
#[derive(Copy, Clone, Debug, FromBytes, Unaligned)]
#[repr(C)]
struct Header<M> {
prefix: HeaderPrefix,
message: M,
}
// So long as `M: Unaligned`, there will be no padding between the
// `HeaderPrefix` and `M`. Since `HeaderPrefix` itself is `Unaligned`, the
// alignment of `Header<M>` will be 1, meaning that no post-padding will need to
// be added to get to a multiple of the alignment. Since there is no padding,
// then so long as `M: AsBytes`, all of `Header<M>: AsBytes`.
unsafe impl<M: AsBytes + Unaligned> AsBytes for Header<M> {
// We're doing a bad thing, but it's necessary until derive(AsBytes)
// supports type parameters.
fn only_derive_is_allowed_to_implement_this_trait() {}
}
/// A partially parsed and not yet validated ICMP packet.
///
/// An `IcmpPacketRaw` provides minimal parsing of an ICMP packet. Namely, it
/// only requires that the header and message (in ICMPv6, these are both
/// considered part of the header) are present, and that the header has the
/// expected message type. The body may be missing (or an unexpected body may be
/// present). Other than the message type, no header, message, or body field
/// values will be validated.
///
/// [`IcmpPacket`] provides a [`FromRaw`] implementation that can be used to
/// validate an [`IcmpPacketRaw`].
#[derive(Debug)]
pub struct IcmpPacketRaw<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: B,
_marker: PhantomData<I>,
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketRaw<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
}
/// An ICMP packet.
///
/// An `IcmpPacket` shares its underlying memory with the byte slice it was
/// parsed from, meaning that no copying or extra allocation is necessary.
#[derive(Debug)]
pub struct IcmpPacket<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: M::Body,
_marker: PhantomData<I>,
}
/// Arguments required to parse an ICMP packet.
pub struct IcmpParseArgs<A: IpAddress> {
src_ip: A,
dst_ip: A,
}
impl<A: IpAddress> IcmpParseArgs<A> {
/// Construct a new `IcmpParseArgs`.
pub fn | <S: Into<A>, D: Into<A>>(src_ip: S, dst_ip: D) -> IcmpParseArgs<A> {
IcmpParseArgs { src_ip: src_ip.into(), dst_ip: dst_ip.into() }
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, ()>
for IcmpPacketRaw<I, B, M>
{
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata {
ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0)
}
fn parse<BV: BufferView<B>>(mut buffer: BV, _args: ()) -> ParseResult<Self> {
let header = buffer
.take_obj_front::<Header<M>>()
.ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?;
let message_body = buffer.into_rest();
if header.prefix.msg_type != M::TYPE.into() {
return debug_err!(Err(ParseError::NotExpected), "unexpected message type");
}
Ok(IcmpPacketRaw { header, message_body, _marker: PhantomData })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>>
FromRaw<IcmpPacketRaw<I, B, M>, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M>
{
type Error = ParseError;
fn try_from_raw_with(
raw: IcmpPacketRaw<I, B, M>,
args: IcmpParseArgs<I::Addr>,
) -> ParseResult<Self> {
let IcmpPacketRaw { header, message_body, _marker } = raw;
if !M::Body::EXPECTS_BODY && !message_body.is_empty() {
return debug_err!(Err(ParseError::Format), "unexpected message body");
}
let _: M::Code = M::code_from_u8(header.prefix.code).ok_or_else(debug_err_fn!(
ParseError::Format,
"unrecognized code: {}",
header.prefix.code
))?;
let checksum = Self::compute_checksum(&header, &message_body, args.src_ip, args.dst_ip)
.ok_or_else(debug_err_fn!(ParseError::Format, "packet too large"))?;
if checksum != [0, 0] {
return debug_err!(Err(ParseError::Checksum), "invalid checksum");
}
let message_body = M::Body::parse(message_body)?;
Ok(IcmpPacket { header, message_body, _marker })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, IcmpParseArgs<I::Addr>>
for IcmpPacket<I, B, M>
{
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata {
ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0)
}
fn parse<BV: BufferView<B>>(buffer: BV, args: IcmpParseArgs<I::Addr>) -> ParseResult<Self> {
IcmpPacketRaw::parse(buffer, ()).and_then(|p| IcmpPacket::try_from_raw_with(p, args))
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
/// Get the ICMP body.
pub fn body(&self) -> &M::Body {
&self.message_body
}
/// Get the ICMP message code.
///
/// The code provides extra details about the message. Each message type has
/// its own set of codes that are allowed.
pub fn code(&self) -> M::Code {
// infallible since it was validated in parse
M::code_from_u8(self.header.prefix.code).unwrap()
}
/// Construct a builder with the same contents as this packet.
pub fn builder(&self, src_ip: I::Addr, dst_ip: I::Addr) -> IcmpPacketBuilder<I, B, M> {
IcmpPacketBuilder { src_ip, dst_ip, code: self.code(), msg: *self.message() }
}
}
fn compute_checksum_fragmented<
I: IcmpIpExt,
B: ByteSlice,
BB: packet::Fragment,
M: IcmpMessage<I, B>,
>(
header: &Header<M>,
message_body: &FragmentedByteSlice<'_, BB>,
src_ip: I::Addr,
dst_ip: I::Addr,
) -> Option<[u8; 2]> {
let mut c = Checksum::new();
if I::VERSION.is_v6() {
c.add_bytes(src_ip.bytes());
c.add_bytes(dst_ip.bytes());
let icmpv6_len = mem::size_of::<Header<M>>() + message_body.len();
let mut len_bytes = [0; 4];
NetworkEndian::write_u32(&mut len_bytes, icmpv6_len.try_into().ok()?);
c.add_bytes(&len_bytes[..]);
c.add_bytes(&[0, 0, 0]);
c.add_bytes(&[IpProto::Icmpv6.into()]);
}
c.add_bytes(&[header.prefix.msg_type, header.prefix.code]);
c.add_bytes(&header.prefix.checksum);
c.add_bytes(header.message.as_bytes());
for p in message_body.iter_fragments() {
c.add_bytes(p);
}
Some(c.checksum())
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> {
/// Compute the checksum, including the checksum field itself.
///
/// `compute_checksum` returns `None` if the version is IPv6 and the total
/// ICMP packet length overflows a u32.
fn compute_checksum(
header: &Header<M>,
message_body: &[u8],
src_ip: I::Addr,
dst_ip: I::Addr,
) -> Option<[u8; 2]> {
let mut body = [message_body];
compute_checksum_fragmented(header, &body.as_fragmented_byte_slice(), src_ip, dst_ip)
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = OriginalPacket<B>>>
IcmpPacket<I, B, M>
{
/// Get the body of the packet that caused this ICMP message.
///
/// This ICMP message contains some of the bytes of the packet that caused
/// this message to be emitted. `original_packet_body` returns as much of
/// the body of that packet as is contained in this message. For IPv4, this
/// is guaranteed to be 8 bytes. For IPv6, there are no guarantees about the
/// length.
pub fn original_packet_body(&self) -> &[u8] {
self.message_body.body::<I>()
}
/// Returns the original packet that caused this ICMP message.
///
/// This ICMP message contains some of the bytes of the packet that caused
/// this message to be emitted. `original_packet` returns as much of the
/// body of that packet as is contained in this message. For IPv4, this is
/// guaranteed to be 8 bytes. For IPv6, there are no guarantees about the
/// length.
pub fn original_packet(&self) -> &OriginalPacket<B> {
&self.message_body
}
}
impl<B: ByteSlice, M: IcmpMessage<Ipv4, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv4, B, M> {
/// Attempt to partially parse the original packet as an IPv4 packet.
///
/// `f` will be invoked on the result of calling `Ipv4PacketRaw::parse` on
/// the original packet.
pub fn with_original_packet<O, F: FnOnce(Result<Ipv4PacketRaw<&[u8]>, &[u8]>) -> O>(
&self,
f: F,
) -> O {
let mut bv = self.message_body.0.deref();
f(Ipv4PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref()))
}
}
impl<B: ByteSlice, M: IcmpMessage<Ipv6, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv6, B, M> {
/// Attempt to partially parse the original packet as an IPv6 packet.
///
/// `f` will be invoked on the result of calling `Ipv6PacketRaw::parse` on
/// the original packet.
pub fn with_original_packet<O, F: FnOnce(Result<Ipv6PacketRaw<&[u8]>, &[u8]>) -> O>(
&self,
f: F,
) -> O {
let mut bv = self.message_body.0.deref();
f(Ipv6PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref()))
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = ndp::Options<B>>> IcmpPacket<I, B, M> {
/// Get the parsed list of NDP options from the ICMP message.
pub fn ndp_options(&self) -> &ndp::Options<B> {
&self.message_body
}
}
/// A builder for ICMP packets.
#[derive(Debug)]
pub struct IcmpPacketBuilder<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
src_ip: I::Addr,
dst_ip: I::Addr,
code: M::Code,
msg: M,
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketBuilder<I, B, M> {
/// Construct a new `IcmpPacketBuilder`.
pub fn new<S: Into<I::Addr>, D: Into<I::Addr>>(
src_ip: S,
dst_ip: D,
code: M::Code,
msg: M,
) -> IcmpPacketBuilder<I, B, M> {
IcmpPacketBuilder { src_ip: src_ip.into(), dst_ip: dst_ip.into(), code, msg }
}
}
// TODO(joshlf): Figure out a way to split body and non-body message types by
// trait and implement PacketBuilder for some and InnerPacketBuilder for others.
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> PacketBuilder
for IcmpPacketBuilder<I, B, M>
{
fn constraints(&self) -> PacketConstraints {
// The maximum body length constraint ensures that the body length
// doesn't overflow the 32-bit length field in the pseudo-header used
// for calculating the checksum.
//
// Note that, for messages that don't take bodies, it's important that
// we don't just set this to 0. Trying to serialize a body in a message
// type which doesn't take bodies is a programmer error, so we should
// panic in that case. Setting the max_body_len to 0 would surface the
// issue as an MTU error, which would hide the underlying problem.
// Instead, we assert in serialize. Eventually, we will hopefully figure
// out a way to implement InnerPacketBuilder (rather than PacketBuilder)
// for these message types, and this won't be an issue anymore.
PacketConstraints::new(mem::size_of::<Header<M>>(), 0, 0, core::u32::MAX as usize)
}
fn serialize(&self, buffer: &mut SerializeBuffer<'_>) {
use packet::BufferViewMut;
let (mut prefix, message_body, _) = buffer.parts();
// implements BufferViewMut, giving us take_obj_xxx_zero methods
let mut prefix = &mut prefix;
assert!(
M::Body::EXPECTS_BODY || message_body.is_empty(),
"body provided for message that doesn't take a body"
);
// SECURITY: | new | identifier_name |
mod.rs | so the caller
/// cannot know ahead of time which type to use, `peek_message_type` can be used
/// to peek at the header first to figure out which static type should be used
/// in a subsequent call to `parse`.
///
/// Note that `peek_message_type` only inspects certain fields in the header,
/// and so `peek_message_type` succeeding does not guarantee that a subsequent
/// call to `parse` will also succeed.
pub fn peek_message_type<MessageType: TryFrom<u8>>(bytes: &[u8]) -> ParseResult<MessageType> {
let (hdr_pfx, _) = LayoutVerified::<_, HeaderPrefix>::new_unaligned_from_prefix(bytes)
.ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?;
MessageType::try_from(hdr_pfx.msg_type).map_err(|_| {
debug_err!(ParseError::NotSupported, "unrecognized message type: {:x}", hdr_pfx.msg_type,)
})
}
/// An extension trait adding ICMP-related associated types to `Ipv4` and `Ipv6`.
///
/// This trait is kept separate from `IcmpIpExt` to not require a type parameter
/// that implements `ByteSlice`.
pub trait IcmpIpTypes: Ip {
/// The type of an ICMP parameter problem code.
///
/// For `Ipv4`, this is `Icmpv4ParameterProblemCode`, and for `Ipv6` this
/// is `Icmpv6ParameterProblemCode`.
type ParameterProblemCode: PartialEq + Send + Sync + Debug;
/// The type of an ICMP parameter problem pointer.
///
/// For `Ipv4`, this is `u8`, and for `Ipv6` this is `u32`.
type ParameterProblemPointer: PartialEq + Send + Sync + Debug;
/// The type of an ICMP parameter header length.
///
/// For `Ipv4`, this is `usize`, and for `Ipv6` this is `()`.
type HeaderLen: PartialEq + Send + Sync + Debug;
}
// A default implementation for any I: Ip. This is to convince the Rust compiler
// that, given an I: Ip, it's guaranteed to implement IcmpIpTypes. We humans know
// that Ipv4 and Ipv6 are the only types implementing Ip and so, since we
// implement IcmpIpTypes for both of these types, this is fine. The compiler isn't
// so smart. This implementation should never actually be used.
impl<I: Ip> IcmpIpTypes for I {
default type ParameterProblemCode = Never;
default type ParameterProblemPointer = Never;
default type HeaderLen = Never;
}
impl IcmpIpTypes for Ipv4 {
type ParameterProblemCode = Icmpv4ParameterProblemCode;
type ParameterProblemPointer = u8;
type HeaderLen = usize;
}
impl IcmpIpTypes for Ipv6 {
type ParameterProblemCode = Icmpv6ParameterProblemCode;
type ParameterProblemPointer = u32;
type HeaderLen = ();
}
/// An extension trait adding ICMP-related functionality to `Ipv4` and `Ipv6`.
pub trait IcmpIpExt: Ip {
/// The type of ICMP messages.
///
/// For `Ipv4`, this is `Icmpv4MessageType`, and for `Ipv6`, this is
/// `Icmpv6MessageType`.
type IcmpMessageType: IcmpMessageType;
/// The identifier for this ICMP version.
///
/// This value will be found in an IPv4 packet's Protocol field (for ICMPv4
/// packets) or an IPv6 fixed header's or last extension header's Next
/// Heeader field (for ICMPv6 packets).
const ICMP_IP_PROTO: IpProto;
/// Compute the length of the header of the packet prefix stored in `bytes`.
///
/// Given the prefix of a packet stored in `bytes`, compute the length of
/// the header of that packet, or `bytes.len()` if `bytes` does not contain
/// the entire header. If the version is IPv6, the returned length should
/// include all extension headers.
fn header_len(bytes: &[u8]) -> usize;
}
impl IcmpIpExt for Ipv4 {
type IcmpMessageType = Icmpv4MessageType;
const ICMP_IP_PROTO: IpProto = IpProto::Icmp;
fn header_len(bytes: &[u8]) -> usize {
if bytes.len() < ipv4::IPV4_MIN_HDR_LEN {
return bytes.len();
}
let (header_prefix, _) =
LayoutVerified::<_, ipv4::HeaderPrefix>::new_unaligned_from_prefix(bytes).unwrap();
cmp::min(header_prefix.ihl() as usize * 4, bytes.len())
}
}
impl IcmpIpExt for Ipv6 {
type IcmpMessageType = Icmpv6MessageType;
const ICMP_IP_PROTO: IpProto = IpProto::Icmpv6;
// TODO: Re-implement this in terms of partial parsing, and then get rid of
// the `header_len` method.
fn header_len(_bytes: &[u8]) -> usize {
// NOTE: We panic here rather than doing log_unimplemented! because
// there's no sane default value for this function. If it's called, it
// doesn't make sense for the program to continue executing; if we did,
// it would cause bugs in the caller.
unimplemented!()
}
}
/// An ICMP or ICMPv6 packet
///
/// 'IcmpPacketType' is implemented by `Icmpv4Packet` and `Icmpv6Packet`
pub trait IcmpPacketType<B: ByteSlice, I: Ip>:
Sized + ParsablePacket<B, IcmpParseArgs<I::Addr>, Error = ParseError>
{
}
impl<B: ByteSlice> IcmpPacketType<B, Ipv4> for Icmpv4Packet<B> {}
impl<B: ByteSlice> IcmpPacketType<B, Ipv6> for Icmpv6Packet<B> {}
// TODO(joshlf): Once we have generic associated types, refactor this so that we
// don't have to bind B ahead of time. Removing that requirement would make some
// APIs (in particular, IcmpPacketBuilder) simpler by removing the B parameter
// from them as well.
/// `MessageBody` represents the parsed body of the ICMP packet.
///
/// - For messages that expect no body, the `MessageBody` is of type `()`.
/// - For NDP messages, the `MessageBody` is of the type `ndp::Options`.
/// - For all other messages, the `MessageBody` will be of the type
/// `OriginalPacket`, which is a thin wrapper around `B`.
pub trait MessageBody<B>: Sized {
/// Whether or not a message body is expected in an ICMP packet.
const EXPECTS_BODY: bool = true;
/// Parse the MessageBody from the provided bytes.
fn parse(bytes: B) -> ParseResult<Self>
where
B: ByteSlice;
/// The length of the underlying buffer.
fn len(&self) -> usize
where
B: ByteSlice;
/// Is the body empty?
///
/// `b.is_empty()` is equivalent to `b.len() == 0`.
fn is_empty(&self) -> bool
where
B: ByteSlice,
{
self.len() == 0
}
/// Return the underlying bytes.
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>;
}
impl<B> MessageBody<B> for () {
const EXPECTS_BODY: bool = false;
fn parse(bytes: B) -> ParseResult<()>
where
B: ByteSlice,
{
if !bytes.is_empty() {
return debug_err!(Err(ParseError::Format), "unexpected message body");
}
Ok(())
}
fn len(&self) -> usize {
0
}
fn bytes(&self) -> &[u8] {
&[]
}
}
/// A thin wrapper around B which implements `MessageBody`.
#[derive(Debug)]
pub struct OriginalPacket<B>(B);
impl<B: ByteSlice + Deref<Target = [u8]>> OriginalPacket<B> {
/// Returns the body of the original packet.
pub fn body<I: IcmpIpExt>(&self) -> &[u8] {
// TODO(joshlf): Can these debug_asserts be triggered by external input?
let header_len = I::header_len(&self.0);
debug_assert!(header_len <= self.0.len());
debug_assert!(I::VERSION.is_v6() || self.0.len() - header_len == 8);
&self.0[header_len..]
}
}
impl<B> MessageBody<B> for OriginalPacket<B> {
fn parse(bytes: B) -> ParseResult<OriginalPacket<B>> {
Ok(OriginalPacket(bytes))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.0.len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
&self.0
}
}
impl<B, O: for<'a> OptionsImpl<'a>> MessageBody<B> for Options<B, O> {
fn parse(bytes: B) -> ParseResult<Options<B, O>>
where
B: ByteSlice,
{
Self::parse(bytes).map_err(|_e| debug_err!(ParseError::Format, "unable to parse options"))
}
fn len(&self) -> usize
where
B: ByteSlice,
{
self.bytes().len()
}
fn bytes(&self) -> &[u8]
where
B: Deref<Target = [u8]>,
{
self.bytes()
}
}
/// An ICMP message.
pub trait IcmpMessage<I: IcmpIpExt, B: ByteSlice>:
Sized + Copy + FromBytes + AsBytes + Unaligned
{
/// The type of codes used with this message.
///
/// The ICMP header includes an 8-bit "code" field. For a given message
/// type, different values of this field carry different meanings. Not all
/// code values are used - some may be invalid. This type represents a
/// parsed code. For example, for TODO, it is the TODO type.
type Code: Into<u8> + Copy + Debug;
/// The type of the body used with this message.
type Body: MessageBody<B>;
/// The type corresponding to this message type.
///
/// The value of the "type" field in the ICMP header corresponding to
/// messages of this type.
const TYPE: I::IcmpMessageType;
/// Parse a `Code` from an 8-bit number.
///
/// Parse a `Code` from the 8-bit "code" field in the ICMP header. Not all
/// values for this field are valid. If an invalid value is passed,
/// `code_from_u8` returns `None`.
fn code_from_u8(code: u8) -> Option<Self::Code>;
}
/// The type of an ICMP message.
///
/// `IcmpMessageType` is implemented by `Icmpv4MessageType` and
/// `Icmpv6MessageType`.
pub trait IcmpMessageType: TryFrom<u8> + Into<u8> + Copy {
/// Is this an error message?
///
/// For ICMP, this is true for the Destination Unreachable, Redirect, Source
/// Quench, Time Exceeded, and Parameter Problem message types. For ICMPv6,
/// this is true for the Destination Unreachable, Packet Too Big, Time
/// Exceeded, and Parameter Problem message types.
fn is_err(self) -> bool;
}
#[derive(Copy, Clone, Debug, FromBytes, Unaligned)]
#[repr(C)]
struct Header<M> {
prefix: HeaderPrefix,
message: M,
}
// So long as `M: Unaligned`, there will be no padding between the
// `HeaderPrefix` and `M`. Since `HeaderPrefix` itself is `Unaligned`, the
// alignment of `Header<M>` will be 1, meaning that no post-padding will need to
// be added to get to a multiple of the alignment. Since there is no padding,
// then so long as `M: AsBytes`, all of `Header<M>: AsBytes`.
unsafe impl<M: AsBytes + Unaligned> AsBytes for Header<M> {
// We're doing a bad thing, but it's necessary until derive(AsBytes)
// supports type parameters.
fn only_derive_is_allowed_to_implement_this_trait() {}
}
/// A partially parsed and not yet validated ICMP packet.
///
/// An `IcmpPacketRaw` provides minimal parsing of an ICMP packet. Namely, it
/// only requires that the header and message (in ICMPv6, these are both
/// considered part of the header) are present, and that the header has the
/// expected message type. The body may be missing (or an unexpected body may be
/// present). Other than the message type, no header, message, or body field
/// values will be validated.
///
/// [`IcmpPacket`] provides a [`FromRaw`] implementation that can be used to
/// validate an [`IcmpPacketRaw`].
#[derive(Debug)]
pub struct IcmpPacketRaw<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: B,
_marker: PhantomData<I>,
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketRaw<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
}
/// An ICMP packet.
///
/// An `IcmpPacket` shares its underlying memory with the byte slice it was
/// parsed from, meaning that no copying or extra allocation is necessary.
#[derive(Debug)]
pub struct IcmpPacket<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
header: LayoutVerified<B, Header<M>>,
message_body: M::Body,
_marker: PhantomData<I>,
}
/// Arguments required to parse an ICMP packet.
pub struct IcmpParseArgs<A: IpAddress> {
src_ip: A,
dst_ip: A,
}
impl<A: IpAddress> IcmpParseArgs<A> {
/// Construct a new `IcmpParseArgs`.
pub fn new<S: Into<A>, D: Into<A>>(src_ip: S, dst_ip: D) -> IcmpParseArgs<A> {
IcmpParseArgs { src_ip: src_ip.into(), dst_ip: dst_ip.into() }
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, ()>
for IcmpPacketRaw<I, B, M>
{
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata {
ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0)
}
fn parse<BV: BufferView<B>>(mut buffer: BV, _args: ()) -> ParseResult<Self> {
let header = buffer
.take_obj_front::<Header<M>>()
.ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?;
let message_body = buffer.into_rest();
if header.prefix.msg_type != M::TYPE.into() {
return debug_err!(Err(ParseError::NotExpected), "unexpected message type");
}
Ok(IcmpPacketRaw { header, message_body, _marker: PhantomData })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>>
FromRaw<IcmpPacketRaw<I, B, M>, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M>
{
type Error = ParseError;
fn try_from_raw_with(
raw: IcmpPacketRaw<I, B, M>,
args: IcmpParseArgs<I::Addr>,
) -> ParseResult<Self> {
let IcmpPacketRaw { header, message_body, _marker } = raw;
if !M::Body::EXPECTS_BODY && !message_body.is_empty() {
return debug_err!(Err(ParseError::Format), "unexpected message body");
}
let _: M::Code = M::code_from_u8(header.prefix.code).ok_or_else(debug_err_fn!(
ParseError::Format,
"unrecognized code: {}",
header.prefix.code
))?;
let checksum = Self::compute_checksum(&header, &message_body, args.src_ip, args.dst_ip)
.ok_or_else(debug_err_fn!(ParseError::Format, "packet too large"))?;
if checksum != [0, 0] {
return debug_err!(Err(ParseError::Checksum), "invalid checksum");
}
let message_body = M::Body::parse(message_body)?;
Ok(IcmpPacket { header, message_body, _marker })
}
}
impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, IcmpParseArgs<I::Addr>>
for IcmpPacket<I, B, M> | ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0)
}
fn parse<BV: BufferView<B>>(buffer: BV, args: IcmpParseArgs<I::Addr>) -> ParseResult<Self> {
IcmpPacketRaw::parse(buffer, ()).and_then(|p| IcmpPacket::try_from_raw_with(p, args))
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> {
/// Get the ICMP message.
pub fn message(&self) -> &M {
&self.header.message
}
/// Get the ICMP body.
pub fn body(&self) -> &M::Body {
&self.message_body
}
/// Get the ICMP message code.
///
/// The code provides extra details about the message. Each message type has
/// its own set of codes that are allowed.
pub fn code(&self) -> M::Code {
// infallible since it was validated in parse
M::code_from_u8(self.header.prefix.code).unwrap()
}
/// Construct a builder with the same contents as this packet.
pub fn builder(&self, src_ip: I::Addr, dst_ip: I::Addr) -> IcmpPacketBuilder<I, B, M> {
IcmpPacketBuilder { src_ip, dst_ip, code: self.code(), msg: *self.message() }
}
}
fn compute_checksum_fragmented<
I: IcmpIpExt,
B: ByteSlice,
BB: packet::Fragment,
M: IcmpMessage<I, B>,
>(
header: &Header<M>,
message_body: &FragmentedByteSlice<'_, BB>,
src_ip: I::Addr,
dst_ip: I::Addr,
) -> Option<[u8; 2]> {
let mut c = Checksum::new();
if I::VERSION.is_v6() {
c.add_bytes(src_ip.bytes());
c.add_bytes(dst_ip.bytes());
let icmpv6_len = mem::size_of::<Header<M>>() + message_body.len();
let mut len_bytes = [0; 4];
NetworkEndian::write_u32(&mut len_bytes, icmpv6_len.try_into().ok()?);
c.add_bytes(&len_bytes[..]);
c.add_bytes(&[0, 0, 0]);
c.add_bytes(&[IpProto::Icmpv6.into()]);
}
c.add_bytes(&[header.prefix.msg_type, header.prefix.code]);
c.add_bytes(&header.prefix.checksum);
c.add_bytes(header.message.as_bytes());
for p in message_body.iter_fragments() {
c.add_bytes(p);
}
Some(c.checksum())
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> {
/// Compute the checksum, including the checksum field itself.
///
/// `compute_checksum` returns `None` if the version is IPv6 and the total
/// ICMP packet length overflows a u32.
fn compute_checksum(
header: &Header<M>,
message_body: &[u8],
src_ip: I::Addr,
dst_ip: I::Addr,
) -> Option<[u8; 2]> {
let mut body = [message_body];
compute_checksum_fragmented(header, &body.as_fragmented_byte_slice(), src_ip, dst_ip)
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = OriginalPacket<B>>>
IcmpPacket<I, B, M>
{
/// Get the body of the packet that caused this ICMP message.
///
/// This ICMP message contains some of the bytes of the packet that caused
/// this message to be emitted. `original_packet_body` returns as much of
/// the body of that packet as is contained in this message. For IPv4, this
/// is guaranteed to be 8 bytes. For IPv6, there are no guarantees about the
/// length.
pub fn original_packet_body(&self) -> &[u8] {
self.message_body.body::<I>()
}
/// Returns the original packet that caused this ICMP message.
///
/// This ICMP message contains some of the bytes of the packet that caused
/// this message to be emitted. `original_packet` returns as much of the
/// body of that packet as is contained in this message. For IPv4, this is
/// guaranteed to be 8 bytes. For IPv6, there are no guarantees about the
/// length.
pub fn original_packet(&self) -> &OriginalPacket<B> {
&self.message_body
}
}
impl<B: ByteSlice, M: IcmpMessage<Ipv4, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv4, B, M> {
/// Attempt to partially parse the original packet as an IPv4 packet.
///
/// `f` will be invoked on the result of calling `Ipv4PacketRaw::parse` on
/// the original packet.
pub fn with_original_packet<O, F: FnOnce(Result<Ipv4PacketRaw<&[u8]>, &[u8]>) -> O>(
&self,
f: F,
) -> O {
let mut bv = self.message_body.0.deref();
f(Ipv4PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref()))
}
}
impl<B: ByteSlice, M: IcmpMessage<Ipv6, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv6, B, M> {
/// Attempt to partially parse the original packet as an IPv6 packet.
///
/// `f` will be invoked on the result of calling `Ipv6PacketRaw::parse` on
/// the original packet.
pub fn with_original_packet<O, F: FnOnce(Result<Ipv6PacketRaw<&[u8]>, &[u8]>) -> O>(
&self,
f: F,
) -> O {
let mut bv = self.message_body.0.deref();
f(Ipv6PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref()))
}
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = ndp::Options<B>>> IcmpPacket<I, B, M> {
/// Get the parsed list of NDP options from the ICMP message.
pub fn ndp_options(&self) -> &ndp::Options<B> {
&self.message_body
}
}
/// A builder for ICMP packets.
#[derive(Debug)]
pub struct IcmpPacketBuilder<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> {
src_ip: I::Addr,
dst_ip: I::Addr,
code: M::Code,
msg: M,
}
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketBuilder<I, B, M> {
/// Construct a new `IcmpPacketBuilder`.
pub fn new<S: Into<I::Addr>, D: Into<I::Addr>>(
src_ip: S,
dst_ip: D,
code: M::Code,
msg: M,
) -> IcmpPacketBuilder<I, B, M> {
IcmpPacketBuilder { src_ip: src_ip.into(), dst_ip: dst_ip.into(), code, msg }
}
}
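// Editor's illustrative sketch (not part of the original source): constructing a
// builder and using it to serialize a message. The `IcmpUnusedCode` and
// `IcmpEchoRequest` types and the `into_serializer`/`encapsulate` combinators from
// the `packet` crate are assumptions used only for illustration.
//
// let builder = IcmpPacketBuilder::<Ipv4, &[u8], _>::new(
//     src_ip, dst_ip, IcmpUnusedCode, IcmpEchoRequest::new(id, seq));
// let frame = body.into_serializer().encapsulate(builder);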
// TODO(joshlf): Figure out a way to split body and non-body message types by
// trait and implement PacketBuilder for some and InnerPacketBuilder for others.
impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> PacketBuilder
for IcmpPacketBuilder<I, B, M>
{
fn constraints(&self) -> PacketConstraints {
// The maximum body length constraint ensures that the body length
// doesn't overflow the 32-bit length field in the pseudo-header used
// for calculating the checksum.
//
// Note that, for messages that don't take bodies, it's important that
// we don't just set this to 0. Trying to serialize a body in a message
// type which doesn't take bodies is a programmer error, so we should
// panic in that case. Setting the max_body_len to 0 would surface the
// issue as an MTU error, which would hide the underlying problem.
// Instead, we assert in serialize. Eventually, we will hopefully figure
// out a way to implement InnerPacketBuilder (rather than PacketBuilder)
// for these message types, and this won't be an issue anymore.
PacketConstraints::new(mem::size_of::<Header<M>>(), 0, 0, core::u32::MAX as usize)
}
fn serialize(&self, buffer: &mut SerializeBuffer<'_>) {
use packet::BufferViewMut;
let (mut prefix, message_body, _) = buffer.parts();
// implements BufferViewMut, giving us take_obj_xxx_zero methods
let mut prefix = &mut prefix;
assert!(
M::Body::EXPECTS_BODY || message_body.is_empty(),
"body provided for message that doesn't take a body"
);
// SECURITY: Use | {
type Error = ParseError;
fn parse_metadata(&self) -> ParseMetadata { | random_line_split |
remote_cache.rs | use std::collections::{BTreeMap, HashSet, VecDeque};
use std::ffi::OsString;
use std::path::Component;
use std::sync::Arc;
use std::time::Instant;
use async_trait::async_trait;
use bazel_protos::gen::build::bazel::remote::execution::v2 as remexec;
use bazel_protos::require_digest;
use fs::RelativePath;
use futures::FutureExt;
use grpc_util::headers_to_interceptor_fn;
use grpc_util::status_to_str;
use hashing::Digest;
use parking_lot::Mutex;
use remexec::action_cache_client::ActionCacheClient;
use remexec::{ActionResult, Command, FileNode, Tree};
use store::Store;
use tonic::transport::Channel;
use workunit_store::{with_workunit, Level, Metric, ObservationMetric, WorkunitMetadata};
use crate::remote::make_execute_request;
use crate::{
Context, FallibleProcessResultWithPlatform, MultiPlatformProcess, Platform, Process,
ProcessMetadata, RemoteCacheWarningsBehavior,
};
/// This `CommandRunner` implementation caches results remotely using the Action Cache service
/// of the Remote Execution API.
///
/// This runner expects to sit between the local cache CommandRunner and the CommandRunner
/// that is actually executing the Process. Thus, the local cache will be checked first,
/// then the remote cache, and then execution (local or remote) as necessary if neither cache
/// has a hit. On the way back out of the stack, the result will be stored remotely and
/// then locally.
#[derive(Clone)]
pub struct CommandRunner {
underlying: Arc<dyn crate::CommandRunner>,
metadata: ProcessMetadata,
executor: task_executor::Executor,
store: Store,
action_cache_client: Arc<ActionCacheClient<Channel>>,
headers: BTreeMap<String, String>,
platform: Platform,
cache_read: bool,
cache_write: bool,
eager_fetch: bool,
warnings_behavior: RemoteCacheWarningsBehavior,
read_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>,
write_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>,
}
impl CommandRunner {
pub fn new(
underlying: Arc<dyn crate::CommandRunner>,
metadata: ProcessMetadata,
executor: task_executor::Executor,
store: Store,
action_cache_address: &str,
root_ca_certs: Option<Vec<u8>>,
headers: BTreeMap<String, String>,
platform: Platform,
cache_read: bool,
cache_write: bool,
warnings_behavior: RemoteCacheWarningsBehavior,
eager_fetch: bool,
) -> Result<Self, String> {
let tls_client_config = if action_cache_address.starts_with("https://") {
Some(grpc_util::create_tls_config(root_ca_certs)?)
} else {
None
};
let endpoint = grpc_util::create_endpoint(&action_cache_address, tls_client_config.as_ref())?;
let channel = tonic::transport::Channel::balance_list(vec![endpoint].into_iter());
let action_cache_client = Arc::new(if headers.is_empty() {
ActionCacheClient::new(channel)
} else {
ActionCacheClient::with_interceptor(channel, headers_to_interceptor_fn(&headers)?)
});
Ok(CommandRunner {
underlying,
metadata,
executor,
store,
action_cache_client,
headers,
platform,
cache_read,
cache_write,
eager_fetch,
warnings_behavior,
read_errors_counter: Arc::new(Mutex::new(BTreeMap::new())),
write_errors_counter: Arc::new(Mutex::new(BTreeMap::new())),
})
}
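// Editor's illustrative sketch (not part of the original source): the intended
// layering described in the struct-level comment. The `execution_runner` name and
// the argument values are placeholders, not APIs of this crate; the parameter order
// follows `CommandRunner::new` above. A local-cache CommandRunner would then wrap
// `remote_cached`, so the local cache is consulted first.
//
// let remote_cached: Arc<dyn crate::CommandRunner> = Arc::new(CommandRunner::new(
//     execution_runner, metadata, executor, store.clone(), &action_cache_address,
//     root_ca_certs, headers, platform, /* cache_read */ true, /* cache_write */ true,
//     RemoteCacheWarningsBehavior::FirstOnly, /* eager_fetch */ false,
// )?);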
/// Create a REAPI `Tree` protobuf for an output directory by traversing down from a Pants
/// merged final output directory to find the specific path to extract. (REAPI requires
/// output directories to be stored as `Tree` protos that contain all of the `Directory`
/// protos that constitute the directory tree.)
///
/// Note that the Tree does not include the directory_path as a prefix, per REAPI. This path
/// gets stored on the OutputDirectory proto.
///
/// If the output directory does not exist, then returns Ok(None).
pub(crate) async fn make_tree_for_output_directory(
root_directory_digest: Digest,
directory_path: RelativePath,
store: &Store,
) -> Result<Option<Tree>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the output directory.
let mut current_directory_digest = root_directory_digest;
for next_path_component in directory_path.as_ref().components() {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
// At this point, `current_directory_digest` holds the digest of the output directory.
// This will be the root of the Tree. Add it to a queue of digests to traverse.
let mut tree = Tree::default();
let mut digest_queue = VecDeque::new();
digest_queue.push_back(current_directory_digest);
while let Some(directory_digest) = digest_queue.pop_front() {
let directory = match store.load_directory(directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"illegal state: directory for digest {:?} did not exist locally",
¤t_directory_digest
))
}
};
// Add all of the digests for subdirectories into the queue so they are processed
// in future iterations of the loop.
for subdirectory_node in &directory.directories {
let subdirectory_digest = require_digest(subdirectory_node.digest.as_ref())?;
digest_queue.push_back(subdirectory_digest);
}
// Store this directory either as the `root` or one of the `children` if not the root.
if directory_digest == current_directory_digest | else {
tree.children.push(directory)
}
}
Ok(Some(tree))
}
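// Editor's illustrative sketch (not part of the original source): extracting one
// declared output directory from a merged output digest as a REAPI `Tree`.
// `output_digest` and the path literal are placeholders.
//
// let tree_opt = CommandRunner::make_tree_for_output_directory(
//     output_digest, RelativePath::new("out/reports").unwrap(), &store).await?;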
pub(crate) async fn extract_output_file(
root_directory_digest: Digest,
file_path: RelativePath,
store: &Store,
) -> Result<Option<FileNode>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the parent directory of the output file.
let mut current_directory_digest = root_directory_digest;
let parent_path = file_path.as_ref().parent();
let components_opt = parent_path.map(|x| x.components());
if let Some(components) = components_opt {
for next_path_component in components {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
}
// Load the final directory.
let directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => return Ok(None),
};
// Search for the file.
let file_base_name = file_path.as_ref().file_name().unwrap();
Ok(
directory
.files
.iter()
.find(|node| {
let name = OsString::from(&node.name);
name == file_base_name
})
.cloned(),
)
}
/// Converts a REAPI `Command` and a `FallibleProcessResultWithPlatform` produced from executing
/// that Command into a REAPI `ActionResult` suitable for upload to the REAPI Action Cache.
///
/// This function also returns a vector of all `Digest`s referenced directly and indirectly by
/// the `ActionResult` suitable for passing to `Store::ensure_remote_has_recursive`. (The
/// digests may include both File and Tree digests.)
pub(crate) async fn make_action_result(
&self,
command: &Command,
result: &FallibleProcessResultWithPlatform,
store: &Store,
) -> Result<(ActionResult, Vec<Digest>), String> {
// Keep track of digests that need to be uploaded.
let mut digests = HashSet::new();
let mut action_result = ActionResult {
exit_code: result.exit_code,
stdout_digest: Some(result.stdout_digest.into()),
stderr_digest: Some(result.stderr_digest.into()),
execution_metadata: Some(result.metadata.clone().into()),
..ActionResult::default()
};
digests.insert(result.stdout_digest);
digests.insert(result.stderr_digest);
for output_directory in &command.output_directories {
let tree = match Self::make_tree_for_output_directory(
result.output_directory,
RelativePath::new(output_directory).unwrap(),
store,
)
.await?
{
Some(t) => t,
None => continue,
};
let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?;
digests.insert(tree_digest);
action_result
.output_directories
.push(remexec::OutputDirectory {
path: output_directory.to_owned(),
tree_digest: Some(tree_digest.into()),
});
}
for output_file in &command.output_files {
let file_node = match Self::extract_output_file(
result.output_directory,
RelativePath::new(output_file).unwrap(),
store,
)
.await?
{
Some(node) => node,
None => continue,
};
let digest = require_digest(file_node.digest.as_ref())?;
digests.insert(digest);
action_result.output_files.push({
remexec::OutputFile {
digest: Some(digest.into()),
path: output_file.to_owned(),
is_executable: file_node.is_executable,
..remexec::OutputFile::default()
}
})
}
Ok((action_result, digests.into_iter().collect::<Vec<_>>()))
}
/// Stores an execution result into the remote Action Cache.
async fn update_action_cache(
&self,
context: &Context,
request: &Process,
result: &FallibleProcessResultWithPlatform,
metadata: &ProcessMetadata,
command: &Command,
action_digest: Digest,
command_digest: Digest,
) -> Result<(), String> {
// Upload the action (and related data, i.e. the embedded command and input files).
// Assumption: The Action and related data has already been stored locally.
with_workunit(
context.workunit_store.clone(),
"ensure_action_uploaded".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action uploaded for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_uploaded(
&self.store,
command_digest,
action_digest,
request.input_files,
),
|_, md| md,
)
.await?;
// Create an ActionResult from the process result.
let (action_result, digests_for_action_result) = self
.make_action_result(command, result, &self.store)
.await?;
    // Ensure that all digests referenced directly and indirectly by the ActionResult
// have been uploaded to the remote cache.
self
.store
.ensure_remote_has_recursive(digests_for_action_result)
.await?;
let update_action_cache_request = remexec::UpdateActionResultRequest {
instance_name: metadata
.instance_name
.as_ref()
.cloned()
.unwrap_or_else(|| "".to_owned()),
action_digest: Some(action_digest.into()),
action_result: Some(action_result),
..remexec::UpdateActionResultRequest::default()
};
let mut client = self.action_cache_client.as_ref().clone();
client
.update_action_result(update_action_cache_request)
.await
.map_err(status_to_str)?;
Ok(())
}
fn log_cache_error(&self, err: String, err_type: CacheErrorType) {
let err_count = {
let mut errors_counter = match err_type {
CacheErrorType::ReadError => self.read_errors_counter.lock(),
CacheErrorType::WriteError => self.write_errors_counter.lock(),
};
let count = errors_counter.entry(err.clone()).or_insert(0);
*count += 1;
*count
};
let failure_desc = match err_type {
CacheErrorType::ReadError => "read from",
CacheErrorType::WriteError => "write to",
};
let log_msg = format!(
"Failed to {} remote cache ({} occurrences so far): {}",
failure_desc, err_count, err
);
let log_at_warn = match self.warnings_behavior {
RemoteCacheWarningsBehavior::Ignore => false,
RemoteCacheWarningsBehavior::FirstOnly => err_count == 1,
RemoteCacheWarningsBehavior::Backoff => err_count.is_power_of_two(),
};
if log_at_warn {
log::warn!("{}", log_msg);
} else {
log::debug!("{}", log_msg);
}
}
}
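// Illustrative sketch, std only: the `Backoff` arm in `log_cache_error` above warns on
// the 1st, 2nd, 4th, 8th, ... occurrence of a given error, because
// `is_power_of_two()` thins repeated warnings out logarithmically. A minimal standalone
// model of that decision (the function name here is hypothetical):
#[allow(dead_code)]
fn warns_under_backoff(err_count: u64) -> bool {
  // Mirrors the `RemoteCacheWarningsBehavior::Backoff` check used above.
  err_count.is_power_of_two()
}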
enum CacheErrorType {
ReadError,
WriteError,
}
#[async_trait]
impl crate::CommandRunner for CommandRunner {
async fn run(
&self,
req: MultiPlatformProcess,
context: Context,
) -> Result<FallibleProcessResultWithPlatform, String> {
let cache_lookup_start = Instant::now();
// Construct the REv2 ExecuteRequest and related data for this execution request.
let request = self
.extract_compatible_request(&req)
.ok_or_else(|| "No compatible Process found for checking remote cache.".to_owned())?;
let (action, command, _execute_request) =
make_execute_request(&request, self.metadata.clone())?;
// Ensure the action and command are stored locally.
let (command_digest, action_digest) = with_workunit(
context.workunit_store.clone(),
"ensure_action_stored_locally".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action stored locally for {:?}", action)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_stored_locally(&self.store, &command, &action),
|_, md| md,
)
.await?;
let mut local_execution_future = self.underlying.run(req, context.clone());
let result = if self.cache_read {
// A future to read from the cache and log the results accordingly.
let cache_read_future = async {
let response = with_workunit(
context.workunit_store.clone(),
"check_action_cache".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("check action cache for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::check_action_cache(
action_digest,
&self.metadata,
self.platform,
&context,
self.action_cache_client.clone(),
self.store.clone(),
self.eager_fetch,
),
|_, md| md,
)
.await;
match response {
Ok(cached_response_opt) => {
log::debug!(
"remote cache response: digest={:?}: {:?}",
action_digest,
cached_response_opt
);
cached_response_opt
}
Err(err) => {
self.log_cache_error(err, CacheErrorType::ReadError);
None
}
}
}
.boxed();
// We speculate between reading from the remote cache vs. running locally. If there was a
// cache hit, we return early because there will be no need to write to the cache. Otherwise,
// we run the process locally and will possibly write it to the cache later.
tokio::select! {
cache_result = cache_read_future => {
if let Some(cached_response) = cache_result {
let lookup_elapsed = cache_lookup_start.elapsed();
context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationRemoteCompletedFirst, 1);
if let Some(time_saved) = cached_response.metadata.time_saved_from_cache(lookup_elapsed) {
let time_saved = time_saved.as_millis() as u64;
context
.workunit_store
.increment_counter(Metric::RemoteCacheTotalTimeSavedMs, time_saved);
context
.workunit_store
.record_observation(ObservationMetric::RemoteCacheTimeSavedMs, time_saved);
}
return Ok(cached_response);
} else {
// Note that we don't increment a counter here, as there is nothing of note in this
// scenario: the remote cache did not save unnecessary local work, nor was the remote
// trip unusually slow such that local execution was faster.
local_execution_future.await?
}
}
local_result = &mut local_execution_future => {
context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationLocalCompletedFirst, 1);
local_result?
}
}
} else {
local_execution_future.await?
};
if result.exit_code == 0 && self.cache_write {
let command_runner = self.clone();
let result = result.clone();
let context2 = context.clone();
// NB: We use `TaskExecutor::spawn` instead of `tokio::spawn` to ensure logging still works.
let cache_write_future = async move {
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteStarted, 1);
let write_result = command_runner
.update_action_cache(
&context2,
&request,
&result,
&command_runner.metadata,
&command,
action_digest,
command_digest,
)
.await;
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteFinished, 1);
if let Err(err) = write_result {
command_runner.log_cache_error(err, CacheErrorType::WriteError);
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteErrors, 1);
};
}
.boxed();
let _write_join = self.executor.spawn(with_workunit(
context.workunit_store,
"remote_cache_write".to_owned(),
WorkunitMetadata {
level: Level::Trace,
..WorkunitMetadata::default()
},
cache_write_future,
|_, md| md,
));
}
Ok(result)
}
fn extract_compatible_request(&self, req: &MultiPlatformProcess) -> Option<Process> {
self.underlying.extract_compatible_request(req)
}
}
| {
tree.root = Some(directory);
} | conditional_block |
remote_cache.rs | use std::collections::{BTreeMap, HashSet, VecDeque};
use std::ffi::OsString;
use std::path::Component;
use std::sync::Arc;
use std::time::Instant;
use async_trait::async_trait;
use bazel_protos::gen::build::bazel::remote::execution::v2 as remexec;
use bazel_protos::require_digest;
use fs::RelativePath;
use futures::FutureExt;
use grpc_util::headers_to_interceptor_fn;
use grpc_util::status_to_str;
use hashing::Digest;
use parking_lot::Mutex;
use remexec::action_cache_client::ActionCacheClient;
use remexec::{ActionResult, Command, FileNode, Tree};
use store::Store;
use tonic::transport::Channel;
use workunit_store::{with_workunit, Level, Metric, ObservationMetric, WorkunitMetadata};
use crate::remote::make_execute_request;
use crate::{
Context, FallibleProcessResultWithPlatform, MultiPlatformProcess, Platform, Process,
ProcessMetadata, RemoteCacheWarningsBehavior,
};
/// This `CommandRunner` implementation caches results remotely using the Action Cache service
/// of the Remote Execution API.
///
/// This runner expects to sit between the local cache CommandRunner and the CommandRunner
/// that is actually executing the Process. Thus, the local cache will be checked first,
/// then the remote cache, and then execution (local or remote) as necessary if neither cache
/// has a hit. On the way back out of the stack, the result will be stored remotely and
/// then locally.
#[derive(Clone)]
pub struct CommandRunner {
underlying: Arc<dyn crate::CommandRunner>,
metadata: ProcessMetadata,
executor: task_executor::Executor,
store: Store,
action_cache_client: Arc<ActionCacheClient<Channel>>,
headers: BTreeMap<String, String>,
platform: Platform,
cache_read: bool,
cache_write: bool,
eager_fetch: bool,
warnings_behavior: RemoteCacheWarningsBehavior,
read_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>,
write_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>,
}
impl CommandRunner {
pub fn new(
underlying: Arc<dyn crate::CommandRunner>,
metadata: ProcessMetadata,
executor: task_executor::Executor,
store: Store,
action_cache_address: &str,
root_ca_certs: Option<Vec<u8>>,
headers: BTreeMap<String, String>,
platform: Platform,
cache_read: bool,
cache_write: bool,
warnings_behavior: RemoteCacheWarningsBehavior,
eager_fetch: bool,
) -> Result<Self, String> {
let tls_client_config = if action_cache_address.starts_with("https://") {
Some(grpc_util::create_tls_config(root_ca_certs)?)
} else {
None
};
let endpoint = grpc_util::create_endpoint(&action_cache_address, tls_client_config.as_ref())?;
let channel = tonic::transport::Channel::balance_list(vec![endpoint].into_iter());
let action_cache_client = Arc::new(if headers.is_empty() {
ActionCacheClient::new(channel)
} else {
ActionCacheClient::with_interceptor(channel, headers_to_interceptor_fn(&headers)?)
});
Ok(CommandRunner {
underlying,
metadata,
executor,
store,
action_cache_client,
headers,
platform,
cache_read,
cache_write,
eager_fetch,
warnings_behavior,
read_errors_counter: Arc::new(Mutex::new(BTreeMap::new())),
write_errors_counter: Arc::new(Mutex::new(BTreeMap::new())),
})
}
/// Create a REAPI `Tree` protobuf for an output directory by traversing down from a Pants
/// merged final output directory to find the specific path to extract. (REAPI requires
/// output directories to be stored as `Tree` protos that contain all of the `Directory`
/// protos that constitute the directory tree.)
///
/// Note that the Tree does not include the directory_path as a prefix, per REAPI. This path
/// gets stored on the OutputDirectory proto.
///
/// If the output directory does not exist, then returns Ok(None).
pub(crate) async fn make_tree_for_output_directory(
root_directory_digest: Digest,
directory_path: RelativePath,
store: &Store,
) -> Result<Option<Tree>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the output directory.
let mut current_directory_digest = root_directory_digest;
for next_path_component in directory_path.as_ref().components() {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
// At this point, `current_directory_digest` holds the digest of the output directory.
// This will be the root of the Tree. Add it to a queue of digests to traverse.
let mut tree = Tree::default();
let mut digest_queue = VecDeque::new();
digest_queue.push_back(current_directory_digest);
while let Some(directory_digest) = digest_queue.pop_front() {
let directory = match store.load_directory(directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"illegal state: directory for digest {:?} did not exist locally",
            &current_directory_digest
))
}
};
// Add all of the digests for subdirectories into the queue so they are processed
// in future iterations of the loop.
for subdirectory_node in &directory.directories {
let subdirectory_digest = require_digest(subdirectory_node.digest.as_ref())?;
digest_queue.push_back(subdirectory_digest);
}
// Store this directory either as the `root` or one of the `children` if not the root.
if directory_digest == current_directory_digest {
tree.root = Some(directory);
} else {
tree.children.push(directory)
}
}
Ok(Some(tree))
}
pub(crate) async fn extract_output_file(
root_directory_digest: Digest,
file_path: RelativePath,
store: &Store,
) -> Result<Option<FileNode>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the output directory.
let mut current_directory_digest = root_directory_digest;
let parent_path = file_path.as_ref().parent();
let components_opt = parent_path.map(|x| x.components());
if let Some(components) = components_opt {
for next_path_component in components {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
}
// Load the final directory.
let directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => return Ok(None),
};
// Search for the file.
let file_base_name = file_path.as_ref().file_name().unwrap();
Ok(
directory
.files
.iter()
.find(|node| {
let name = OsString::from(&node.name);
name == file_base_name
})
.cloned(),
)
}
/// Converts a REAPI `Command` and a `FallibleProcessResultWithPlatform` produced from executing
/// that Command into a REAPI `ActionResult` suitable for upload to the REAPI Action Cache.
///
/// This function also returns a vector of all `Digest`s referenced directly and indirectly by
/// the `ActionResult` suitable for passing to `Store::ensure_remote_has_recursive`. (The
/// digests may include both File and Tree digests.)
pub(crate) async fn make_action_result(
&self,
command: &Command,
result: &FallibleProcessResultWithPlatform,
store: &Store,
) -> Result<(ActionResult, Vec<Digest>), String> {
// Keep track of digests that need to be uploaded.
let mut digests = HashSet::new();
let mut action_result = ActionResult {
exit_code: result.exit_code,
stdout_digest: Some(result.stdout_digest.into()),
stderr_digest: Some(result.stderr_digest.into()),
execution_metadata: Some(result.metadata.clone().into()),
..ActionResult::default()
};
digests.insert(result.stdout_digest);
digests.insert(result.stderr_digest);
for output_directory in &command.output_directories {
let tree = match Self::make_tree_for_output_directory(
result.output_directory,
RelativePath::new(output_directory).unwrap(),
store,
)
.await?
{
Some(t) => t,
None => continue,
};
let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?;
digests.insert(tree_digest);
action_result
.output_directories
.push(remexec::OutputDirectory {
path: output_directory.to_owned(),
tree_digest: Some(tree_digest.into()),
});
}
for output_file in &command.output_files {
let file_node = match Self::extract_output_file(
result.output_directory,
RelativePath::new(output_file).unwrap(),
store,
)
.await?
{
Some(node) => node,
None => continue,
};
let digest = require_digest(file_node.digest.as_ref())?;
digests.insert(digest);
action_result.output_files.push({
remexec::OutputFile {
digest: Some(digest.into()),
path: output_file.to_owned(),
is_executable: file_node.is_executable,
..remexec::OutputFile::default()
}
})
}
Ok((action_result, digests.into_iter().collect::<Vec<_>>()))
}
/// Stores an execution result into the remote Action Cache.
async fn update_action_cache(
&self,
context: &Context,
request: &Process,
result: &FallibleProcessResultWithPlatform,
metadata: &ProcessMetadata,
command: &Command,
action_digest: Digest,
command_digest: Digest,
) -> Result<(), String> {
// Upload the action (and related data, i.e. the embedded command and input files).
// Assumption: The Action and related data has already been stored locally.
with_workunit(
context.workunit_store.clone(),
"ensure_action_uploaded".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action uploaded for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_uploaded(
&self.store,
command_digest,
action_digest,
request.input_files,
),
|_, md| md,
)
.await?;
// Create an ActionResult from the process result.
let (action_result, digests_for_action_result) = self
.make_action_result(command, result, &self.store)
.await?;
    // Ensure that all digests referenced directly and indirectly by the ActionResult
// have been uploaded to the remote cache.
self
.store
.ensure_remote_has_recursive(digests_for_action_result)
.await?;
let update_action_cache_request = remexec::UpdateActionResultRequest {
instance_name: metadata
.instance_name
.as_ref()
.cloned()
.unwrap_or_else(|| "".to_owned()),
action_digest: Some(action_digest.into()),
action_result: Some(action_result),
..remexec::UpdateActionResultRequest::default()
};
let mut client = self.action_cache_client.as_ref().clone();
client
.update_action_result(update_action_cache_request)
.await |
Ok(())
}
fn log_cache_error(&self, err: String, err_type: CacheErrorType) {
let err_count = {
let mut errors_counter = match err_type {
CacheErrorType::ReadError => self.read_errors_counter.lock(),
CacheErrorType::WriteError => self.write_errors_counter.lock(),
};
let count = errors_counter.entry(err.clone()).or_insert(0);
*count += 1;
*count
};
let failure_desc = match err_type {
CacheErrorType::ReadError => "read from",
CacheErrorType::WriteError => "write to",
};
let log_msg = format!(
"Failed to {} remote cache ({} occurrences so far): {}",
failure_desc, err_count, err
);
let log_at_warn = match self.warnings_behavior {
RemoteCacheWarningsBehavior::Ignore => false,
RemoteCacheWarningsBehavior::FirstOnly => err_count == 1,
RemoteCacheWarningsBehavior::Backoff => err_count.is_power_of_two(),
};
if log_at_warn {
log::warn!("{}", log_msg);
} else {
log::debug!("{}", log_msg);
}
}
}
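// Illustrative sketch, std only: `make_tree_for_output_directory` above flattens a
// directory graph into a `Tree` by breadth-first traversal over a queue of digests.
// The same shape, with the store replaced by an in-memory map from a node id to its
// child ids (all names here are hypothetical):
#[allow(dead_code)]
fn flatten_breadth_first(
  root: u32,
  children: &std::collections::HashMap<u32, Vec<u32>>,
) -> Vec<u32> {
  use std::collections::VecDeque;
  let mut visited = Vec::new();
  let mut queue = VecDeque::new();
  queue.push_back(root);
  while let Some(node) = queue.pop_front() {
    // Record the node, then enqueue its children so they are handled in later
    // iterations, just as the real code enqueues sub-directory digests.
    visited.push(node);
    for child in children.get(&node).into_iter().flatten() {
      queue.push_back(*child);
    }
  }
  visited
}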
enum CacheErrorType {
ReadError,
WriteError,
}
#[async_trait]
impl crate::CommandRunner for CommandRunner {
async fn run(
&self,
req: MultiPlatformProcess,
context: Context,
) -> Result<FallibleProcessResultWithPlatform, String> {
let cache_lookup_start = Instant::now();
// Construct the REv2 ExecuteRequest and related data for this execution request.
let request = self
.extract_compatible_request(&req)
.ok_or_else(|| "No compatible Process found for checking remote cache.".to_owned())?;
let (action, command, _execute_request) =
make_execute_request(&request, self.metadata.clone())?;
// Ensure the action and command are stored locally.
let (command_digest, action_digest) = with_workunit(
context.workunit_store.clone(),
"ensure_action_stored_locally".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action stored locally for {:?}", action)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_stored_locally(&self.store, &command, &action),
|_, md| md,
)
.await?;
let mut local_execution_future = self.underlying.run(req, context.clone());
let result = if self.cache_read {
// A future to read from the cache and log the results accordingly.
let cache_read_future = async {
let response = with_workunit(
context.workunit_store.clone(),
"check_action_cache".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("check action cache for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::check_action_cache(
action_digest,
&self.metadata,
self.platform,
&context,
self.action_cache_client.clone(),
self.store.clone(),
self.eager_fetch,
),
|_, md| md,
)
.await;
match response {
Ok(cached_response_opt) => {
log::debug!(
"remote cache response: digest={:?}: {:?}",
action_digest,
cached_response_opt
);
cached_response_opt
}
Err(err) => {
self.log_cache_error(err, CacheErrorType::ReadError);
None
}
}
}
.boxed();
// We speculate between reading from the remote cache vs. running locally. If there was a
// cache hit, we return early because there will be no need to write to the cache. Otherwise,
// we run the process locally and will possibly write it to the cache later.
tokio::select! {
cache_result = cache_read_future => {
if let Some(cached_response) = cache_result {
let lookup_elapsed = cache_lookup_start.elapsed();
context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationRemoteCompletedFirst, 1);
if let Some(time_saved) = cached_response.metadata.time_saved_from_cache(lookup_elapsed) {
let time_saved = time_saved.as_millis() as u64;
context
.workunit_store
.increment_counter(Metric::RemoteCacheTotalTimeSavedMs, time_saved);
context
.workunit_store
.record_observation(ObservationMetric::RemoteCacheTimeSavedMs, time_saved);
}
return Ok(cached_response);
} else {
// Note that we don't increment a counter here, as there is nothing of note in this
// scenario: the remote cache did not save unnecessary local work, nor was the remote
// trip unusually slow such that local execution was faster.
local_execution_future.await?
}
}
local_result = &mut local_execution_future => {
context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationLocalCompletedFirst, 1);
local_result?
}
}
} else {
local_execution_future.await?
};
if result.exit_code == 0 && self.cache_write {
let command_runner = self.clone();
let result = result.clone();
let context2 = context.clone();
// NB: We use `TaskExecutor::spawn` instead of `tokio::spawn` to ensure logging still works.
let cache_write_future = async move {
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteStarted, 1);
let write_result = command_runner
.update_action_cache(
&context2,
&request,
&result,
&command_runner.metadata,
&command,
action_digest,
command_digest,
)
.await;
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteFinished, 1);
if let Err(err) = write_result {
command_runner.log_cache_error(err, CacheErrorType::WriteError);
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteErrors, 1);
};
}
.boxed();
let _write_join = self.executor.spawn(with_workunit(
context.workunit_store,
"remote_cache_write".to_owned(),
WorkunitMetadata {
level: Level::Trace,
..WorkunitMetadata::default()
},
cache_write_future,
|_, md| md,
));
}
Ok(result)
}
fn extract_compatible_request(&self, req: &MultiPlatformProcess) -> Option<Process> {
self.underlying.extract_compatible_request(req)
}
} | .map_err(status_to_str)?; | random_line_split |
remote_cache.rs | use std::collections::{BTreeMap, HashSet, VecDeque};
use std::ffi::OsString;
use std::path::Component;
use std::sync::Arc;
use std::time::Instant;
use async_trait::async_trait;
use bazel_protos::gen::build::bazel::remote::execution::v2 as remexec;
use bazel_protos::require_digest;
use fs::RelativePath;
use futures::FutureExt;
use grpc_util::headers_to_interceptor_fn;
use grpc_util::status_to_str;
use hashing::Digest;
use parking_lot::Mutex;
use remexec::action_cache_client::ActionCacheClient;
use remexec::{ActionResult, Command, FileNode, Tree};
use store::Store;
use tonic::transport::Channel;
use workunit_store::{with_workunit, Level, Metric, ObservationMetric, WorkunitMetadata};
use crate::remote::make_execute_request;
use crate::{
Context, FallibleProcessResultWithPlatform, MultiPlatformProcess, Platform, Process,
ProcessMetadata, RemoteCacheWarningsBehavior,
};
/// This `CommandRunner` implementation caches results remotely using the Action Cache service
/// of the Remote Execution API.
///
/// This runner expects to sit between the local cache CommandRunner and the CommandRunner
/// that is actually executing the Process. Thus, the local cache will be checked first,
/// then the remote cache, and then execution (local or remote) as necessary if neither cache
/// has a hit. On the way back out of the stack, the result will be stored remotely and
/// then locally.
#[derive(Clone)]
pub struct CommandRunner {
underlying: Arc<dyn crate::CommandRunner>,
metadata: ProcessMetadata,
executor: task_executor::Executor,
store: Store,
action_cache_client: Arc<ActionCacheClient<Channel>>,
headers: BTreeMap<String, String>,
platform: Platform,
cache_read: bool,
cache_write: bool,
eager_fetch: bool,
warnings_behavior: RemoteCacheWarningsBehavior,
read_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>,
write_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>,
}
impl CommandRunner {
pub fn new(
underlying: Arc<dyn crate::CommandRunner>,
metadata: ProcessMetadata,
executor: task_executor::Executor,
store: Store,
action_cache_address: &str,
root_ca_certs: Option<Vec<u8>>,
headers: BTreeMap<String, String>,
platform: Platform,
cache_read: bool,
cache_write: bool,
warnings_behavior: RemoteCacheWarningsBehavior,
eager_fetch: bool,
) -> Result<Self, String> {
let tls_client_config = if action_cache_address.starts_with("https://") {
Some(grpc_util::create_tls_config(root_ca_certs)?)
} else {
None
};
let endpoint = grpc_util::create_endpoint(&action_cache_address, tls_client_config.as_ref())?;
let channel = tonic::transport::Channel::balance_list(vec![endpoint].into_iter());
let action_cache_client = Arc::new(if headers.is_empty() {
ActionCacheClient::new(channel)
} else {
ActionCacheClient::with_interceptor(channel, headers_to_interceptor_fn(&headers)?)
});
Ok(CommandRunner {
underlying,
metadata,
executor,
store,
action_cache_client,
headers,
platform,
cache_read,
cache_write,
eager_fetch,
warnings_behavior,
read_errors_counter: Arc::new(Mutex::new(BTreeMap::new())),
write_errors_counter: Arc::new(Mutex::new(BTreeMap::new())),
})
}
/// Create a REAPI `Tree` protobuf for an output directory by traversing down from a Pants
/// merged final output directory to find the specific path to extract. (REAPI requires
/// output directories to be stored as `Tree` protos that contain all of the `Directory`
/// protos that constitute the directory tree.)
///
/// Note that the Tree does not include the directory_path as a prefix, per REAPI. This path
/// gets stored on the OutputDirectory proto.
///
/// If the output directory does not exist, then returns Ok(None).
pub(crate) async fn make_tree_for_output_directory(
root_directory_digest: Digest,
directory_path: RelativePath,
store: &Store,
) -> Result<Option<Tree>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the output directory.
let mut current_directory_digest = root_directory_digest;
for next_path_component in directory_path.as_ref().components() {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
// At this point, `current_directory_digest` holds the digest of the output directory.
// This will be the root of the Tree. Add it to a queue of digests to traverse.
let mut tree = Tree::default();
let mut digest_queue = VecDeque::new();
digest_queue.push_back(current_directory_digest);
while let Some(directory_digest) = digest_queue.pop_front() {
let directory = match store.load_directory(directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"illegal state: directory for digest {:?} did not exist locally",
            &current_directory_digest
))
}
};
// Add all of the digests for subdirectories into the queue so they are processed
// in future iterations of the loop.
for subdirectory_node in &directory.directories {
let subdirectory_digest = require_digest(subdirectory_node.digest.as_ref())?;
digest_queue.push_back(subdirectory_digest);
}
// Store this directory either as the `root` or one of the `children` if not the root.
if directory_digest == current_directory_digest {
tree.root = Some(directory);
} else {
tree.children.push(directory)
}
}
Ok(Some(tree))
}
pub(crate) async fn extract_output_file(
root_directory_digest: Digest,
file_path: RelativePath,
store: &Store,
) -> Result<Option<FileNode>, String> {
// Traverse down from the root directory digest to find the directory digest for
// the output directory.
let mut current_directory_digest = root_directory_digest;
let parent_path = file_path.as_ref().parent();
let components_opt = parent_path.map(|x| x.components());
if let Some(components) = components_opt {
for next_path_component in components {
let next_name = match next_path_component {
Component::Normal(name) => name
.to_str()
.ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
_ => return Ok(None),
};
// Load the Directory proto corresponding to `current_directory_digest`.
let current_directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => {
return Err(format!(
"Directory digest {:?} was referenced in output, but was not found in store.",
current_directory_digest
))
}
};
// Scan the current directory for the current path component.
let dir_node = match current_directory
.directories
.iter()
.find(|dn| dn.name == next_name)
{
Some(dn) => dn,
None => return Ok(None),
};
// Set the current directory digest to be the digest in the DirectoryNode just found.
// If there are more path components, then the search will continue there.
// Otherwise, if this loop ends then the final Directory digest has been found.
current_directory_digest = require_digest(dir_node.digest.as_ref())?;
}
}
// Load the final directory.
let directory = match store.load_directory(current_directory_digest).await? {
Some((dir, _)) => dir,
None => return Ok(None),
};
// Search for the file.
let file_base_name = file_path.as_ref().file_name().unwrap();
Ok(
directory
.files
.iter()
.find(|node| {
let name = OsString::from(&node.name);
name == file_base_name
})
.cloned(),
)
}
/// Converts a REAPI `Command` and a `FallibleProcessResultWithPlatform` produced from executing
/// that Command into a REAPI `ActionResult` suitable for upload to the REAPI Action Cache.
///
/// This function also returns a vector of all `Digest`s referenced directly and indirectly by
/// the `ActionResult` suitable for passing to `Store::ensure_remote_has_recursive`. (The
/// digests may include both File and Tree digests.)
pub(crate) async fn make_action_result(
&self,
command: &Command,
result: &FallibleProcessResultWithPlatform,
store: &Store,
) -> Result<(ActionResult, Vec<Digest>), String> {
// Keep track of digests that need to be uploaded.
let mut digests = HashSet::new();
let mut action_result = ActionResult {
exit_code: result.exit_code,
stdout_digest: Some(result.stdout_digest.into()),
stderr_digest: Some(result.stderr_digest.into()),
execution_metadata: Some(result.metadata.clone().into()),
..ActionResult::default()
};
digests.insert(result.stdout_digest);
digests.insert(result.stderr_digest);
for output_directory in &command.output_directories {
let tree = match Self::make_tree_for_output_directory(
result.output_directory,
RelativePath::new(output_directory).unwrap(),
store,
)
.await?
{
Some(t) => t,
None => continue,
};
let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?;
digests.insert(tree_digest);
action_result
.output_directories
.push(remexec::OutputDirectory {
path: output_directory.to_owned(),
tree_digest: Some(tree_digest.into()),
});
}
for output_file in &command.output_files {
let file_node = match Self::extract_output_file(
result.output_directory,
RelativePath::new(output_file).unwrap(),
store,
)
.await?
{
Some(node) => node,
None => continue,
};
let digest = require_digest(file_node.digest.as_ref())?;
digests.insert(digest);
action_result.output_files.push({
remexec::OutputFile {
digest: Some(digest.into()),
path: output_file.to_owned(),
is_executable: file_node.is_executable,
..remexec::OutputFile::default()
}
})
}
Ok((action_result, digests.into_iter().collect::<Vec<_>>()))
}
/// Stores an execution result into the remote Action Cache.
async fn update_action_cache(
&self,
context: &Context,
request: &Process,
result: &FallibleProcessResultWithPlatform,
metadata: &ProcessMetadata,
command: &Command,
action_digest: Digest,
command_digest: Digest,
) -> Result<(), String> {
// Upload the action (and related data, i.e. the embedded command and input files).
// Assumption: The Action and related data has already been stored locally.
with_workunit(
context.workunit_store.clone(),
"ensure_action_uploaded".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action uploaded for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_uploaded(
&self.store,
command_digest,
action_digest,
request.input_files,
),
|_, md| md,
)
.await?;
// Create an ActionResult from the process result.
let (action_result, digests_for_action_result) = self
.make_action_result(command, result, &self.store)
.await?;
    // Ensure that all digests referenced directly and indirectly by the ActionResult
// have been uploaded to the remote cache.
self
.store
.ensure_remote_has_recursive(digests_for_action_result)
.await?;
let update_action_cache_request = remexec::UpdateActionResultRequest {
instance_name: metadata
.instance_name
.as_ref()
.cloned()
.unwrap_or_else(|| "".to_owned()),
action_digest: Some(action_digest.into()),
action_result: Some(action_result),
..remexec::UpdateActionResultRequest::default()
};
let mut client = self.action_cache_client.as_ref().clone();
client
.update_action_result(update_action_cache_request)
.await
.map_err(status_to_str)?;
Ok(())
}
fn log_cache_error(&self, err: String, err_type: CacheErrorType) {
let err_count = {
let mut errors_counter = match err_type {
CacheErrorType::ReadError => self.read_errors_counter.lock(),
CacheErrorType::WriteError => self.write_errors_counter.lock(),
};
let count = errors_counter.entry(err.clone()).or_insert(0);
*count += 1;
*count
};
let failure_desc = match err_type {
CacheErrorType::ReadError => "read from",
CacheErrorType::WriteError => "write to",
};
let log_msg = format!(
"Failed to {} remote cache ({} occurrences so far): {}",
failure_desc, err_count, err
);
let log_at_warn = match self.warnings_behavior {
RemoteCacheWarningsBehavior::Ignore => false,
RemoteCacheWarningsBehavior::FirstOnly => err_count == 1,
RemoteCacheWarningsBehavior::Backoff => err_count.is_power_of_two(),
};
if log_at_warn {
log::warn!("{}", log_msg);
} else {
log::debug!("{}", log_msg);
}
}
}
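// Illustrative sketch, std only: `extract_output_file` above walks the parent
// components of a relative path and then matches on the base name. The same split,
// isolated from the store (the function name here is hypothetical; the real code
// returns Ok(None) for non-normal components instead of skipping them):
#[allow(dead_code)]
fn split_parent_components(path: &std::path::Path) -> (Vec<String>, Option<String>) {
  use std::path::Component;
  let dirs: Vec<String> = path
    .parent()
    .map(|parent| {
      parent
        .components()
        .filter_map(|component| match component {
          Component::Normal(name) => name.to_str().map(|s| s.to_owned()),
          _ => None,
        })
        .collect()
    })
    .unwrap_or_default();
  let base = path.file_name().and_then(|n| n.to_str()).map(|s| s.to_owned());
  (dirs, base)
}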
enum CacheErrorType {
ReadError,
WriteError,
}
#[async_trait]
impl crate::CommandRunner for CommandRunner {
async fn run(
&self,
req: MultiPlatformProcess,
context: Context,
) -> Result<FallibleProcessResultWithPlatform, String> {
let cache_lookup_start = Instant::now();
// Construct the REv2 ExecuteRequest and related data for this execution request.
let request = self
.extract_compatible_request(&req)
.ok_or_else(|| "No compatible Process found for checking remote cache.".to_owned())?;
let (action, command, _execute_request) =
make_execute_request(&request, self.metadata.clone())?;
// Ensure the action and command are stored locally.
let (command_digest, action_digest) = with_workunit(
context.workunit_store.clone(),
"ensure_action_stored_locally".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("ensure action stored locally for {:?}", action)),
..WorkunitMetadata::default()
},
crate::remote::ensure_action_stored_locally(&self.store, &command, &action),
|_, md| md,
)
.await?;
let mut local_execution_future = self.underlying.run(req, context.clone());
let result = if self.cache_read {
// A future to read from the cache and log the results accordingly.
let cache_read_future = async {
let response = with_workunit(
context.workunit_store.clone(),
"check_action_cache".to_owned(),
WorkunitMetadata {
level: Level::Trace,
desc: Some(format!("check action cache for {:?}", action_digest)),
..WorkunitMetadata::default()
},
crate::remote::check_action_cache(
action_digest,
&self.metadata,
self.platform,
&context,
self.action_cache_client.clone(),
self.store.clone(),
self.eager_fetch,
),
|_, md| md,
)
.await;
match response {
Ok(cached_response_opt) => {
log::debug!(
"remote cache response: digest={:?}: {:?}",
action_digest,
cached_response_opt
);
cached_response_opt
}
Err(err) => {
self.log_cache_error(err, CacheErrorType::ReadError);
None
}
}
}
.boxed();
// We speculate between reading from the remote cache vs. running locally. If there was a
// cache hit, we return early because there will be no need to write to the cache. Otherwise,
// we run the process locally and will possibly write it to the cache later.
tokio::select! {
cache_result = cache_read_future => {
if let Some(cached_response) = cache_result {
let lookup_elapsed = cache_lookup_start.elapsed();
context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationRemoteCompletedFirst, 1);
if let Some(time_saved) = cached_response.metadata.time_saved_from_cache(lookup_elapsed) {
let time_saved = time_saved.as_millis() as u64;
context
.workunit_store
.increment_counter(Metric::RemoteCacheTotalTimeSavedMs, time_saved);
context
.workunit_store
.record_observation(ObservationMetric::RemoteCacheTimeSavedMs, time_saved);
}
return Ok(cached_response);
} else {
// Note that we don't increment a counter here, as there is nothing of note in this
// scenario: the remote cache did not save unnecessary local work, nor was the remote
// trip unusually slow such that local execution was faster.
local_execution_future.await?
}
}
local_result = &mut local_execution_future => {
context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationLocalCompletedFirst, 1);
local_result?
}
}
} else {
local_execution_future.await?
};
if result.exit_code == 0 && self.cache_write {
let command_runner = self.clone();
let result = result.clone();
let context2 = context.clone();
// NB: We use `TaskExecutor::spawn` instead of `tokio::spawn` to ensure logging still works.
let cache_write_future = async move {
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteStarted, 1);
let write_result = command_runner
.update_action_cache(
&context2,
&request,
&result,
&command_runner.metadata,
&command,
action_digest,
command_digest,
)
.await;
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteFinished, 1);
if let Err(err) = write_result {
command_runner.log_cache_error(err, CacheErrorType::WriteError);
context2
.workunit_store
.increment_counter(Metric::RemoteCacheWriteErrors, 1);
};
}
.boxed();
let _write_join = self.executor.spawn(with_workunit(
context.workunit_store,
"remote_cache_write".to_owned(),
WorkunitMetadata {
level: Level::Trace,
..WorkunitMetadata::default()
},
cache_write_future,
|_, md| md,
));
}
Ok(result)
}
fn | (&self, req: &MultiPlatformProcess) -> Option<Process> {
self.underlying.extract_compatible_request(req)
}
}
| extract_compatible_request | identifier_name |
bare_index.rs | /// Creates a bare index from a provided URL, opening the same location on
/// disk that cargo uses for that registry index.
pub fn from_url(url: &str) -> Result<Self, Error> {
let (dir_name, canonical_url) = url_to_local_dir(url)?;
let mut path = home::cargo_home().unwrap_or_default();
path.push("registry/index");
path.push(dir_name);
Ok(Self {
path,
url: canonical_url,
})
}
/// Creates a bare index at the provided path with the specified repository URL.
#[inline]
pub fn with_path(path: PathBuf, url: &str) -> Self {
Self {
path,
url: url.to_owned(),
}
}
/// Creates an index for the default crates.io registry, using the same
/// disk location as cargo itself.
#[inline]
pub fn new_cargo_default() -> Self {
// UNWRAP: The default index git URL is known to safely convert to a path.
Self::from_url(crate::INDEX_GIT_URL).unwrap()
}
/// Opens the local index, which acts as a kind of lock for source control
/// operations
#[inline]
pub fn open_or_clone(&self) -> Result<BareIndexRepo<'_>, Error> {
BareIndexRepo::new(self)
}
/// Get the index directory.
#[inline]
pub fn path(&self) -> &Path {
&self.path
}
}
/// Self-referential struct where `Tree` borrows from `Repository`
struct UnsafeRepoTree {
/// Warning: order of the fields is necessary for safety. `tree` must Drop before `repo`.
tree: git2::Tree<'static>,
repo: Box<git2::Repository>,
  // Currently !Unpin is Rust's heuristic for self-referential structs
_self_referential: PhantomPinned,
}
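// Illustrative sketch, std only: the field order above matters because Rust drops
// struct fields in declaration order, so `tree` (which borrows from `repo`) is dropped
// before `repo`. A tiny standalone demonstration of that guarantee; the types here are
// hypothetical, and running `demo()` prints "tree dropped" before "repo dropped".
#[allow(dead_code)]
mod drop_order_sketch {
  struct Tree;
  struct Repo;
  impl Drop for Tree {
    fn drop(&mut self) {
      println!("tree dropped");
    }
  }
  impl Drop for Repo {
    fn drop(&mut self) {
      println!("repo dropped");
    }
  }
  struct Holder {
    // Declared first, therefore dropped first, mirroring `UnsafeRepoTree`.
    _tree: Tree,
    _repo: Repo,
  }
  pub fn demo() {
    let _holder = Holder {
      _tree: Tree,
      _repo: Repo,
    };
  }
}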
/// Opened instance of [`BareIndex`]
pub struct BareIndexRepo<'a> {
inner: &'a BareIndex,
head_str: String,
rt: UnsafeRepoTree,
}
impl<'a> BareIndexRepo<'a> {
fn new(index: &'a BareIndex) -> Result<Self, Error> {
let exists = git2::Repository::discover(&index.path)
.map(|repository| {
repository
.find_remote("origin")
.ok()
// Cargo creates a checkout without an origin set,
// so default to true in case of missing origin
.map_or(true, |remote| {
remote.url().map_or(true, |url| url == index.url)
})
})
.unwrap_or(false);
    let repo = if !exists {
let mut opts = git2::RepositoryInitOptions::new();
opts.external_template(false);
let repo = git2::Repository::init_opts(&index.path, &opts)?;
{
let mut origin_remote = repo
.find_remote("origin")
.or_else(|_| repo.remote_anonymous(&index.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
repo
} else {
git2::Repository::open(&index.path)?
};
// It's going to be used in a self-referential type. Boxing prevents it from being moved
// and adds a layer of indirection that will hopefully not upset noalias analysis.
let repo = Box::new(repo);
let head = repo
// Fallback to HEAD, as a fresh clone won't have a FETCH_HEAD
.refname_to_id("FETCH_HEAD")
.or_else(|_| repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let tree = {
let commit = repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) }
};
Ok(Self {
inner: index,
head_str,
rt: UnsafeRepoTree {
repo,
tree,
_self_referential: PhantomPinned,
},
})
}
/// Fetches latest from the remote index repository. Note that using this
/// method will mean no cache entries will be used, if a new commit is fetched
/// from the repository, as their commit version will no longer match.
pub fn | (&mut self) -> Result<(), Error> {
{
let mut origin_remote = self
.rt
.repo
.find_remote("origin")
.or_else(|_| self.rt.repo.remote_anonymous(&self.inner.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
let head = self
.rt
.repo
.refname_to_id("FETCH_HEAD")
.or_else(|_| self.rt.repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let commit = self.rt.repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
let tree = unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) };
self.head_str = head_str;
self.rt.tree = tree;
Ok(())
}
  /// Reads a crate from the index. It will attempt to use a cached entry if
  /// one is available; otherwise it will fall back to reading the crate
  /// directly from the git blob containing the crate information.
pub fn crate_(&self, name: &str) -> Option<Crate> {
let rel_path = match crate::crate_name_to_relative_path(name) {
Some(rp) => rp,
None => return None,
};
    // Attempt to load the .cache/ entry first; this is purely an acceleration
// mechanism and can fail for a few reasons that are non-fatal
{
let mut cache_path = self.inner.path.join(".cache");
cache_path.push(&rel_path);
if let Ok(cache_bytes) = std::fs::read(&cache_path) {
if let Ok(krate) = Crate::from_cache_slice(&cache_bytes, &self.head_str) {
return Some(krate);
}
}
}
// Fallback to reading the blob directly via git if we don't have a
// valid cache entry
self.crate_from_rel_path(&rel_path).ok()
}
fn crate_from_rel_path(&self, path: &str) -> Result<Crate, Error> {
let entry = self.rt.tree.get_path(&Path::new(path))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, path.to_owned())))?;
Crate::from_slice(blob.content()).map_err(Error::Io)
}
/// Retrieve an iterator over all the crates in the index.
/// skips crates that can not be parsed.
#[inline]
pub fn crates(&self) -> Crates<'_> {
Crates {
blobs: self.crates_refs(),
}
}
/// Retrieve an iterator over all the crates in the index.
/// Returns opaque reference for each crate in the index, which can be used with [`CrateRef::parse`]
fn crates_refs(&self) -> CrateRefs<'_> {
let mut stack = Vec::with_capacity(800);
// Scan only directories at top level (skip config.json, etc.)
for entry in self.rt.tree.iter() {
let entry = entry.to_object(&self.rt.repo).unwrap();
if entry.as_tree().is_some() {
stack.push(entry);
}
}
CrateRefs {
stack,
rt: &self.rt,
}
}
/// Get the global configuration of the index.
pub fn index_config(&self) -> Result<IndexConfig, Error> {
let entry = self.rt.tree.get_path(&Path::new("config.json"))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, "config.json")))?;
serde_json::from_slice(blob.content()).map_err(Error::Json)
}
}
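// Illustrative usage sketch of how `BareIndex` and `BareIndexRepo` compose. Opening
// the default index may clone the full crates.io index over the network, so this is
// left as a sketch rather than a test (the function name is hypothetical; `name()` on
// `Crate` is exercised the same way by the tests below).
#[allow(dead_code)]
fn example_lookup() -> Option<String> {
  let index = BareIndex::new_cargo_default();
  let repo = index.open_or_clone().ok()?;
  // `crate_` consults the .cache/ entry first and falls back to the git blob.
  let krate = repo.crate_("serde")?;
  Some(krate.name().to_owned())
}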
/// Iterator over all crates in the index, but returns opaque objects that can be parsed separately.
///
/// See [`CrateRef::parse`].
struct CrateRefs<'a> {
stack: Vec<git2::Object<'a>>,
rt: &'a UnsafeRepoTree,
}
/// Opaque representation of a crate in the index. See [`CrateRef::parse`].
pub(crate) struct CrateRef<'a>(pub(crate) git2::Object<'a>);
impl CrateRef<'_> {
#[inline]
/// Parse a crate from [`BareIndex::crates_blobs`] iterator
pub fn parse(&self) -> Option<Crate> {
Crate::from_slice(self.as_slice()?).ok()
}
/// Raw crate data that can be parsed with [`Crate::from_slice`]
pub fn as_slice(&self) -> Option<&[u8]> {
Some(self.0.as_blob()?.content())
}
}
impl<'a> Iterator for CrateRefs<'a> {
type Item = CrateRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
while let Some(last) = self.stack.pop() {
match last.as_tree() {
None => return Some(CrateRef(last)),
Some(tree) => {
for entry in tree.iter().rev() {
self.stack.push(entry.to_object(&self.rt.repo).unwrap());
}
continue;
}
}
}
None
}
}
pub struct Crates<'a> {
blobs: CrateRefs<'a>,
}
impl<'a> Iterator for Crates<'a> {
type Item = Crate;
fn next(&mut self) -> Option<Self::Item> {
while let Some(next) = self.blobs.next() {
if let Some(k) = CrateRef::parse(&next) {
return Some(k);
}
}
None
}
}
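// Illustrative sketch, std only: `CrateRefs::next` above is a depth-first walk with an
// explicit stack, pushing a tree's entries in reverse so the first entry is popped
// (and therefore visited) first. The same traversal over a plain nested structure
// (the `Node` type and function name are hypothetical):
#[allow(dead_code)]
enum Node {
  Leaf(u32),
  Dir(Vec<Node>),
}
#[allow(dead_code)]
fn dfs_leaves(root: Node) -> Vec<u32> {
  let mut out = Vec::new();
  let mut stack = vec![root];
  while let Some(last) = stack.pop() {
    match last {
      Node::Leaf(value) => out.push(value),
      // Children are pushed in reverse, mirroring `tree.iter().rev()` above.
      Node::Dir(children) => stack.extend(children.into_iter().rev()),
    }
  }
  out
}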
/// Converts a full URL, e.g. https://github.com/rust-lang/crates.io-index, into
/// the root directory name where cargo itself will fetch it on disk.
fn url_to_local_dir(url: &str) -> Result<(String, String), Error> {
fn to_hex(num: u64) -> String {
const CHARS: &[u8] = b"0123456789abcdef";
let bytes = &[
num as u8,
(num >> 8) as u8,
(num >> 16) as u8,
(num >> 24) as u8,
(num >> 32) as u8,
(num >> 40) as u8,
(num >> 48) as u8,
(num >> 56) as u8,
];
let mut output = vec![0u8; 16];
let mut ind = 0;
for &byte in bytes {
output[ind] = CHARS[(byte >> 4) as usize];
output[ind + 1] = CHARS[(byte & 0xf) as usize];
ind += 2;
}
String::from_utf8(output).expect("valid utf-8 hex string")
}
#[allow(deprecated)]
fn hash_u64(url: &str) -> u64 {
use std::hash::{Hash, Hasher, SipHasher};
let mut hasher = SipHasher::new_with_keys(0, 0);
// Registry
2usize.hash(&mut hasher);
// Url
url.hash(&mut hasher);
hasher.finish()
}
// Ensure we have a registry or bare url
let (url, scheme_ind) = {
let scheme_ind = url
.find("://")
.ok_or_else(|| Error::Url(format!("'{}' is not a valid url", url)))?;
let scheme_str = &url[..scheme_ind];
if let Some(ind) = scheme_str.find('+') {
      if &scheme_str[..ind] != "registry" {
return Err(Error::Url(format!("'{}' is not a valid registry url", url)));
}
(&url[ind + 1..], scheme_ind - ind - 1)
} else {
(url, scheme_ind)
}
};
// Could use the Url crate for this, but it's simple enough and we don't
// need to deal with every possible url (I hope...)
let host = match url[scheme_ind + 3..].find('/') {
Some(end) => &url[scheme_ind + 3..scheme_ind + 3 + end],
None => &url[scheme_ind + 3..],
};
// cargo special cases github.com for reasons, so do the same
let mut canonical = if host == "github.com" {
url.to_lowercase()
} else {
url.to_owned()
};
// Chop off any query params/fragments
if let Some(hash) = canonical.rfind('#') {
canonical.truncate(hash);
}
if let Some(query) = canonical.rfind('?') {
canonical.truncate(query);
}
let ident = to_hex(hash_u64(&canonical));
if canonical.ends_with('/') {
canonical.pop();
}
if canonical.ends_with(".git") {
canonical.truncate(canonical.len() - 4);
}
Ok((format!("{}-{}", host, ident), canonical))
}
#[cfg(test)]
mod test {
#[test]
fn matches_cargo() {
assert_eq!(
super::url_to_local_dir(crate::INDEX_GIT_URL).unwrap(),
(
"github.com-1ecc6299db9ec823".to_owned(),
crate::INDEX_GIT_URL.to_owned()
)
);
// I've confirmed this also works with a custom registry, unfortunately
// that one includes a secret key as part of the url which would allow
// anyone to publish to the registry, so uhh...here's a fake one instead
assert_eq!(
super::url_to_local_dir(
"https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index.git"
)
.unwrap(),
(
"dl.cloudsmith.io-ff79e51ddd2b38fd".to_owned(),
"https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index".to_owned()
)
);
// Ensure we actually strip off the irrelevant parts of a url, note that
// the.git suffix is not part of the canonical url, but *is* used when hashing
assert_eq!(
super::url_to_local_dir(&format!(
"registry+{}.git?one=1&two=2#fragment",
crate::INDEX_GIT_URL
))
.unwrap(),
(
"github.com-c786010fb7ef2e6e".to_owned(),
crate::INDEX_GIT_URL.to_owned()
)
);
}
#[test]
fn bare_iterator() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("bare_iterator").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
let repo = index
.open_or_clone()
.expect("Failed to clone crates.io index");
let mut found_gcc_crate = false;
for c in repo.crates() {
if c.name() == "gcc" {
found_gcc_crate = true;
}
}
assert!(found_gcc_crate);
}
#[test]
fn clones_bare_index() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("clones_bare_index").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
let mut repo = index
.open_or_clone()
.expect("Failed to clone crates.io index");
fn test_sval(repo: &super::BareIndexRepo<'_>) {
let krate = repo
.crate_("sval")
.expect("Could not find the crate sval in the index");
let version = krate
.versions()
.iter()
.find(|v| v.version() == "0.0.1")
.expect("Version 0.0.1 of sval does not exist?");
let dep_with_package_name = version
.dependencies()
.iter()
.find(|d| d.name() == "serde_lib")
.expect("sval does not have expected dependency?");
assert_ne!(
dep_with_package_name.name(),
dep_with_package_name.package().unwrap()
);
assert_eq!(
dep_with_package_name.crate_name(),
dep_with_package_name.package().unwrap()
);
}
test_sval(&repo);
repo.retrieve().expect("Failed to fetch crates.io index");
test_sval(&repo);
}
#[test]
fn opens_bare_index() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("opens_bare_index").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
{
let _ = index
.open_or_clone()
.expect("Failed to clone crates.io index");
}
let mut repo = index
.open_or_clone()
.expect("Failed to open crates.io index");
fn test_sval(repo: &super::BareIndexRepo<'_>) {
let krate = repo
.crate_("sval")
.expect("Could not find the crate sval in the index");
let version = krate
.versions()
.iter()
.find(|v| v.version() == "0.0.1")
.expect("Version 0.0.1 of sval does not exist?");
let dep_with_package_name = version
.dependencies()
.iter()
.find(|d| d.name() == "serde_lib")
.expect | retrieve | identifier_name |
bare_index.rs |
path.push("registry/index");
path.push(dir_name);
Ok(Self {
path,
url: canonical_url,
})
}
/// Creates a bare index at the provided path with the specified repository URL.
#[inline]
pub fn with_path(path: PathBuf, url: &str) -> Self {
Self {
path,
url: url.to_owned(),
}
}
/// Creates an index for the default crates.io registry, using the same
/// disk location as cargo itself.
#[inline]
pub fn new_cargo_default() -> Self {
// UNWRAP: The default index git URL is known to safely convert to a path.
Self::from_url(crate::INDEX_GIT_URL).unwrap()
}
/// Opens the local index, which acts as a kind of lock for source control
/// operations
#[inline]
pub fn open_or_clone(&self) -> Result<BareIndexRepo<'_>, Error> {
BareIndexRepo::new(self)
}
/// Get the index directory.
#[inline]
pub fn path(&self) -> &Path {
&self.path
}
}
/// Self-referential struct where `Tree` borrows from `Repository`
struct UnsafeRepoTree {
/// Warning: order of the fields is necessary for safety. `tree` must Drop before `repo`.
tree: git2::Tree<'static>,
repo: Box<git2::Repository>,
  // Currently !Unpin is Rust's heuristic for self-referential structs
_self_referential: PhantomPinned,
}
/// Opened instance of [`BareIndex`]
pub struct BareIndexRepo<'a> {
inner: &'a BareIndex,
head_str: String,
rt: UnsafeRepoTree,
}
impl<'a> BareIndexRepo<'a> {
fn new(index: &'a BareIndex) -> Result<Self, Error> {
let exists = git2::Repository::discover(&index.path)
.map(|repository| {
repository
.find_remote("origin")
.ok()
// Cargo creates a checkout without an origin set,
// so default to true in case of missing origin
.map_or(true, |remote| {
remote.url().map_or(true, |url| url == index.url)
})
})
.unwrap_or(false);
    let repo = if !exists {
let mut opts = git2::RepositoryInitOptions::new();
opts.external_template(false);
let repo = git2::Repository::init_opts(&index.path, &opts)?;
{
let mut origin_remote = repo
.find_remote("origin")
.or_else(|_| repo.remote_anonymous(&index.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
repo
} else {
git2::Repository::open(&index.path)?
};
// It's going to be used in a self-referential type. Boxing prevents it from being moved
// and adds a layer of indirection that will hopefully not upset noalias analysis.
let repo = Box::new(repo);
let head = repo
// Fallback to HEAD, as a fresh clone won't have a FETCH_HEAD
.refname_to_id("FETCH_HEAD")
.or_else(|_| repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let tree = {
let commit = repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) }
};
Ok(Self {
inner: index,
head_str,
rt: UnsafeRepoTree {
repo,
tree,
_self_referential: PhantomPinned,
},
})
}
/// Fetches latest from the remote index repository. Note that using this
/// method will mean no cache entries will be used, if a new commit is fetched
/// from the repository, as their commit version will no longer match.
pub fn retrieve(&mut self) -> Result<(), Error> {
{
let mut origin_remote = self
.rt
.repo
.find_remote("origin")
.or_else(|_| self.rt.repo.remote_anonymous(&self.inner.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
let head = self
.rt
.repo
.refname_to_id("FETCH_HEAD")
.or_else(|_| self.rt.repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let commit = self.rt.repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
let tree = unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) };
self.head_str = head_str;
self.rt.tree = tree;
Ok(())
}
    /// Reads a crate from the index. It will attempt to use a cached entry if
    /// one is available, otherwise it will fall back to reading the crate
    /// directly from the git blob containing the crate information.
pub fn crate_(&self, name: &str) -> Option<Crate> {
let rel_path = match crate::crate_name_to_relative_path(name) {
Some(rp) => rp,
None => return None,
};
        // Attempt to load the .cache/ entry first, this is purely an acceleration
// mechanism and can fail for a few reasons that are non-fatal
{
let mut cache_path = self.inner.path.join(".cache");
cache_path.push(&rel_path);
if let Ok(cache_bytes) = std::fs::read(&cache_path) {
if let Ok(krate) = Crate::from_cache_slice(&cache_bytes, &self.head_str) {
return Some(krate);
}
}
}
// Fallback to reading the blob directly via git if we don't have a
// valid cache entry
self.crate_from_rel_path(&rel_path).ok()
}
fn crate_from_rel_path(&self, path: &str) -> Result<Crate, Error> {
let entry = self.rt.tree.get_path(&Path::new(path))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, path.to_owned())))?;
Crate::from_slice(blob.content()).map_err(Error::Io)
}
    /// Retrieve an iterator over all the crates in the index.
    /// Skips crates that cannot be parsed.
#[inline]
pub fn crates(&self) -> Crates<'_> {
Crates {
blobs: self.crates_refs(),
}
}
/// Retrieve an iterator over all the crates in the index.
/// Returns opaque reference for each crate in the index, which can be used with [`CrateRef::parse`]
fn crates_refs(&self) -> CrateRefs<'_> {
let mut stack = Vec::with_capacity(800);
// Scan only directories at top level (skip config.json, etc.)
for entry in self.rt.tree.iter() {
let entry = entry.to_object(&self.rt.repo).unwrap();
if entry.as_tree().is_some() {
stack.push(entry);
}
}
CrateRefs {
stack,
rt: &self.rt,
}
}
/// Get the global configuration of the index.
pub fn index_config(&self) -> Result<IndexConfig, Error> {
let entry = self.rt.tree.get_path(&Path::new("config.json"))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, "config.json")))?;
serde_json::from_slice(blob.content()).map_err(Error::Json)
}
}
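// Hedged sketch (not part of the original source): the read path exposed by the impl
// above. `crate_` consults the on-disk `.cache/` entry first and only trusts it while
// its recorded commit matches `head_str`; otherwise it falls back to the git blob.
//
//     let repo = index.open_or_clone()?;
//     let cfg = repo.index_config()?;    // parses config.json out of the git tree
//     let krate = repo.crate_("gcc");    // cache hit if fresh, blob lookup otherwise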
/// Iterator over all crates in the index, but returns opaque objects that can be parsed separately.
///
/// See [`CrateRef::parse`].
struct CrateRefs<'a> {
stack: Vec<git2::Object<'a>>,
rt: &'a UnsafeRepoTree,
}
/// Opaque representation of a crate in the index. See [`CrateRef::parse`].
pub(crate) struct CrateRef<'a>(pub(crate) git2::Object<'a>);
impl CrateRef<'_> {
#[inline]
/// Parse a crate from [`BareIndex::crates_blobs`] iterator
pub fn parse(&self) -> Option<Crate> {
Crate::from_slice(self.as_slice()?).ok()
}
/// Raw crate data that can be parsed with [`Crate::from_slice`]
pub fn as_slice(&self) -> Option<&[u8]> {
Some(self.0.as_blob()?.content())
}
}
impl<'a> Iterator for CrateRefs<'a> {
type Item = CrateRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
while let Some(last) = self.stack.pop() {
match last.as_tree() {
None => return Some(CrateRef(last)),
Some(tree) => {
for entry in tree.iter().rev() {
self.stack.push(entry.to_object(&self.rt.repo).unwrap());
}
continue;
}
}
}
None
}
}
pub struct Crates<'a> {
blobs: CrateRefs<'a>,
}
impl<'a> Iterator for Crates<'a> {
type Item = Crate;
fn next(&mut self) -> Option<Self::Item> {
while let Some(next) = self.blobs.next() {
if let Some(k) = CrateRef::parse(&next) {
return Some(k);
}
}
None
}
}
/// Converts a full url, eg https://github.com/rust-lang/crates.io-index, into
/// the root directory name where cargo itself will fetch it on disk
fn url_to_local_dir(url: &str) -> Result<(String, String), Error> {
fn to_hex(num: u64) -> String {
const CHARS: &[u8] = b"0123456789abcdef";
let bytes = &[
num as u8,
(num >> 8) as u8,
(num >> 16) as u8,
(num >> 24) as u8,
(num >> 32) as u8,
(num >> 40) as u8,
(num >> 48) as u8,
(num >> 56) as u8,
];
let mut output = vec![0u8; 16];
let mut ind = 0;
for &byte in bytes {
output[ind] = CHARS[(byte >> 4) as usize];
output[ind + 1] = CHARS[(byte & 0xf) as usize];
ind += 2;
}
String::from_utf8(output).expect("valid utf-8 hex string")
}
#[allow(deprecated)]
fn hash_u64(url: &str) -> u64 {
use std::hash::{Hash, Hasher, SipHasher};
let mut hasher = SipHasher::new_with_keys(0, 0);
// Registry
2usize.hash(&mut hasher);
// Url
url.hash(&mut hasher);
hasher.finish()
}
// Ensure we have a registry or bare url
let (url, scheme_ind) = {
let scheme_ind = url
.find("://")
.ok_or_else(|| Error::Url(format!("'{}' is not a valid url", url)))?;
let scheme_str = &url[..scheme_ind];
if let Some(ind) = scheme_str.find('+') {
if &scheme_str[..ind]!= "registry" {
return Err(Error::Url(format!("'{}' is not a valid registry url", url)));
}
(&url[ind + 1..], scheme_ind - ind - 1)
} else {
(url, scheme_ind)
}
};
// Could use the Url crate for this, but it's simple enough and we don't
// need to deal with every possible url (I hope...)
let host = match url[scheme_ind + 3..].find('/') {
Some(end) => &url[scheme_ind + 3..scheme_ind + 3 + end],
None => &url[scheme_ind + 3..],
};
// cargo special cases github.com for reasons, so do the same
let mut canonical = if host == "github.com" {
url.to_lowercase()
} else {
url.to_owned()
};
// Chop off any query params/fragments
if let Some(hash) = canonical.rfind('#') {
canonical.truncate(hash);
}
if let Some(query) = canonical.rfind('?') {
canonical.truncate(query);
}
let ident = to_hex(hash_u64(&canonical));
if canonical.ends_with('/') {
canonical.pop();
}
if canonical.ends_with(".git") {
canonical.truncate(canonical.len() - 4);
}
Ok((format!("{}-{}", host, ident), canonical))
}
#[cfg(test)]
mod test {
#[test]
fn matches_cargo() {
assert_eq!(
super::url_to_local_dir(crate::INDEX_GIT_URL).unwrap(),
(
"github.com-1ecc6299db9ec823".to_owned(),
crate::INDEX_GIT_URL.to_owned()
)
);
// I've confirmed this also works with a custom registry, unfortunately
// that one includes a secret key as part of the url which would allow
// anyone to publish to the registry, so uhh...here's a fake one instead
assert_eq!(
super::url_to_local_dir(
"https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index.git"
)
.unwrap(),
(
"dl.cloudsmith.io-ff79e51ddd2b38fd".to_owned(),
"https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index".to_owned()
)
);
// Ensure we actually strip off the irrelevant parts of a url, note that
        // the .git suffix is not part of the canonical url, but *is* used when hashing
assert_eq!(
super::url_to_local_dir(&format!(
"registry+{}.git?one=1&two=2#fragment",
crate::INDEX_GIT_URL
))
.unwrap(),
(
"github.com-c786010fb7ef2e6e".to_owned(),
crate::INDEX_GIT_URL.to_owned()
)
);
}
#[test]
fn bare_iterator() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("bare_iterator").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
let repo = index
.open_or_clone()
.expect("Failed to clone crates.io index");
let mut found_gcc_crate = false;
for c in repo.crates() {
if c.name() == "gcc" {
found_gcc_crate = true;
}
}
assert!(found_gcc_crate);
}
#[test]
fn clones_bare_index() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("clones_bare_index").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
let mut repo = index
.open_or_clone()
.expect("Failed to clone crates.io index");
fn test_sval(repo: &super::BareIndexRepo<'_>) {
let krate = repo
.crate_("sval")
.expect("Could not find the crate sval in the index");
let version = krate
.versions()
.iter()
.find(|v| v.version() == "0.0.1")
.expect("Version 0.0.1 of sval does not exist?");
let dep_with_package_name = version
.dependencies()
.iter()
.find(|d| d.name() == "serde_lib")
.expect("sval does not have expected dependency?");
assert_ne!(
dep_with_package_name.name(),
dep_with_package_name.package().unwrap()
);
assert_eq!(
dep_with_package_name.crate_name(),
dep_with_package_name.package().unwrap()
);
}
test_sval(&repo);
repo.retrieve().expect("Failed to fetch crates.io index");
test_sval(&repo);
}
#[test]
fn opens_bare_index() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("opens_bare_index").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
{
let _ = index
.open_or_clone()
.expect("Failed to clone crates.io index");
}
let mut repo = index
.open_or_clone()
.expect("Failed to open crates.io index");
fn test_sval(repo: &super::BareIndexRepo<'_>) | {
let krate = repo
.crate_("sval")
.expect("Could not find the crate sval in the index");
let version = krate
.versions()
.iter()
.find(|v| v.version() == "0.0.1")
.expect("Version 0.0.1 of sval does not exist?");
let dep_with_package_name = version
.dependencies()
.iter()
.find(|d| d.name() == "serde_lib")
.expect("sval does not have expected dependency?");
assert_ne!(
dep_with_package_name.name(),
dep_with_package_name.package().unwrap()
);
assert_eq!( | identifier_body |
|
bare_index.rs | /// Creates a bare index from a provided URL, opening the same location on
/// disk that cargo uses for that registry index.
pub fn from_url(url: &str) -> Result<Self, Error> {
let (dir_name, canonical_url) = url_to_local_dir(url)?;
let mut path = home::cargo_home().unwrap_or_default();
path.push("registry/index");
path.push(dir_name);
Ok(Self {
path,
url: canonical_url,
})
}
| pub fn with_path(path: PathBuf, url: &str) -> Self {
Self {
path,
url: url.to_owned(),
}
}
/// Creates an index for the default crates.io registry, using the same
/// disk location as cargo itself.
#[inline]
pub fn new_cargo_default() -> Self {
// UNWRAP: The default index git URL is known to safely convert to a path.
Self::from_url(crate::INDEX_GIT_URL).unwrap()
}
/// Opens the local index, which acts as a kind of lock for source control
/// operations
#[inline]
pub fn open_or_clone(&self) -> Result<BareIndexRepo<'_>, Error> {
BareIndexRepo::new(self)
}
/// Get the index directory.
#[inline]
pub fn path(&self) -> &Path {
&self.path
}
}
/// Self-referential struct where `Tree` borrows from `Repository`
struct UnsafeRepoTree {
/// Warning: order of the fields is necessary for safety. `tree` must Drop before `repo`.
tree: git2::Tree<'static>,
repo: Box<git2::Repository>,
    // Currently !Unpin is Rust's heuristic for self-referential structs
_self_referential: PhantomPinned,
}
/// Opened instance of [`BareIndex`]
pub struct BareIndexRepo<'a> {
inner: &'a BareIndex,
head_str: String,
rt: UnsafeRepoTree,
}
impl<'a> BareIndexRepo<'a> {
fn new(index: &'a BareIndex) -> Result<Self, Error> {
let exists = git2::Repository::discover(&index.path)
.map(|repository| {
repository
.find_remote("origin")
.ok()
// Cargo creates a checkout without an origin set,
// so default to true in case of missing origin
.map_or(true, |remote| {
remote.url().map_or(true, |url| url == index.url)
})
})
.unwrap_or(false);
        let repo = if !exists {
let mut opts = git2::RepositoryInitOptions::new();
opts.external_template(false);
let repo = git2::Repository::init_opts(&index.path, &opts)?;
{
let mut origin_remote = repo
.find_remote("origin")
.or_else(|_| repo.remote_anonymous(&index.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
repo
} else {
git2::Repository::open(&index.path)?
};
// It's going to be used in a self-referential type. Boxing prevents it from being moved
// and adds a layer of indirection that will hopefully not upset noalias analysis.
let repo = Box::new(repo);
let head = repo
// Fallback to HEAD, as a fresh clone won't have a FETCH_HEAD
.refname_to_id("FETCH_HEAD")
.or_else(|_| repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let tree = {
let commit = repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) }
};
Ok(Self {
inner: index,
head_str,
rt: UnsafeRepoTree {
repo,
tree,
_self_referential: PhantomPinned,
},
})
}
/// Fetches latest from the remote index repository. Note that using this
/// method will mean no cache entries will be used, if a new commit is fetched
/// from the repository, as their commit version will no longer match.
pub fn retrieve(&mut self) -> Result<(), Error> {
{
let mut origin_remote = self
.rt
.repo
.find_remote("origin")
.or_else(|_| self.rt.repo.remote_anonymous(&self.inner.url))?;
origin_remote.fetch(
&[
"HEAD:refs/remotes/origin/HEAD",
"master:refs/remotes/origin/master",
],
Some(&mut crate::fetch_opts()),
None,
)?;
}
let head = self
.rt
.repo
.refname_to_id("FETCH_HEAD")
.or_else(|_| self.rt.repo.refname_to_id("HEAD"))?;
let head_str = head.to_string();
let commit = self.rt.repo.find_commit(head)?;
let tree = commit.tree()?;
// See `UnsafeRepoTree`
let tree = unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) };
self.head_str = head_str;
self.rt.tree = tree;
Ok(())
}
    /// Reads a crate from the index. It will attempt to use a cached entry if
    /// one is available, otherwise it will fall back to reading the crate
    /// directly from the git blob containing the crate information.
pub fn crate_(&self, name: &str) -> Option<Crate> {
let rel_path = match crate::crate_name_to_relative_path(name) {
Some(rp) => rp,
None => return None,
};
        // Attempt to load the .cache/ entry first, this is purely an acceleration
// mechanism and can fail for a few reasons that are non-fatal
{
let mut cache_path = self.inner.path.join(".cache");
cache_path.push(&rel_path);
if let Ok(cache_bytes) = std::fs::read(&cache_path) {
if let Ok(krate) = Crate::from_cache_slice(&cache_bytes, &self.head_str) {
return Some(krate);
}
}
}
// Fallback to reading the blob directly via git if we don't have a
// valid cache entry
self.crate_from_rel_path(&rel_path).ok()
}
fn crate_from_rel_path(&self, path: &str) -> Result<Crate, Error> {
let entry = self.rt.tree.get_path(&Path::new(path))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, path.to_owned())))?;
Crate::from_slice(blob.content()).map_err(Error::Io)
}
    /// Retrieve an iterator over all the crates in the index.
    /// Skips crates that cannot be parsed.
#[inline]
pub fn crates(&self) -> Crates<'_> {
Crates {
blobs: self.crates_refs(),
}
}
/// Retrieve an iterator over all the crates in the index.
/// Returns opaque reference for each crate in the index, which can be used with [`CrateRef::parse`]
fn crates_refs(&self) -> CrateRefs<'_> {
let mut stack = Vec::with_capacity(800);
// Scan only directories at top level (skip config.json, etc.)
for entry in self.rt.tree.iter() {
let entry = entry.to_object(&self.rt.repo).unwrap();
if entry.as_tree().is_some() {
stack.push(entry);
}
}
CrateRefs {
stack,
rt: &self.rt,
}
}
/// Get the global configuration of the index.
pub fn index_config(&self) -> Result<IndexConfig, Error> {
let entry = self.rt.tree.get_path(&Path::new("config.json"))?;
let object = entry.to_object(&self.rt.repo)?;
let blob = object
.as_blob()
.ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, "config.json")))?;
serde_json::from_slice(blob.content()).map_err(Error::Json)
}
}
/// Iterator over all crates in the index, but returns opaque objects that can be parsed separately.
///
/// See [`CrateRef::parse`].
struct CrateRefs<'a> {
stack: Vec<git2::Object<'a>>,
rt: &'a UnsafeRepoTree,
}
/// Opaque representation of a crate in the index. See [`CrateRef::parse`].
pub(crate) struct CrateRef<'a>(pub(crate) git2::Object<'a>);
impl CrateRef<'_> {
#[inline]
/// Parse a crate from [`BareIndex::crates_blobs`] iterator
pub fn parse(&self) -> Option<Crate> {
Crate::from_slice(self.as_slice()?).ok()
}
/// Raw crate data that can be parsed with [`Crate::from_slice`]
pub fn as_slice(&self) -> Option<&[u8]> {
Some(self.0.as_blob()?.content())
}
}
impl<'a> Iterator for CrateRefs<'a> {
type Item = CrateRef<'a>;
fn next(&mut self) -> Option<Self::Item> {
while let Some(last) = self.stack.pop() {
match last.as_tree() {
None => return Some(CrateRef(last)),
Some(tree) => {
for entry in tree.iter().rev() {
self.stack.push(entry.to_object(&self.rt.repo).unwrap());
}
continue;
}
}
}
None
}
}
pub struct Crates<'a> {
blobs: CrateRefs<'a>,
}
impl<'a> Iterator for Crates<'a> {
type Item = Crate;
fn next(&mut self) -> Option<Self::Item> {
while let Some(next) = self.blobs.next() {
if let Some(k) = CrateRef::parse(&next) {
return Some(k);
}
}
None
}
}
/// Converts a full url, eg https://github.com/rust-lang/crates.io-index, into
/// the root directory name where cargo itself will fetch it on disk
fn url_to_local_dir(url: &str) -> Result<(String, String), Error> {
fn to_hex(num: u64) -> String {
const CHARS: &[u8] = b"0123456789abcdef";
let bytes = &[
num as u8,
(num >> 8) as u8,
(num >> 16) as u8,
(num >> 24) as u8,
(num >> 32) as u8,
(num >> 40) as u8,
(num >> 48) as u8,
(num >> 56) as u8,
];
let mut output = vec![0u8; 16];
let mut ind = 0;
for &byte in bytes {
output[ind] = CHARS[(byte >> 4) as usize];
output[ind + 1] = CHARS[(byte & 0xf) as usize];
ind += 2;
}
String::from_utf8(output).expect("valid utf-8 hex string")
}
#[allow(deprecated)]
fn hash_u64(url: &str) -> u64 {
use std::hash::{Hash, Hasher, SipHasher};
let mut hasher = SipHasher::new_with_keys(0, 0);
// Registry
2usize.hash(&mut hasher);
// Url
url.hash(&mut hasher);
hasher.finish()
}
// Ensure we have a registry or bare url
let (url, scheme_ind) = {
let scheme_ind = url
.find("://")
.ok_or_else(|| Error::Url(format!("'{}' is not a valid url", url)))?;
let scheme_str = &url[..scheme_ind];
if let Some(ind) = scheme_str.find('+') {
if &scheme_str[..ind]!= "registry" {
return Err(Error::Url(format!("'{}' is not a valid registry url", url)));
}
(&url[ind + 1..], scheme_ind - ind - 1)
} else {
(url, scheme_ind)
}
};
// Could use the Url crate for this, but it's simple enough and we don't
// need to deal with every possible url (I hope...)
let host = match url[scheme_ind + 3..].find('/') {
Some(end) => &url[scheme_ind + 3..scheme_ind + 3 + end],
None => &url[scheme_ind + 3..],
};
// cargo special cases github.com for reasons, so do the same
let mut canonical = if host == "github.com" {
url.to_lowercase()
} else {
url.to_owned()
};
// Chop off any query params/fragments
if let Some(hash) = canonical.rfind('#') {
canonical.truncate(hash);
}
if let Some(query) = canonical.rfind('?') {
canonical.truncate(query);
}
let ident = to_hex(hash_u64(&canonical));
if canonical.ends_with('/') {
canonical.pop();
}
if canonical.ends_with(".git") {
canonical.truncate(canonical.len() - 4);
}
Ok((format!("{}-{}", host, ident), canonical))
}
#[cfg(test)]
mod test {
#[test]
fn matches_cargo() {
assert_eq!(
super::url_to_local_dir(crate::INDEX_GIT_URL).unwrap(),
(
"github.com-1ecc6299db9ec823".to_owned(),
crate::INDEX_GIT_URL.to_owned()
)
);
// I've confirmed this also works with a custom registry, unfortunately
// that one includes a secret key as part of the url which would allow
// anyone to publish to the registry, so uhh...here's a fake one instead
assert_eq!(
super::url_to_local_dir(
"https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index.git"
)
.unwrap(),
(
"dl.cloudsmith.io-ff79e51ddd2b38fd".to_owned(),
"https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index".to_owned()
)
);
// Ensure we actually strip off the irrelevant parts of a url, note that
        // the .git suffix is not part of the canonical url, but *is* used when hashing
assert_eq!(
super::url_to_local_dir(&format!(
"registry+{}.git?one=1&two=2#fragment",
crate::INDEX_GIT_URL
))
.unwrap(),
(
"github.com-c786010fb7ef2e6e".to_owned(),
crate::INDEX_GIT_URL.to_owned()
)
);
}
#[test]
fn bare_iterator() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("bare_iterator").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
let repo = index
.open_or_clone()
.expect("Failed to clone crates.io index");
let mut found_gcc_crate = false;
for c in repo.crates() {
if c.name() == "gcc" {
found_gcc_crate = true;
}
}
assert!(found_gcc_crate);
}
#[test]
fn clones_bare_index() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("clones_bare_index").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
let mut repo = index
.open_or_clone()
.expect("Failed to clone crates.io index");
fn test_sval(repo: &super::BareIndexRepo<'_>) {
let krate = repo
.crate_("sval")
.expect("Could not find the crate sval in the index");
let version = krate
.versions()
.iter()
.find(|v| v.version() == "0.0.1")
.expect("Version 0.0.1 of sval does not exist?");
let dep_with_package_name = version
.dependencies()
.iter()
.find(|d| d.name() == "serde_lib")
.expect("sval does not have expected dependency?");
assert_ne!(
dep_with_package_name.name(),
dep_with_package_name.package().unwrap()
);
assert_eq!(
dep_with_package_name.crate_name(),
dep_with_package_name.package().unwrap()
);
}
test_sval(&repo);
repo.retrieve().expect("Failed to fetch crates.io index");
test_sval(&repo);
}
#[test]
fn opens_bare_index() {
use super::BareIndex;
let tmp_dir = tempdir::TempDir::new("opens_bare_index").unwrap();
let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL);
{
let _ = index
.open_or_clone()
.expect("Failed to clone crates.io index");
}
let mut repo = index
.open_or_clone()
.expect("Failed to open crates.io index");
fn test_sval(repo: &super::BareIndexRepo<'_>) {
let krate = repo
.crate_("sval")
.expect("Could not find the crate sval in the index");
let version = krate
.versions()
.iter()
.find(|v| v.version() == "0.0.1")
.expect("Version 0.0.1 of sval does not exist?");
let dep_with_package_name = version
.dependencies()
.iter()
.find(|d| d.name() == "serde_lib")
.expect("sval | /// Creates a bare index at the provided path with the specified repository URL.
#[inline] | random_line_split |
encode.rs | use std::collections::{HashMap, HashSet, BTreeMap};
use std::fmt;
use std::str::FromStr;
use regex::Regex;
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use package::Package;
use package_id::PackageId;
use source::SourceId;
use util::{CraftResult, Graph, Config, internal, ChainError, CraftError};
use workspace::Workspace;
use super::Resolve;
#[derive(RustcEncodable, RustcDecodable, Debug)]
pub struct EncodableResolve {
package: Option<Vec<EncodableDependency>>,
/// `root` is optional to allow forward compatibility.
root: Option<EncodableDependency>,
metadata: Option<Metadata>,
}
pub type Metadata = BTreeMap<String, String>;
impl EncodableResolve {
pub fn into_resolve(self, ws: &Workspace) -> CraftResult<Resolve> {
let path_deps = build_path_deps(ws);
let packages = {
let mut packages = self.package.unwrap_or(Vec::new());
if let Some(root) = self.root {
packages.insert(0, root);
}
packages
};
// `PackageId`s in the lock file don't include the `source` part
// for workspace members, so we reconstruct proper ids.
let (live_pkgs, all_pkgs) = {
let mut live_pkgs = HashMap::new();
let mut all_pkgs = HashSet::new();
for pkg in packages.iter() {
let enc_id = EncodablePackageId {
name: pkg.name.clone(),
version: pkg.version.clone(),
source: pkg.source.clone(),
};
                if !all_pkgs.insert(enc_id.clone()) {
return Err(internal(format!("package `{}` is specified twice in the lockfile", pkg.name)));
}
let id = match pkg.source.as_ref().or(path_deps.get(&pkg.name)) {
// We failed to find a local package in the workspace.
// It must have been removed and should be ignored.
None => continue,
Some(source) => PackageId::new(&pkg.name, &pkg.version, source)?,
};
assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none())
}
(live_pkgs, all_pkgs)
};
let lookup_id = |enc_id: &EncodablePackageId| -> CraftResult<Option<PackageId>> {
match live_pkgs.get(enc_id) {
Some(&(ref id, _)) => Ok(Some(id.clone())),
None => {
if all_pkgs.contains(enc_id) {
// Package is found in the lockfile, but it is
// no longer a member of the workspace.
Ok(None)
} else {
Err(internal(format!("package `{}` is specified as a dependency, but is missing from the \
package list",
enc_id)))
}
}
}
};
let g = {
let mut g = Graph::new();
for &(ref id, _) in live_pkgs.values() {
g.add(id.clone(), &[]);
}
for &(ref id, ref pkg) in live_pkgs.values() {
let deps = match pkg.dependencies {
Some(ref deps) => deps,
None => continue,
};
for edge in deps.iter() {
if let Some(to_depend_on) = lookup_id(edge)? {
g.link(id.clone(), to_depend_on);
}
}
}
g
};
let replacements = {
let mut replacements = HashMap::new();
for &(ref id, ref pkg) in live_pkgs.values() {
if let Some(ref replace) = pkg.replace {
assert!(pkg.dependencies.is_none());
if let Some(replace_id) = lookup_id(replace)? {
replacements.insert(id.clone(), replace_id);
}
}
}
replacements
};
let mut metadata = self.metadata.unwrap_or(BTreeMap::new());
// Parse out all package checksums. After we do this we can be in a few
// situations:
//
// * We parsed no checksums. In this situation we're dealing with an old
// lock file and we're gonna fill them all in.
// * We parsed some checksums, but not one for all packages listed. It
// could have been the case that some were listed, then an older Craft
// client added more dependencies, and now we're going to fill in the
// missing ones.
// * There are too many checksums listed, indicative of an older Craft
// client removing a package but not updating the checksums listed.
//
// In all of these situations they're part of normal usage, so we don't
// really worry about it. We just try to slurp up as many checksums as
// possible.
let mut checksums = HashMap::new();
let prefix = "checksum ";
let mut to_remove = Vec::new();
for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) {
to_remove.push(k.to_string());
let k = &k[prefix.len()..];
let enc_id: EncodablePackageId = k.parse()
.chain_error(|| internal("invalid encoding of checksum in lockfile"))?;
let id = match lookup_id(&enc_id) {
Ok(Some(id)) => id,
_ => continue,
};
let v = if v == "<none>" {
None
} else {
Some(v.to_string())
};
checksums.insert(id, v);
}
for k in to_remove {
metadata.remove(&k);
}
Ok(Resolve {
graph: g,
features: HashMap::new(),
replacements: replacements,
checksums: checksums,
metadata: metadata,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a chest is *not* a path source, then we're probably in a situation
// such as `craft install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members()
.filter(|p| p.package_id().source_id().is_path())
.collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package, config: &Config, ret: &mut HashMap<String, SourceId>) {
let replace = pkg.manifest().replace();
let deps = pkg.dependencies()
.iter()
.chain(replace.iter().map(|p| &p.1))
            .filter(|d| !ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Craft.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>,
}
impl fmt::Display for EncodablePackageId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.name, self.version)?;
if let Some(ref s) = self.source {
write!(f, " ({})", s.to_url())?;
}
Ok(())
}
}
impl FromStr for EncodablePackageId {
type Err = Box<CraftError>;
fn from_str(s: &str) -> CraftResult<EncodablePackageId> {
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(s).ok_or_else(|| internal("invalid serialized PackageId"))?;
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source_id = match captures.at(3) {
Some(s) => Some(SourceId::from_url(s)?),
None => None,
};
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id,
})
}
}
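// Hedged illustration (not part of the original source): the textual form handled by
// the `Display`/`FromStr` pair above is the usual lock-file triple; the exact source
// URL below is only an example.
//
//     let id: EncodablePackageId =
//         "libc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)".parse()?;
//     assert_eq!(id.name, "libc");
//     assert_eq!(id.version, "0.2.1");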
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.to_string().encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
String::decode(d).and_then(|string| {
string.parse::<EncodablePackageId>()
.map_err(|e| d.error(&e.to_string()))
})
}
}
pub struct WorkspaceResolve<'a, 'cfg: 'a> {
pub ws: &'a Workspace<'cfg>,
pub resolve: &'a Resolve,
pub use_root_key: bool,
}
impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect();
ids.sort();
let root = self.ws
.members()
.max_by_key(|member| member.name())
.unwrap()
.package_id();
let encodable = ids.iter()
.filter_map(|&id| {
if self.use_root_key && root == id {
return None;
}
Some(encodable_resolve_node(id, self.resolve))
})
.collect::<Vec<_>>();
let mut metadata = self.resolve.metadata.clone();
        for id in ids.iter().filter(|id| !id.source_id().is_path()) {
let checksum = match self.resolve.checksums[*id] {
Some(ref s) => &s[..],
None => "<none>",
};
let id = encodable_package_id(id);
metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string());
}
let metadata = if metadata.len() == 0 {
None
} else {
Some(metadata)
};
let root = if self.use_root_key {
Some(encodable_resolve_node(&root, self.resolve))
} else {
None
};
EncodableResolve {
package: Some(encodable),
root: root,
metadata: metadata,
}
.encode(s)
}
}
fn encodable_resolve_node(id: &PackageId, resolve: &Resolve) -> EncodableDependency {
let (replace, deps) = match resolve.replacement(id) {
Some(id) => (Some(encodable_package_id(id)), None),
None => {
let mut deps = resolve.graph
.edges(id)
.into_iter()
.flat_map(|a| a)
.map(encodable_package_id)
.collect::<Vec<_>>();
deps.sort();
(None, Some(deps))
}
};
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().clone())
};
EncodableDependency {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
dependencies: deps,
replace: replace,
}
}
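// Hedged illustration (not part of the original source): one node built above ends up
// in the lock file roughly as the familiar TOML shape below (all values are examples).
//
//     [[package]]
//     name = "foo"
//     version = "0.1.0"
//     source = "registry+https://github.com/rust-lang/crates.io-index"
//     dependencies = [
//         "libc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
//     ]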
fn encodable_package_id(id: &PackageId) -> EncodablePackageId | {
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().with_precise(None))
};
EncodablePackageId {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
}
} | identifier_body |
|
encode.rs | use std::collections::{HashMap, HashSet, BTreeMap};
use std::fmt;
use std::str::FromStr;
use regex::Regex;
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use package::Package;
use package_id::PackageId;
use source::SourceId;
use util::{CraftResult, Graph, Config, internal, ChainError, CraftError};
use workspace::Workspace;
use super::Resolve;
#[derive(RustcEncodable, RustcDecodable, Debug)]
pub struct EncodableResolve {
package: Option<Vec<EncodableDependency>>,
/// `root` is optional to allow forward compatibility.
root: Option<EncodableDependency>,
metadata: Option<Metadata>,
}
pub type Metadata = BTreeMap<String, String>;
impl EncodableResolve {
pub fn into_resolve(self, ws: &Workspace) -> CraftResult<Resolve> {
let path_deps = build_path_deps(ws);
let packages = {
let mut packages = self.package.unwrap_or(Vec::new());
if let Some(root) = self.root {
packages.insert(0, root);
}
packages
};
// `PackageId`s in the lock file don't include the `source` part
// for workspace members, so we reconstruct proper ids.
let (live_pkgs, all_pkgs) = {
let mut live_pkgs = HashMap::new();
let mut all_pkgs = HashSet::new();
for pkg in packages.iter() {
let enc_id = EncodablePackageId {
name: pkg.name.clone(),
version: pkg.version.clone(),
source: pkg.source.clone(),
};
                if !all_pkgs.insert(enc_id.clone()) {
return Err(internal(format!("package `{}` is specified twice in the lockfile", pkg.name)));
}
let id = match pkg.source.as_ref().or(path_deps.get(&pkg.name)) {
// We failed to find a local package in the workspace.
// It must have been removed and should be ignored.
None => continue,
Some(source) => PackageId::new(&pkg.name, &pkg.version, source)?,
};
assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none())
}
(live_pkgs, all_pkgs)
};
let lookup_id = |enc_id: &EncodablePackageId| -> CraftResult<Option<PackageId>> {
match live_pkgs.get(enc_id) {
Some(&(ref id, _)) => Ok(Some(id.clone())),
None => {
if all_pkgs.contains(enc_id) {
// Package is found in the lockfile, but it is
// no longer a member of the workspace.
Ok(None)
} else {
Err(internal(format!("package `{}` is specified as a dependency, but is missing from the \
package list",
enc_id)))
}
}
}
};
let g = {
let mut g = Graph::new();
for &(ref id, _) in live_pkgs.values() {
g.add(id.clone(), &[]);
}
for &(ref id, ref pkg) in live_pkgs.values() {
let deps = match pkg.dependencies {
Some(ref deps) => deps,
None => continue,
};
for edge in deps.iter() {
if let Some(to_depend_on) = lookup_id(edge)? {
g.link(id.clone(), to_depend_on);
}
}
}
g
};
let replacements = {
let mut replacements = HashMap::new();
for &(ref id, ref pkg) in live_pkgs.values() {
if let Some(ref replace) = pkg.replace {
assert!(pkg.dependencies.is_none());
if let Some(replace_id) = lookup_id(replace)? {
replacements.insert(id.clone(), replace_id);
}
}
}
replacements
};
let mut metadata = self.metadata.unwrap_or(BTreeMap::new());
// Parse out all package checksums. After we do this we can be in a few
// situations:
//
// * We parsed no checksums. In this situation we're dealing with an old
// lock file and we're gonna fill them all in.
// * We parsed some checksums, but not one for all packages listed. It
// could have been the case that some were listed, then an older Craft
// client added more dependencies, and now we're going to fill in the
// missing ones.
// * There are too many checksums listed, indicative of an older Craft
// client removing a package but not updating the checksums listed.
//
// In all of these situations they're part of normal usage, so we don't
// really worry about it. We just try to slurp up as many checksums as
// possible.
let mut checksums = HashMap::new();
let prefix = "checksum ";
let mut to_remove = Vec::new();
for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) {
to_remove.push(k.to_string());
let k = &k[prefix.len()..];
let enc_id: EncodablePackageId = k.parse()
.chain_error(|| internal("invalid encoding of checksum in lockfile"))?;
let id = match lookup_id(&enc_id) {
Ok(Some(id)) => id,
_ => continue,
};
let v = if v == "<none>" {
None
} else {
Some(v.to_string())
};
checksums.insert(id, v);
}
for k in to_remove {
metadata.remove(&k);
}
Ok(Resolve {
graph: g,
features: HashMap::new(),
replacements: replacements,
checksums: checksums,
metadata: metadata,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a chest is *not* a path source, then we're probably in a situation
// such as `craft install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members()
.filter(|p| p.package_id().source_id().is_path())
.collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package, config: &Config, ret: &mut HashMap<String, SourceId>) {
let replace = pkg.manifest().replace();
let deps = pkg.dependencies()
.iter()
.chain(replace.iter().map(|p| &p.1))
            .filter(|d| !ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Craft.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>,
}
impl fmt::Display for EncodablePackageId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.name, self.version)?;
if let Some(ref s) = self.source {
write!(f, " ({})", s.to_url())?;
}
Ok(())
}
}
impl FromStr for EncodablePackageId {
type Err = Box<CraftError>;
fn | (s: &str) -> CraftResult<EncodablePackageId> {
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(s).ok_or_else(|| internal("invalid serialized PackageId"))?;
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source_id = match captures.at(3) {
Some(s) => Some(SourceId::from_url(s)?),
None => None,
};
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id,
})
}
}
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.to_string().encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
String::decode(d).and_then(|string| {
string.parse::<EncodablePackageId>()
.map_err(|e| d.error(&e.to_string()))
})
}
}
pub struct WorkspaceResolve<'a, 'cfg: 'a> {
pub ws: &'a Workspace<'cfg>,
pub resolve: &'a Resolve,
pub use_root_key: bool,
}
impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect();
ids.sort();
let root = self.ws
.members()
.max_by_key(|member| member.name())
.unwrap()
.package_id();
let encodable = ids.iter()
.filter_map(|&id| {
if self.use_root_key && root == id {
return None;
}
Some(encodable_resolve_node(id, self.resolve))
})
.collect::<Vec<_>>();
let mut metadata = self.resolve.metadata.clone();
        for id in ids.iter().filter(|id| !id.source_id().is_path()) {
let checksum = match self.resolve.checksums[*id] {
Some(ref s) => &s[..],
None => "<none>",
};
let id = encodable_package_id(id);
metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string());
}
let metadata = if metadata.len() == 0 {
None
} else {
Some(metadata)
};
let root = if self.use_root_key {
Some(encodable_resolve_node(&root, self.resolve))
} else {
None
};
EncodableResolve {
package: Some(encodable),
root: root,
metadata: metadata,
}
.encode(s)
}
}
fn encodable_resolve_node(id: &PackageId, resolve: &Resolve) -> EncodableDependency {
let (replace, deps) = match resolve.replacement(id) {
Some(id) => (Some(encodable_package_id(id)), None),
None => {
let mut deps = resolve.graph
.edges(id)
.into_iter()
.flat_map(|a| a)
.map(encodable_package_id)
.collect::<Vec<_>>();
deps.sort();
(None, Some(deps))
}
};
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().clone())
};
EncodableDependency {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
dependencies: deps,
replace: replace,
}
}
fn encodable_package_id(id: &PackageId) -> EncodablePackageId {
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().with_precise(None))
};
EncodablePackageId {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
}
}
| from_str | identifier_name |
encode.rs | use std::collections::{HashMap, HashSet, BTreeMap};
use std::fmt;
use std::str::FromStr;
use regex::Regex;
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use package::Package;
use package_id::PackageId;
use source::SourceId;
use util::{CraftResult, Graph, Config, internal, ChainError, CraftError};
use workspace::Workspace;
use super::Resolve;
#[derive(RustcEncodable, RustcDecodable, Debug)]
pub struct EncodableResolve {
package: Option<Vec<EncodableDependency>>,
/// `root` is optional to allow forward compatibility.
root: Option<EncodableDependency>,
metadata: Option<Metadata>,
}
pub type Metadata = BTreeMap<String, String>;
impl EncodableResolve {
pub fn into_resolve(self, ws: &Workspace) -> CraftResult<Resolve> {
let path_deps = build_path_deps(ws);
let packages = {
let mut packages = self.package.unwrap_or(Vec::new());
if let Some(root) = self.root {
packages.insert(0, root);
}
packages
};
// `PackageId`s in the lock file don't include the `source` part
// for workspace members, so we reconstruct proper ids.
let (live_pkgs, all_pkgs) = {
let mut live_pkgs = HashMap::new();
let mut all_pkgs = HashSet::new();
for pkg in packages.iter() {
let enc_id = EncodablePackageId {
name: pkg.name.clone(),
version: pkg.version.clone(),
source: pkg.source.clone(),
};
                if !all_pkgs.insert(enc_id.clone()) {
return Err(internal(format!("package `{}` is specified twice in the lockfile", pkg.name)));
}
let id = match pkg.source.as_ref().or(path_deps.get(&pkg.name)) {
// We failed to find a local package in the workspace.
// It must have been removed and should be ignored.
None => continue,
Some(source) => PackageId::new(&pkg.name, &pkg.version, source)?,
};
assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none())
}
(live_pkgs, all_pkgs)
};
let lookup_id = |enc_id: &EncodablePackageId| -> CraftResult<Option<PackageId>> {
match live_pkgs.get(enc_id) {
Some(&(ref id, _)) => Ok(Some(id.clone())),
None => {
if all_pkgs.contains(enc_id) {
// Package is found in the lockfile, but it is
// no longer a member of the workspace.
Ok(None)
} else {
Err(internal(format!("package `{}` is specified as a dependency, but is missing from the \
package list",
enc_id)))
}
}
}
};
let g = {
let mut g = Graph::new();
for &(ref id, _) in live_pkgs.values() {
g.add(id.clone(), &[]);
}
for &(ref id, ref pkg) in live_pkgs.values() {
let deps = match pkg.dependencies {
Some(ref deps) => deps,
None => continue,
};
| }
}
}
g
};
let replacements = {
let mut replacements = HashMap::new();
for &(ref id, ref pkg) in live_pkgs.values() {
if let Some(ref replace) = pkg.replace {
assert!(pkg.dependencies.is_none());
if let Some(replace_id) = lookup_id(replace)? {
replacements.insert(id.clone(), replace_id);
}
}
}
replacements
};
let mut metadata = self.metadata.unwrap_or(BTreeMap::new());
// Parse out all package checksums. After we do this we can be in a few
// situations:
//
// * We parsed no checksums. In this situation we're dealing with an old
// lock file and we're gonna fill them all in.
// * We parsed some checksums, but not one for all packages listed. It
// could have been the case that some were listed, then an older Craft
// client added more dependencies, and now we're going to fill in the
// missing ones.
// * There are too many checksums listed, indicative of an older Craft
// client removing a package but not updating the checksums listed.
//
// In all of these situations they're part of normal usage, so we don't
// really worry about it. We just try to slurp up as many checksums as
// possible.
let mut checksums = HashMap::new();
let prefix = "checksum ";
let mut to_remove = Vec::new();
for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) {
to_remove.push(k.to_string());
let k = &k[prefix.len()..];
let enc_id: EncodablePackageId = k.parse()
.chain_error(|| internal("invalid encoding of checksum in lockfile"))?;
let id = match lookup_id(&enc_id) {
Ok(Some(id)) => id,
_ => continue,
};
let v = if v == "<none>" {
None
} else {
Some(v.to_string())
};
checksums.insert(id, v);
}
for k in to_remove {
metadata.remove(&k);
}
Ok(Resolve {
graph: g,
features: HashMap::new(),
replacements: replacements,
checksums: checksums,
metadata: metadata,
})
}
}
fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> {
// If a chest is *not* a path source, then we're probably in a situation
// such as `craft install` with a lock file from a remote dependency. In
// that case we don't need to fixup any path dependencies (as they're not
// actually path dependencies any more), so we ignore them.
let members = ws.members()
.filter(|p| p.package_id().source_id().is_path())
.collect::<Vec<_>>();
let mut ret = HashMap::new();
for member in members.iter() {
ret.insert(member.package_id().name().to_string(),
member.package_id().source_id().clone());
}
for member in members.iter() {
build(member, ws.config(), &mut ret);
}
return ret;
fn build(pkg: &Package, config: &Config, ret: &mut HashMap<String, SourceId>) {
let replace = pkg.manifest().replace();
let deps = pkg.dependencies()
.iter()
.chain(replace.iter().map(|p| &p.1))
            .filter(|d| !ret.contains_key(d.name()))
.map(|d| d.source_id())
.filter(|id| id.is_path())
.filter_map(|id| id.url().to_file_path().ok())
.map(|path| path.join("Craft.toml"))
.filter_map(|path| Package::for_path(&path, config).ok())
.collect::<Vec<_>>();
for pkg in deps {
ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone());
build(&pkg, config, ret);
}
}
}
#[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct EncodableDependency {
name: String,
version: String,
source: Option<SourceId>,
dependencies: Option<Vec<EncodablePackageId>>,
replace: Option<EncodablePackageId>,
}
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)]
pub struct EncodablePackageId {
name: String,
version: String,
source: Option<SourceId>,
}
impl fmt::Display for EncodablePackageId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.name, self.version)?;
if let Some(ref s) = self.source {
write!(f, " ({})", s.to_url())?;
}
Ok(())
}
}
impl FromStr for EncodablePackageId {
type Err = Box<CraftError>;
fn from_str(s: &str) -> CraftResult<EncodablePackageId> {
let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap();
let captures = regex.captures(s).ok_or_else(|| internal("invalid serialized PackageId"))?;
let name = captures.at(1).unwrap();
let version = captures.at(2).unwrap();
let source_id = match captures.at(3) {
Some(s) => Some(SourceId::from_url(s)?),
None => None,
};
Ok(EncodablePackageId {
name: name.to_string(),
version: version.to_string(),
source: source_id,
})
}
}
impl Encodable for EncodablePackageId {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.to_string().encode(s)
}
}
impl Decodable for EncodablePackageId {
fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> {
String::decode(d).and_then(|string| {
string.parse::<EncodablePackageId>()
.map_err(|e| d.error(&e.to_string()))
})
}
}
pub struct WorkspaceResolve<'a, 'cfg: 'a> {
pub ws: &'a Workspace<'cfg>,
pub resolve: &'a Resolve,
pub use_root_key: bool,
}
impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect();
ids.sort();
let root = self.ws
.members()
.max_by_key(|member| member.name())
.unwrap()
.package_id();
let encodable = ids.iter()
.filter_map(|&id| {
if self.use_root_key && root == id {
return None;
}
Some(encodable_resolve_node(id, self.resolve))
})
.collect::<Vec<_>>();
let mut metadata = self.resolve.metadata.clone();
        for id in ids.iter().filter(|id| !id.source_id().is_path()) {
let checksum = match self.resolve.checksums[*id] {
Some(ref s) => &s[..],
None => "<none>",
};
let id = encodable_package_id(id);
metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string());
}
let metadata = if metadata.len() == 0 {
None
} else {
Some(metadata)
};
let root = if self.use_root_key {
Some(encodable_resolve_node(&root, self.resolve))
} else {
None
};
EncodableResolve {
package: Some(encodable),
root: root,
metadata: metadata,
}
.encode(s)
}
}
fn encodable_resolve_node(id: &PackageId, resolve: &Resolve) -> EncodableDependency {
let (replace, deps) = match resolve.replacement(id) {
Some(id) => (Some(encodable_package_id(id)), None),
None => {
let mut deps = resolve.graph
.edges(id)
.into_iter()
.flat_map(|a| a)
.map(encodable_package_id)
.collect::<Vec<_>>();
deps.sort();
(None, Some(deps))
}
};
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().clone())
};
EncodableDependency {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
dependencies: deps,
replace: replace,
}
}
fn encodable_package_id(id: &PackageId) -> EncodablePackageId {
let source = if id.source_id().is_path() {
None
} else {
Some(id.source_id().with_precise(None))
};
EncodablePackageId {
name: id.name().to_string(),
version: id.version().to_string(),
source: source,
}
} | for edge in deps.iter() {
if let Some(to_depend_on) = lookup_id(edge)? {
g.link(id.clone(), to_depend_on); | random_line_split |
technique.rs | use serde::{Deserialize, Serialize};
use serde_json::Value;
use crate::error::*;
use std::fs;
use std::path::Path;
use regex::Regex;
use lazy_static::lazy_static;
use toml;
use nom::combinator::*;
use nom::sequence::*;
use nom::bytes::complete::*;
use nom::character::complete::*;
use nom::branch::alt;
use nom::multi::many1;
use nom::IResult;
use std::str;
#[derive(Serialize, Deserialize)]
struct Technique {
name: String,
description: String,
version: String,
bundle_name: String,
parameter: Vec<Value>,
bundle_args: Vec<String>,
method_calls: Vec<MethodCall>,
}
#[derive(Serialize, Deserialize)]
struct MethodCall {
method_name: String,
class_context: String,
args: Vec<String>,
component: String,
}
pub fn | (json_file: &Path, rl_file: &Path) -> Result<()> {
let config_data = fs::read_to_string("data/config.toml").expect("Cannot read config.toml file");
let config: toml::Value = toml::from_str(&config_data).expect("Invalig config.toml file");
// we use if let for error conversion
// we don't use match for better linear reading
let json_data = fs::read_to_string(&json_file);
if json_data.is_err() { return Err(Error::User(format!("Cannot read file {}", json_file.to_string_lossy()))) }
let technique = serde_json::from_str::<Technique>(&json_data.unwrap());
if technique.is_err() { return Err(Error::User(format!("Invalid technique in file {}", json_file.to_string_lossy()))) }
let rl_technique = translate(&config, &technique.unwrap())?;
if fs::write(&rl_file, rl_technique).is_err() { return Err(Error::User(format!("Cannot write file {}", rl_file.to_string_lossy()))) }
Ok(())
}
fn translate(config: &toml::Value, technique: &Technique) -> Result<String> {
let parameters_meta = serde_json::to_string(&technique.parameter);
if parameters_meta.is_err() { return Err(Error::User("Unable to parse technique file".to_string())) }
let parameters = technique.bundle_args.join(",");
let calls = map_strings_results(
technique.method_calls.iter(), |c| translate_call(config, c), "\n"
)?;
let out = format!(r#"@format=0
# This file has been generated with rltranslate
@name="{name}"
@description="{description}"
@version="{version}"
@parameters={parameters_meta}
resource {bundle_name}({parameters})
{bundle_name} state technique() {{
{calls}
}}
"#, description=technique.description,
version=technique.version,
name=technique.name,
bundle_name=technique.bundle_name,
parameters_meta=parameters_meta.unwrap(),
parameters=parameters,
calls=calls);
Ok(out)
}
fn translate_call(config: &toml::Value, call: &MethodCall) -> Result<String> {
lazy_static! {
static ref RE:Regex = Regex::new(r"^([a-z]+)_(\w+)$").unwrap();
}
// separate resource and state
let (resource,state) = match RE.captures(&call.method_name) {
Some(caps) => (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()),
None => return Err(Error::User(format!("Invalid method name '{}'", call.method_name))),
};
// split argument list
let rconf = match config.get("resources") {
None => return Err(Error::User("No resources section in config.toml".into())),
Some(m) => m,
};
let res_arg_v = match rconf.get(resource) {
None => toml::value::Value::Integer(1),
Some(r) => r.clone(),
};
let res_arg_count: usize = match res_arg_v.as_integer() {
None => return Err(Error::User(format!("Resource prefix '{}' must have a number as its parameter count",resource))),
Some(v) => v as usize,
};
let it = &mut call.args.iter();
let res_args = map_strings_results(it.take(res_arg_count), |x| translate_arg(config,x), ",")?;
let st_args = map_strings_results(it, |x| translate_arg(config,x), ",")?;
// call formating
let call_str = format!("{}({}).{}({})", resource, res_args, state, st_args);
let out_state = if call.class_context == "any" {
format!(" {}", call_str)
} else {
let condition = translate_condition(config, &call.class_context)?;
format!(" if {} => {}", condition, call_str)
};
// outcome detection and formating
let mconf = match config.get("methods") {
None => return Err(Error::User("No methods section in config.toml".into())),
Some(m) => m,
};
let method = match mconf.get(&call.method_name) {
None => return Err(Error::User(format!("Unknown generic method call: {}",&call.method_name))),
Some(m) => m,
};
let class_prefix = match method.get("class_prefix") {
None => return Err(Error::User(format!("Undefined class_prefix for {}",&call.method_name))),
Some(m) => m.as_str().unwrap(),
};
let class_parameter_id = match method.get("class_parameter_id") {
None => return Err(Error::User(format!("Undefined class_parameter_id for {}",&call.method_name))),
Some(m) => m.as_integer().unwrap(),
};
let class_parameter_value = &call.args[class_parameter_id as usize];
let canonic_parameter = canonify(class_parameter_value);
let outcome = format!(" as {}_{}",class_prefix,canonic_parameter);
// TODO remove outcome if there is no usage
Ok(format!(" @component = \"{}\"\n{}{}", &call.component, out_state, outcome))
}
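// Hedged illustration (not part of the original source): assuming config.toml carries a
// resources table (the default parameter count of 1 then applies to the `package` prefix)
// and a methods entry such as
//     [methods.package_install]
//     class_prefix = "package_install"
//     class_parameter_id = 0
// a JSON call to "package_install" with args ["vim"], component "Package install" and
// class_context "any" is emitted roughly as
//       @component = "Package install"
//       package("vim").install() as package_install_vim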
fn canonify(input: &str) -> String {
let s = input.as_bytes().iter()
.map(|x|
if x.is_ascii_alphanumeric() || *x == b'_' {
*x
} else {
b'_'
}
)
.collect::<Vec<u8>>();
str::from_utf8(&s).expect(&format!("Canonify failed on {}",input)).to_owned()
}
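// Hedged sketch (not part of the original source): a minimal unit test for the
// byte-wise replacement rule implemented by `canonify` above; the expected strings
// follow directly from that rule.
#[test]
fn canonify_replaces_non_identifier_bytes() {
    assert_eq!(canonify("Copy file"), "Copy_file");
    assert_eq!(canonify("/etc/fstab"), "_etc_fstab");
}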
#[derive(Clone)]
struct CFVariable {
ns: Option<String>,
name: String,
}
fn parse_cfvariable(i: &str) -> IResult<&str,CFVariable> {
map(tuple((
opt(map(terminated(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),tag(".")),|x: &str| x.into())),
map(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),|x: &str| x.into()),
)), |(ns, name)| CFVariable { ns, name })(i)
}
#[derive(Clone)]
enum CFStringElt {
Static(String), // static content
Variable(CFVariable), // variable name
}
impl CFStringElt {
fn to_string(&self) -> Result<String> {
Ok(match self {
CFStringElt::Static(s) => s.to_string(),
CFStringElt::Variable(v) => {
match &v.ns {
None => v.name.clone(), // a parameter
Some(ns) => match ns.as_ref() {
"const" => (match v.name.as_ref() {
"dollar" => "$",
"dirsep" => "/",
"endl" => "\\n",
"n" => "\\n",
"r" => "\\r",
"t" => "\\t",
_ => return Err(Error::User(format!("Unknown constant '{}.{}'", ns, v.name))),
}).into(),
"sys" => return Err(Error::User(format!("Not implemented variable namespace sys '{}.{}'", ns, v.name))),
"this" => return Err(Error::User(format!("Unsupported variable namespace this '{}.{}'", ns, v.name))),
ns => format!("${{{}.{}}}",ns,v.name),
},
}
// TODO
// - array ->?
// - list ->?
},
})
}
}
fn parse_cfstring(i: &str) -> IResult<&str,Vec<CFStringElt>> {
// There is a rest inside so this just serve as a guard
all_consuming(
alt((
many1(alt((
// variable ${}
map(
delimited(tag("${"), parse_cfvariable, tag("}")),
CFStringElt::Variable),
// variable $()
map(
delimited(tag("$("), parse_cfvariable, tag(")")),
CFStringElt::Variable),
// constant
map(take_until("$"), |s: &str| CFStringElt::Static(s.into())),
// end of string
map(preceded(
peek(anychar), // do no take rest if we are already at the end
rest),
|s: &str| CFStringElt::Static(s.into())),
))),
// empty string
value(vec![CFStringElt::Static("".into())], not(anychar)),
))
)(i)
}
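// Hedged illustration (not part of the original source): feeding "ip=${node.ip}" to
// parse_cfstring above yields two elements, roughly
//     [Static("ip="), Variable(CFVariable { ns: Some("node"), name: "ip" })]
// while a string without any '$' comes back as a single Static element.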
fn translate_arg(config: &toml::Value, arg: &str) -> Result<String> {
let var = match parse_cfstring(arg) {
Err(_) => return Err(Error::User(format!("Invalid variable syntax in '{}'", arg))),
Ok((_,o)) => o
};
map_strings_results(var.iter(), |x| Ok(format!("\"{}\"",x.to_string()?)), ",")
}
fn translate_condition(config: &toml::Value, cond: &str) -> Result<String> {
lazy_static! {
static ref METHOD_RE:Regex = Regex::new(r"^(\w+)_(\w+)$").unwrap();
static ref OS_RE:Regex = Regex::new(r"^([a-zA-Z]+)(_(\d+))*$").unwrap();
}
// detect method outcome class
if let Some(caps) = METHOD_RE.captures(cond) {
let (method, status) = (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str());
if vec![ "kept", "success" ].iter().any(|x| x == &status) {
return Ok(format!("{} =~ success", method));
} else if vec![ "error", "not_ok", "failed", "denied", "timeout" ].iter().any(|x| x == &status) {
return Ok(format!("{} =~ error", method));
} else if vec![ "repaired", "ok", "reached" ].iter().any(|x| x == &status) {
return Ok(format!("{} =~ {}", method, status));
}
};
// detect system classes
if let Some(caps) = OS_RE.captures(cond) {
// TODO: here we consider any match to be an OS match; should we have an OS whitelist?
// OS classes are a global enum, so we don't have to say which enum to match
return Ok(cond.into());
}
// TODO detect condition expressions
Err(Error::User(format!("Don't know how to handle class '{}'", cond)))
}
//#[cfg(test)]
//mod tests {
// use super::*;
//
// #[test]
// fn test_json() {
// let data = r#"
//{
// "name": "variable",
// "description": "",
// "version": "1.0",
// "bundle_name": "variable",
// "parameter": [
// {
// "constraints": {
// "allow_whitespace_string": false,
// "allow_empty_string": false,
// "max_length": 16384
// },
// "name": "iname",
// "id": "53042794-4d2a-41c7-a690-b0d760a78a51"
// },
// {
// "constraints": {
// "allow_whitespace_string": false,
// "allow_empty_string": false,
// "max_length": 16384
// },
// "name": "ip",
// "id": "aa74f824-6085-46b4-94b4-42803760fd61"
// }
// ],
// "bundle_args": [
// "iname",
// "ip"
// ],
// "method_calls": [
// {
// "method_name": "variable_string",
// "class_context": "any",
// "args": [
// "foo",
// "bar",
// "vim"
// ],
// "component": "Variable string"
// },
// {
// "method_name": "package_state",
// "class_context": "any",
// "args": [
// "${foo.bar}",
// "",
// "",
// "",
// "present"
// ],
// "component": "Package state"
// }
// ]
//}
//"#;
// let p: Result<Technique> = serde_json::from_str(data);
// assert!(p.is_ok());
// //assert_eq!(p.unwrap().name, "variable".to_string());
// let s = translate(&p.unwrap());
// assert!(s.is_ok());
// print!("{}",s.unwrap());
// }
//}
| translate_file | identifier_name |
technique.rs | use serde::{Deserialize, Serialize};
use serde_json::Value;
use crate::error::*;
use std::fs;
use std::path::Path;
use regex::Regex;
use lazy_static::lazy_static;
use toml;
use nom::combinator::*;
use nom::sequence::*;
use nom::bytes::complete::*;
use nom::character::complete::*;
use nom::branch::alt;
use nom::multi::many1;
use nom::IResult;
use std::str;
#[derive(Serialize, Deserialize)]
struct Technique {
name: String,
description: String,
version: String,
bundle_name: String,
parameter: Vec<Value>,
bundle_args: Vec<String>,
method_calls: Vec<MethodCall>,
}
#[derive(Serialize, Deserialize)]
struct MethodCall {
method_name: String,
class_context: String,
args: Vec<String>,
component: String,
}
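// Read a technique description from JSON, translate it, and write the resulting .rl technique file.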
pub fn translate_file(json_file: &Path, rl_file: &Path) -> Result<()> {
let config_data = fs::read_to_string("data/config.toml").expect("Cannot read config.toml file");
let config: toml::Value = toml::from_str(&config_data).expect("Invalid config.toml file");
// we use `if let` for error conversion
// we don't use `match`, to keep the reading linear
let json_data = fs::read_to_string(&json_file);
if json_data.is_err() { return Err(Error::User(format!("Cannot read file {}", json_file.to_string_lossy()))) }
let technique = serde_json::from_str::<Technique>(&json_data.unwrap());
if technique.is_err() { return Err(Error::User(format!("Invalid technique in file {}", json_file.to_string_lossy()))) }
let rl_technique = translate(&config, &technique.unwrap())?;
if fs::write(&rl_file, rl_technique).is_err() { return Err(Error::User(format!("Cannot write file {}", rl_file.to_string_lossy()))) }
Ok(())
}
fn translate(config: &toml::Value, technique: &Technique) -> Result<String> {
let parameters_meta = serde_json::to_string(&technique.parameter);
if parameters_meta.is_err() { return Err(Error::User("Unable to parse technique file".to_string())) }
let parameters = technique.bundle_args.join(",");
let calls = map_strings_results(
technique.method_calls.iter(), |c| translate_call(config, c), "\n"
)?;
let out = format!(r#"@format=0
# This file has been generated with rltranslate
@name="{name}"
@description="{description}"
@version="{version}"
@parameters={parameters_meta}
resource {bundle_name}({parameters})
{bundle_name} state technique() {{
{calls}
}}
"#, description=technique.description,
version=technique.version,
name=technique.name,
bundle_name=technique.bundle_name,
parameters_meta=parameters_meta.unwrap(),
parameters=parameters,
calls=calls);
Ok(out)
}
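// Translate a single generic method call into a `resource(args).state(args)` line, prefixed by its class condition and followed by an `as` outcome alias.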
fn translate_call(config: &toml::Value, call: &MethodCall) -> Result<String> {
lazy_static! {
static ref RE:Regex = Regex::new(r"^([a-z]+)_(\w+)$").unwrap();
}
// separate resource and state
let (resource,state) = match RE.captures(&call.method_name) {
Some(caps) => (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()),
None => return Err(Error::User(format!("Invalid method name '{}'", call.method_name))),
};
// split argument list
let rconf = match config.get("resources") {
None => return Err(Error::User("No resources section in config.toml".into())),
Some(m) => m,
};
let res_arg_v = match rconf.get(resource) {
None => toml::value::Value::Integer(1),
Some(r) => r.clone(),
};
let res_arg_count: usize = match res_arg_v.as_integer() {
None => return Err(Error::User(format!("Resource prefix '{}' must have a number as its parameter count",resource))),
Some(v) => v as usize,
};
let it = &mut call.args.iter();
let res_args = map_strings_results(it.take(res_arg_count), |x| translate_arg(config,x), ",")?;
let st_args = map_strings_results(it, |x| translate_arg(config,x), ",")?;
// call formatting
let call_str = format!("{}({}).{}({})", resource, res_args, state, st_args);
let out_state = if call.class_context == "any" {
format!(" {}", call_str)
} else {
let condition = translate_condition(config, &call.class_context)?;
format!(" if {} => {}", condition, call_str)
};
// outcome detection and formatting
let mconf = match config.get("methods") {
None => return Err(Error::User("No methods section in config.toml".into())),
Some(m) => m,
};
let method = match mconf.get(&call.method_name) {
None => return Err(Error::User(format!("Unknown generic method call: {}",&call.method_name))),
Some(m) => m,
};
let class_prefix = match method.get("class_prefix") {
None => return Err(Error::User(format!("Undefined class_prefix for {}",&call.method_name))),
Some(m) => m.as_str().unwrap(),
};
let class_parameter_id = match method.get("class_parameter_id") {
None => return Err(Error::User(format!("Undefined class_parameter_id for {}",&call.method_name))),
Some(m) => m.as_integer().unwrap(),
};
let class_parameter_value = &call.args[class_parameter_id as usize];
let canonic_parameter = canonify(class_parameter_value);
let outcome = format!(" as {}_{}",class_prefix,canonic_parameter);
// TODO remove outcome if there is no usage
Ok(format!(" @component = \"{}\"\n{}{}", &call.component, out_state, outcome))
}
fn canonify(input: &str) -> String {
let s = input.as_bytes().iter()
.map(|x|
if x.is_ascii_alphanumeric() || *x == b'_' {
*x
} else {
b'_'
}
)
.collect::<Vec<u8>>();
str::from_utf8(&s).expect(&format!("Canonify failed on {}",input)).to_owned()
}
#[derive(Clone)]
struct CFVariable {
ns: Option<String>,
name: String,
}
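// Parse `[namespace.]name`, where both parts are alphanumeric/underscore and the namespace is optional.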
fn parse_cfvariable(i: &str) -> IResult<&str,CFVariable> {
map(tuple((
opt(map(terminated(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),tag(".")),|x: &str| x.into())),
map(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),|x: &str| x.into()),
)), |(ns, name)| CFVariable { ns, name })(i)
}
#[derive(Clone)]
enum CFStringElt {
Static(String), // static content
Variable(CFVariable), // variable name
}
impl CFStringElt {
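// Render one element back to text: known `const.*` names become literal characters, `sys`/`this` are rejected, and any other namespace becomes a `${ns.name}` interpolation.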
fn to_string(&self) -> Result<String> {
Ok(match self {
CFStringElt::Static(s) => s.to_string(),
CFStringElt::Variable(v) => {
match &v.ns {
None => v.name.clone(), // a parameter
Some(ns) => match ns.as_ref() {
"const" => (match v.name.as_ref() {
"dollar" => "$",
"dirsep" => "/",
"endl" => "\\n",
"n" => "\\n",
"r" => "\\r",
"t" => "\\t",
_ => return Err(Error::User(format!("Unknown constant '{}.{}'", ns, v.name))),
}).into(),
"sys" => return Err(Error::User(format!("Not implemented variable namespace sys '{}.{}'", ns, v.name))),
"this" => return Err(Error::User(format!("Unsupported variable namespace this '{}.{}'", ns, v.name))),
ns => format!("${{{}.{}}}",ns,v.name),
},
}
// TODO
// - array ->?
// - list ->?
},
})
}
}
fn parse_cfstring(i: &str) -> IResult<&str,Vec<CFStringElt>> {
// There is a `rest` inside, so this just serves as a guard
all_consuming(
alt((
many1(alt((
// variable ${}
map(
delimited(tag("${"), parse_cfvariable, tag("}")),
CFStringElt::Variable),
// variable $()
map(
delimited(tag("$("), parse_cfvariable, tag(")")),
CFStringElt::Variable),
// constant
map(take_until("$"), |s: &str| CFStringElt::Static(s.into())),
// end of string
map(preceded(
peek(anychar), // do not take the rest if we are already at the end
rest),
|s: &str| CFStringElt::Static(s.into())),
))), |
fn translate_arg(config: &toml::Value, arg: &str) -> Result<String> {
let var = match parse_cfstring(arg) {
Err(_) => return Err(Error::User(format!("Invalid variable syntax in '{}'", arg))),
Ok((_,o)) => o
};
map_strings_results(var.iter(), |x| Ok(format!("\"{}\"",x.to_string()?)), ",")
}
fn translate_condition(config: &toml::Value, cond: &str) -> Result<String> {
lazy_static! {
static ref METHOD_RE:Regex = Regex::new(r"^(\w+)_(\w+)$").unwrap();
static ref OS_RE:Regex = Regex::new(r"^([a-zA-Z]+)(_(\d+))*$").unwrap();
}
// detect method outcome class
if let Some(caps) = METHOD_RE.captures(cond) {
let (method, status) = (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str());
if vec![ "kept", "success" ].iter().any(|x| x == &status) {
return Ok(format!("{} =~ success", method));
} else if vec![ "error", "not_ok", "failed", "denied", "timeout" ].iter().any(|x| x == &status) {
return Ok(format!("{} =~ error", method));
} else if vec![ "repaired", "ok", "reached" ].iter().any(|x| x == &status) {
return Ok(format!("{} =~ {}", method, status));
}
};
// detect system classes
if let Some(caps) = OS_RE.captures(cond) {
// TODO: here we consider any match to be an OS match; should we have an OS whitelist?
// OS classes are a global enum, so we don't have to say which enum to match
return Ok(cond.into());
}
// TODO detect condition expressions
Err(Error::User(format!("Don't know how to handle class '{}'", cond)))
}
//#[cfg(test)]
//mod tests {
// use super::*;
//
// #[test]
// fn test_json() {
// let data = r#"
//{
// "name": "variable",
// "description": "",
// "version": "1.0",
// "bundle_name": "variable",
// "parameter": [
// {
// "constraints": {
// "allow_whitespace_string": false,
// "allow_empty_string": false,
// "max_length": 16384
// },
// "name": "iname",
// "id": "53042794-4d2a-41c7-a690-b0d760a78a51"
// },
// {
// "constraints": {
// "allow_whitespace_string": false,
// "allow_empty_string": false,
// "max_length": 16384
// },
// "name": "ip",
// "id": "aa74f824-6085-46b4-94b4-42803760fd61"
// }
// ],
// "bundle_args": [
// "iname",
// "ip"
// ],
// "method_calls": [
// {
// "method_name": "variable_string",
// "class_context": "any",
// "args": [
// "foo",
// "bar",
// "vim"
// ],
// "component": "Variable string"
// },
// {
// "method_name": "package_state",
// "class_context": "any",
// "args": [
// "${foo.bar}",
// "",
// "",
// "",
// "present"
// ],
// "component": "Package state"
// }
// ]
//}
//"#;
// let p: Result<Technique> = serde_json::from_str(data);
// assert!(p.is_ok());
// //assert_eq!(p.unwrap().name, "variable".to_string());
// let s = translate(&p.unwrap());
// assert!(s.is_ok());
// print!("{}",s.unwrap());
// }
//} | // empty string
value(vec![CFStringElt::Static("".into())], not(anychar)),
))
)(i)
} | random_line_split |
SignalDef.rs | /* Return frame for iretq */
pub rip: u64,
pub cs: u64,
pub eflags: u64,
pub rsp: u64,
pub ss: u64,
/* top of stack page */
}
impl PtRegs {
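// Copy the register values saved in a SigContext back into this trap frame.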
pub fn Set(&mut self, ctx: &SigContext) {
self.r15 = ctx.r15;
self.r14 = ctx.r14;
self.r13 = ctx.r13;
self.r12 = ctx.r12;
self.rbp = ctx.rbp;
self.rbx = ctx.rbx;
self.r11 = ctx.r11;
self.r10 = ctx.r10;
self.r9 = ctx.r9;
self.r8 = ctx.r8;
self.rax = ctx.rax;
self.rcx = ctx.rcx;
self.rdx = ctx.rdx;
self.rsi = ctx.rsi;
self.rdi = ctx.rdi;
self.orig_rax = ctx.rax;
self.rip = ctx.rip;
self.cs = ctx.cs as u64;
self.eflags = ctx.eflags;
self.rsp = ctx.rsp;
self.ss = ctx.ss as u64;
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigRetInfo {
pub sigInfoAddr: u64,
pub sigCtxAddr: u64,
pub ret: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct Kill {
pub pid: i32,
pub uid: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigTimer {
pub tid: i32,
pub overrun: i32,
pub sigval: u64,
pub sysPrivate: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigRt {
pub pid: i32,
pub uid: u32,
pub sigval: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigChld {
pub pid: i32,
//child
pub uid: u32,
//sender's uid
pub status: i32,
//Exit code
pub uTime: i32,
pub sTime: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFault {
pub addr: u64,
pub lsb: u16,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct SignalInfo {
pub Signo: i32,
// Signal number
pub Errno: i32,
// Errno value
pub Code: i32,
// Signal code
pub _r: u32,
pub fields: [u8; 128 - 16],
}
impl<'a> Default for SignalInfo {
fn default() -> Self {
return Self {
Signo: 0,
Errno: 0,
Code: 0,
_r: 0,
fields: [0; 128 - 16]
}
}
}
impl core::fmt::Debug for SignalInfo {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SignalInfo")
.field("Signo", &self.Signo)
.field("Errno", &self.Errno)
.field("Code", &self.Code)
.finish()
}
}
impl SignalInfo {
pub fn SignalInfoPriv(sig: Signal) -> Self {
return Self {
Signo: sig.0,
Code: Self::SIGNAL_INFO_KERNEL,
..Default::default()
}
}
// FixSignalCodeForUser fixes up si_code.
//
// The si_code we get from Linux may contain the kernel-specific code in the
// top 16 bits if it's positive (e.g., from ptrace). Linux's
// copy_siginfo_to_user does
// err |= __put_user((short)from->si_code, &to->si_code);
// to mask out those bits and we need to do the same.
pub fn FixSignalCodeForUser(&mut self) {
if self.Code > 0 {
self.Code &= 0xffff;
}
}
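// The accessors below reinterpret the raw `fields` buffer as the signal-specific payload, mirroring the union inside the C siginfo_t.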
pub fn Kill(&self) -> &mut Kill {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut Kill)
}
}
pub fn SigTimer(&mut self) -> &mut SigTimer {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigTimer)
}
}
pub fn SigRt(&mut self) -> &mut SigRt {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigRt)
}
}
pub fn SigChld(&mut self) -> &mut SigChld {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigChld)
}
}
pub fn SigFault(&self) -> &mut SigFault {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigFault)
}
}
// SignalInfoUser (properly SI_USER) indicates that a signal was sent from
// a kill() or raise() syscall.
pub const SIGNAL_INFO_USER: i32 = 0;
// SignalInfoKernel (properly SI_KERNEL) indicates that the signal was sent
// by the kernel.
pub const SIGNAL_INFO_KERNEL: i32 = 0x80;
// SignalInfoTimer (properly SI_TIMER) indicates that the signal was sent
// by an expired timer.
pub const SIGNAL_INFO_TIMER: i32 = -2;
// SignalInfoTkill (properly SI_TKILL) indicates that the signal was sent
// from a tkill() or tgkill() syscall.
pub const SIGNAL_INFO_TKILL: i32 = -6;
// CLD_* codes are only meaningful for SIGCHLD.
// CLD_EXITED indicates that a task exited.
pub const CLD_EXITED: i32 = 1;
// CLD_KILLED indicates that a task was killed by a signal.
pub const CLD_KILLED: i32 = 2;
// CLD_DUMPED indicates that a task was killed by a signal and then dumped
// core.
pub const CLD_DUMPED: i32 = 3;
// CLD_TRAPPED indicates that a task was stopped by ptrace.
pub const CLD_TRAPPED: i32 = 4;
// CLD_STOPPED indicates that a thread group completed a group stop.
pub const CLD_STOPPED: i32 = 5;
// CLD_CONTINUED indicates that a group-stopped thread group was continued.
pub const CLD_CONTINUED: i32 = 6;
// SYS_* codes are only meaningful for SIGSYS.
// SYS_SECCOMP indicates that a signal originates from seccomp.
pub const SYS_SECCOMP: i32 = 1;
// TRAP_* codes are only meaningful for SIGTRAP.
// TRAP_BRKPT indicates a breakpoint trap.
pub const TRAP_BRKPT: i32 = 1;
}
pub const UC_FP_XSTATE: u64 = 1;
pub const UC_SIGCONTEXT_SS: u64 = 2;
pub const UC_STRICT_RESTORE_SS: u64 = 4;
// https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/ucontext.h#L5
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct UContext {
pub Flags: u64,
pub Link: u64,
pub Stack: SignalStack,
pub MContext: SigContext,
pub Sigset: u64,
}
impl UContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64, alt: &SignalStack) -> Self {
return Self {
Flags: 2,
Link: 0,
Stack: alt.clone(),
MContext: SigContext::New(ptRegs, oldMask, cr2, fpstate),
Sigset: 0,
}
}
}
// https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/sigcontext.h#L284
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigContext {
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
pub rdi: u64,
pub rsi: u64,
pub rbp: u64,
pub rbx: u64,
pub rdx: u64,
pub rax: u64,
pub rcx: u64,
pub rsp: u64,
pub rip: u64,
pub eflags: u64,
pub cs: u16,
pub gs: u16,
// always 0 on amd64.
pub fs: u16,
// always 0 on amd64.
pub ss: u16,
// only restored if _UC_STRICT_RESTORE_SS (unsupported).
pub err: u64,
pub trapno: u64,
pub oldmask: u64,
pub cr2: u64,
// Pointer to a struct _fpstate.
pub fpstate: u64,
pub reserved: [u64; 8],
}
impl SigContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64) -> Self {
return Self {
r8: ptRegs.r8,
r9: ptRegs.r9,
r10: ptRegs.r10,
r11: ptRegs.r11,
r12: ptRegs.r12,
r13: ptRegs.r13,
r14: ptRegs.r14,
r15: ptRegs.r15,
rdi: ptRegs.rdi,
rsi: ptRegs.rsi,
rbp: ptRegs.rbp,
rbx: ptRegs.rbx,
rdx: ptRegs.rdx,
rax: ptRegs.rax,
rcx: ptRegs.rcx,
rsp: ptRegs.rsp,
rip: ptRegs.rip,
eflags: ptRegs.eflags,
cs: ptRegs.cs as u16,
gs: 0,
fs: 0,
ss: ptRegs.ss as u16,
err: 0,
trapno: 0,
oldmask: oldMask,
cr2: cr2,
fpstate: fpstate,
..Default::default()
}
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFlag(pub u64);
impl SigFlag {
pub const SIGNAL_FLAG_NO_CLD_STOP: u64 = 0x00000001;
pub const SIGNAL_FLAG_NO_CLD_WAIT: u64 = 0x00000002;
pub const SIGNAL_FLAG_SIG_INFO: u64 = 0x00000004;
pub const SIGNAL_FLAG_RESTORER: u64 = 0x04000000;
pub const SIGNAL_FLAG_ON_STACK: u64 = 0x08000000;
pub const SIGNAL_FLAG_RESTART: u64 = 0x10000000;
pub const SIGNAL_FLAG_INTERRUPT: u64 = 0x20000000;
pub const SIGNAL_FLAG_NO_DEFER: u64 = 0x40000000;
pub const SIGNAL_FLAG_RESET_HANDLER: u64 = 0x80000000;
pub fn IsNoCldStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP!= 0;
}
pub fn IsNoCldWait(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_WAIT!= 0;
}
pub fn IsSigInfo(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_SIG_INFO!= 0;
}
pub fn IsNoDefer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_DEFER!= 0;
}
pub fn IsRestart(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTART!= 0;
}
pub fn IsResetHandler(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESET_HANDLER!= 0;
}
pub fn IsOnStack(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_ON_STACK!= 0;
}
pub fn HasRestorer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTORER!= 0;
}
pub fn IsNoChildStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP!= 0
}
}
// https://github.com/lattera/glibc/blob/master/sysdeps/unix/sysv/linux/kernel_sigaction.h
#[derive(Copy, Clone, Default)]
#[repr(C)]
pub struct SigAct {
pub handler: u64,
pub flags: SigFlag,
pub restorer: u64,
pub mask: u64,
}
impl fmt::Debug for SigAct {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SigAction {{ \n\
handler: {:x}, \n\
flag : {:x}, \n \
flags::HasRestorer: {}, \n \
flags::IsOnStack: {}, \n \
flags::IsRestart: {}, \n \
flags::IsResetHandler: {}, \n \
flags::IsNoDefer: {}, \n \
flags::IsSigInfo: {}, \n \
restorer : {:x}, \n\
mask: {:x}, \n}}",
self.handler,
self.flags.0,
self.flags.HasRestorer(),
self.flags.IsOnStack(),
self.flags.IsRestart(),
self.flags.IsResetHandler(),
self.flags.IsNoDefer(),
self.flags.IsSigInfo(),
self.restorer,
self.mask
)
}
}
impl SigAct {
// SignalActDefault is SIG_DFL and specifies that the default behavior for
// a signal should be taken.
pub const SIGNAL_ACT_DEFAULT: u64 = 0;
// SignalActIgnore is SIG_IGN and specifies that a signal should be
// ignored.
pub const SIGNAL_ACT_IGNORE: u64 = 1;
}
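// SIGKILL and SIGSTOP can never be blocked.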
pub const UNMASKABLE_MASK : u64 = 1 << (Signal::SIGKILL - 1) | 1 << (Signal::SIGSTOP - 1);
#[derive(Clone, Copy, Debug)]
pub struct SignalSet(pub u64);
impl Default for SignalSet {
fn default() -> Self {
return Self(0)
}
}
impl SignalSet {
pub fn New(sig: Signal) -> Self {
return SignalSet(1 << sig.Index())
}
pub fn Add(&mut self, sig: Signal) {
self.0 |= 1 << sig.Index()
}
pub fn Remove(&mut self, sig: Signal) {
self.0 &=!(1 << sig.0)
}
pub fn TailingZero(&self) -> usize {
for i in 0..64 {
let idx = 64 - i - 1;
if self.0 & (1 << idx)!= 0 |
}
return 64
}
pub fn MakeSignalSet(sigs: &[Signal]) -> Self {
let mut res = Self::default();
for sig in sigs {
res.Add(*sig)
}
return res;
}
pub fn ForEachSignal(&self, mut f: impl FnMut(Signal)) {
for i in 0..64 {
if self.0 & (1 << i)!= 0 {
f(Signal(i as i32 + 1))
}
}
}
}
#[derive(Debug, Clone, Default)]
pub struct SignalQueue {
signals: LinkedList<PendingSignal>,
}
impl SignalQueue {
pub const RT_SIG_CAP: usize = 32;
pub fn Len(&mut self) -> u64 {
return self.signals.len() as u64;
}
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> bool {
if self.signals.len() == Self::RT_SIG_CAP {
return false
}
self.signals.push_back(PendingSignal {
sigInfo: info,
timer: timer,
});
return true
}
pub fn Deque(&mut self) -> Option<PendingSignal> {
return self.signals.pop_front();
}
pub fn Clear(&mut self) {
self.signals.clear();
}
}
pub const SIGNAL_COUNT: usize = 64;
pub const STD_SIGNAL_COUNT: usize = 31; // 1 ~ 31
pub const RT_SIGNAL_COUNT: usize = 33; // 32 ~ 64
pub const RT_SIGNAL_START: usize = 32; // 32 ~ 64
#[derive(Debug, Clone, Default)]
pub struct PendingSignal {
pub sigInfo: Box<SignalInfo>,
pub timer: Option<IntervalTimer>,
}
pub struct PendingSignals {
pub stdSignals: [Option<PendingSignal>; STD_SIGNAL_COUNT],
pub rtSignals: [SignalQueue; RT_SIGNAL_COUNT],
pub pendingSet: SignalSet,
}
impl fmt::Debug for PendingSignals {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PendingSignals")
.field("stdSignals", &self.stdSignals)
.field("rtSignals0", &self.rtSignals[0])
.field("rtSignals2", &self.rtSignals[32])
.field("pendingSet", &self.pendingSet)
.finish()
}
}
impl Default for PendingSignals {
fn default() -> Self {
return Self {
stdSignals : Default::default(),
rtSignals : [
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(),
],
pendingSet: Default::default(),
}
}
}
impl PendingSignals {
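// Pend a signal: a standard signal that is already pending is ignored (returns Ok(false)), while real-time signals are queued per signal number.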
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> Result<bool> {
let sig = Signal(info.Signo);
if sig.IsStandard() {
match &self.stdSignals[sig.Index()] {
None => (),
_ => return Ok(false),
}
self.stdSignals[sig.Index()] = Some(PendingSignal {
sigInfo: info,
timer: timer,
});
self.pendingSet.Add(sig);
return Ok(true);
} else if sig.IsRealtime() {
let q = &mut self.rtSignals[sig.Index() - 31];
self.pendingSet.Add(sig);
return Ok(q.Enque(info, timer));
} else {
return Err(Error::InvalidInput)
}
}
pub fn HasSignal(&self, mask: SignalSet) -> bool {
let set = SignalSet(self.pendingSet.0 &!(mask.0));
if set.0 == 0 {
return false
}
return true;
}
pub fn Deque(&mut self, mask: SignalSet) -> Option<Box<SignalInfo>> {
let set = SignalSet(self.pendingSet.0 &!(mask.0));
if set.0 == 0 {
return None
}
let lastOne = set.TailingZero();
if lastOne < STD_SIGNAL_COUNT {
self.pendingSet.0 &=!(1 << lastOne);
let ps = self.stdSignals[lastOne].take();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
if self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Len() == 1 {
self.pendingSet.0 &=!(1 << lastOne);
}
let ps = self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Deque();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
pub fn Discard(&mut self, sig: Signal) {
self.pendingSet.0 &=!(1 << sig.Index());
| {
return idx
} | conditional_block |
SignalDef.rs | /* Return frame for iretq */
pub rip: u64,
pub cs: u64,
pub eflags: u64,
pub rsp: u64,
pub ss: u64,
/* top of stack page */
}
impl PtRegs {
pub fn Set(&mut self, ctx: &SigContext) {
self.r15 = ctx.r15;
self.r14 = ctx.r14;
self.r13 = ctx.r13;
self.r12 = ctx.r12;
self.rbp = ctx.rbp;
self.rbx = ctx.rbx;
self.r11 = ctx.r11;
self.r10 = ctx.r10;
self.r9 = ctx.r9;
self.r8 = ctx.r8;
self.rax = ctx.rax;
self.rcx = ctx.rcx;
self.rdx = ctx.rdx;
self.rsi = ctx.rsi;
self.rdi = ctx.rdi;
self.orig_rax = ctx.rax;
self.rip = ctx.rip;
self.cs = ctx.cs as u64;
self.eflags = ctx.eflags;
self.rsp = ctx.rsp;
self.ss = ctx.ss as u64;
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigRetInfo {
pub sigInfoAddr: u64,
pub sigCtxAddr: u64,
pub ret: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct Kill {
pub pid: i32,
pub uid: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigTimer {
pub tid: i32,
pub overrun: i32,
pub sigval: u64,
pub sysPrivate: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigRt {
pub pid: i32,
pub uid: u32,
pub sigval: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigChld {
pub pid: i32,
//child
pub uid: u32,
//sender's uid
pub status: i32,
//Exit code
pub uTime: i32,
pub sTime: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFault {
pub addr: u64,
pub lsb: u16,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct SignalInfo {
pub Signo: i32,
// Signal number
pub Errno: i32,
// Errno value
pub Code: i32,
// Signal code
pub _r: u32,
pub fields: [u8; 128 - 16],
}
impl<'a> Default for SignalInfo {
fn default() -> Self {
return Self {
Signo: 0,
Errno: 0,
Code: 0,
_r: 0,
fields: [0; 128 - 16]
}
}
}
impl core::fmt::Debug for SignalInfo {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SignalInfo")
.field("Signo", &self.Signo)
.field("Errno", &self.Errno)
.field("Code", &self.Code)
.finish()
}
}
impl SignalInfo {
pub fn SignalInfoPriv(sig: Signal) -> Self {
return Self {
Signo: sig.0,
Code: Self::SIGNAL_INFO_KERNEL,
..Default::default()
}
}
// FixSignalCodeForUser fixes up si_code.
//
// The si_code we get from Linux may contain the kernel-specific code in the
// top 16 bits if it's positive (e.g., from ptrace). Linux's
// copy_siginfo_to_user does
// err |= __put_user((short)from->si_code, &to->si_code);
// to mask out those bits and we need to do the same.
pub fn FixSignalCodeForUser(&mut self) {
if self.Code > 0 {
self.Code &= 0xffff;
}
}
pub fn Kill(&self) -> &mut Kill {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut Kill)
}
}
pub fn SigTimer(&mut self) -> &mut SigTimer {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigTimer)
}
}
pub fn SigRt(&mut self) -> &mut SigRt {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigRt)
}
}
pub fn SigChld(&mut self) -> &mut SigChld {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigChld)
}
}
pub fn SigFault(&self) -> &mut SigFault {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigFault)
}
}
// SignalInfoUser (properly SI_USER) indicates that a signal was sent from
// a kill() or raise() syscall.
pub const SIGNAL_INFO_USER: i32 = 0;
// SignalInfoKernel (properly SI_KERNEL) indicates that the signal was sent
// by the kernel.
pub const SIGNAL_INFO_KERNEL: i32 = 0x80;
// SignalInfoTimer (properly SI_TIMER) indicates that the signal was sent
// by an expired timer.
pub const SIGNAL_INFO_TIMER: i32 = -2;
// SignalInfoTkill (properly SI_TKILL) indicates that the signal was sent
// from a tkill() or tgkill() syscall.
pub const SIGNAL_INFO_TKILL: i32 = -6;
// CLD_* codes are only meaningful for SIGCHLD.
// CLD_EXITED indicates that a task exited.
pub const CLD_EXITED: i32 = 1;
// CLD_KILLED indicates that a task was killed by a signal.
pub const CLD_KILLED: i32 = 2;
// CLD_DUMPED indicates that a task was killed by a signal and then dumped
// core.
pub const CLD_DUMPED: i32 = 3;
// CLD_TRAPPED indicates that a task was stopped by ptrace.
pub const CLD_TRAPPED: i32 = 4;
// CLD_STOPPED indicates that a thread group completed a group stop.
pub const CLD_STOPPED: i32 = 5;
// CLD_CONTINUED indicates that a group-stopped thread group was continued.
pub const CLD_CONTINUED: i32 = 6;
// SYS_* codes are only meaningful for SIGSYS.
// SYS_SECCOMP indicates that a signal originates from seccomp.
pub const SYS_SECCOMP: i32 = 1;
// TRAP_* codes are only meaningful for SIGTRAP.
// TRAP_BRKPT indicates a breakpoint trap.
pub const TRAP_BRKPT: i32 = 1;
}
pub const UC_FP_XSTATE: u64 = 1;
pub const UC_SIGCONTEXT_SS: u64 = 2;
pub const UC_STRICT_RESTORE_SS: u64 = 4;
// https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/ucontext.h#L5
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct UContext {
pub Flags: u64,
pub Link: u64,
pub Stack: SignalStack,
pub MContext: SigContext,
pub Sigset: u64,
}
impl UContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64, alt: &SignalStack) -> Self {
return Self {
Flags: 2,
Link: 0,
Stack: alt.clone(),
MContext: SigContext::New(ptRegs, oldMask, cr2, fpstate),
Sigset: 0,
}
}
}
// https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/sigcontext.h#L284
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigContext {
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
pub rdi: u64,
pub rsi: u64,
pub rbp: u64,
pub rbx: u64,
pub rdx: u64,
pub rax: u64,
pub rcx: u64,
pub rsp: u64,
pub rip: u64,
pub eflags: u64,
pub cs: u16,
pub gs: u16,
// always 0 on amd64.
pub fs: u16,
// always 0 on amd64.
pub ss: u16,
// only restored if _UC_STRICT_RESTORE_SS (unsupported).
pub err: u64,
pub trapno: u64,
pub oldmask: u64,
pub cr2: u64,
// Pointer to a struct _fpstate.
pub fpstate: u64,
pub reserved: [u64; 8],
}
impl SigContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64) -> Self {
return Self {
r8: ptRegs.r8,
r9: ptRegs.r9,
r10: ptRegs.r10,
r11: ptRegs.r11,
r12: ptRegs.r12,
r13: ptRegs.r13,
r14: ptRegs.r14,
r15: ptRegs.r15,
rdi: ptRegs.rdi,
rsi: ptRegs.rsi,
rbp: ptRegs.rbp,
rbx: ptRegs.rbx,
rdx: ptRegs.rdx,
rax: ptRegs.rax,
rcx: ptRegs.rcx,
rsp: ptRegs.rsp,
rip: ptRegs.rip,
eflags: ptRegs.eflags,
cs: ptRegs.cs as u16,
gs: 0,
fs: 0,
ss: ptRegs.ss as u16,
err: 0,
trapno: 0,
oldmask: oldMask,
cr2: cr2,
fpstate: fpstate,
..Default::default()
}
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFlag(pub u64);
impl SigFlag {
pub const SIGNAL_FLAG_NO_CLD_STOP: u64 = 0x00000001;
pub const SIGNAL_FLAG_NO_CLD_WAIT: u64 = 0x00000002;
pub const SIGNAL_FLAG_SIG_INFO: u64 = 0x00000004;
pub const SIGNAL_FLAG_RESTORER: u64 = 0x04000000;
pub const SIGNAL_FLAG_ON_STACK: u64 = 0x08000000;
pub const SIGNAL_FLAG_RESTART: u64 = 0x10000000;
pub const SIGNAL_FLAG_INTERRUPT: u64 = 0x20000000;
pub const SIGNAL_FLAG_NO_DEFER: u64 = 0x40000000;
pub const SIGNAL_FLAG_RESET_HANDLER: u64 = 0x80000000;
pub fn IsNoCldStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP!= 0;
}
pub fn IsNoCldWait(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_WAIT!= 0;
}
pub fn IsSigInfo(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_SIG_INFO!= 0;
}
pub fn IsNoDefer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_DEFER!= 0;
}
pub fn IsRestart(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTART!= 0;
}
pub fn IsResetHandler(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESET_HANDLER!= 0;
}
pub fn IsOnStack(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_ON_STACK!= 0;
}
pub fn HasRestorer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTORER!= 0;
}
pub fn IsNoChildStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP!= 0
}
}
// https://github.com/lattera/glibc/blob/master/sysdeps/unix/sysv/linux/kernel_sigaction.h
#[derive(Copy, Clone, Default)]
#[repr(C)]
pub struct SigAct {
pub handler: u64,
pub flags: SigFlag,
pub restorer: u64,
pub mask: u64,
}
impl fmt::Debug for SigAct {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SigAction {{ \n\
handler: {:x}, \n\
flag : {:x}, \n \
flags::HasRestorer: {}, \n \
flags::IsOnStack: {}, \n \
flags::IsRestart: {}, \n \
flags::IsResetHandler: {}, \n \
flags::IsNoDefer: {}, \n \
flags::IsSigInfo: {}, \n \
restorer : {:x}, \n\
mask: {:x}, \n}}",
self.handler,
self.flags.0,
self.flags.HasRestorer(),
self.flags.IsOnStack(),
self.flags.IsRestart(),
self.flags.IsResetHandler(),
self.flags.IsNoDefer(),
self.flags.IsSigInfo(),
self.restorer,
self.mask
)
}
}
impl SigAct {
// SignalActDefault is SIG_DFL and specifies that the default behavior for
// a signal should be taken.
pub const SIGNAL_ACT_DEFAULT: u64 = 0;
// SignalActIgnore is SIG_IGN and specifies that a signal should be
// ignored.
pub const SIGNAL_ACT_IGNORE: u64 = 1;
}
pub const UNMASKABLE_MASK : u64 = 1 << (Signal::SIGKILL - 1) | 1 << (Signal::SIGSTOP - 1);
#[derive(Clone, Copy, Debug)]
pub struct SignalSet(pub u64);
impl Default for SignalSet {
fn default() -> Self {
return Self(0)
}
}
impl SignalSet {
pub fn | (sig: Signal) -> Self {
return SignalSet(1 << sig.Index())
}
pub fn Add(&mut self, sig: Signal) {
self.0 |= 1 << sig.Index()
}
pub fn Remove(&mut self, sig: Signal) {
self.0 &=!(1 << sig.0)
}
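// Despite its name, this scans from bit 63 down and returns the index of the highest set bit, or 64 if no bit is set.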
pub fn TailingZero(&self) -> usize {
for i in 0..64 {
let idx = 64 - i - 1;
if self.0 & (1 << idx)!= 0 {
return idx
}
}
return 64
}
pub fn MakeSignalSet(sigs: &[Signal]) -> Self {
let mut res = Self::default();
for sig in sigs {
res.Add(*sig)
}
return res;
}
pub fn ForEachSignal(&self, mut f: impl FnMut(Signal)) {
for i in 0..64 {
if self.0 & (1 << i)!= 0 {
f(Signal(i as i32 + 1))
}
}
}
}
#[derive(Debug, Clone, Default)]
pub struct SignalQueue {
signals: LinkedList<PendingSignal>,
}
impl SignalQueue {
pub const RT_SIG_CAP: usize = 32;
pub fn Len(&mut self) -> u64 {
return self.signals.len() as u64;
}
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> bool {
if self.signals.len() == Self::RT_SIG_CAP {
return false
}
self.signals.push_back(PendingSignal {
sigInfo: info,
timer: timer,
});
return true
}
pub fn Deque(&mut self) -> Option<PendingSignal> {
return self.signals.pop_front();
}
pub fn Clear(&mut self) {
self.signals.clear();
}
}
pub const SIGNAL_COUNT: usize = 64;
pub const STD_SIGNAL_COUNT: usize = 31; // 1 ~ 31
pub const RT_SIGNAL_COUNT: usize = 33; // 32 ~ 64
pub const RT_SIGNAL_START: usize = 32; // 32 ~ 64
#[derive(Debug, Clone, Default)]
pub struct PendingSignal {
pub sigInfo: Box<SignalInfo>,
pub timer: Option<IntervalTimer>,
}
pub struct PendingSignals {
pub stdSignals: [Option<PendingSignal>; STD_SIGNAL_COUNT],
pub rtSignals: [SignalQueue; RT_SIGNAL_COUNT],
pub pendingSet: SignalSet,
}
impl fmt::Debug for PendingSignals {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PendingSignals")
.field("stdSignals", &self.stdSignals)
.field("rtSignals0", &self.rtSignals[0])
.field("rtSignals2", &self.rtSignals[32])
.field("pendingSet", &self.pendingSet)
.finish()
}
}
impl Default for PendingSignals {
fn default() -> Self {
return Self {
stdSignals : Default::default(),
rtSignals : [
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(),
],
pendingSet: Default::default(),
}
}
}
impl PendingSignals {
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> Result<bool> {
let sig = Signal(info.Signo);
if sig.IsStandard() {
match &self.stdSignals[sig.Index()] {
None => (),
_ => return Ok(false),
}
self.stdSignals[sig.Index()] = Some(PendingSignal {
sigInfo: info,
timer: timer,
});
self.pendingSet.Add(sig);
return Ok(true);
} else if sig.IsRealtime() {
let q = &mut self.rtSignals[sig.Index() - 31];
self.pendingSet.Add(sig);
return Ok(q.Enque(info, timer));
} else {
return Err(Error::InvalidInput)
}
}
pub fn HasSignal(&self, mask: SignalSet) -> bool {
let set = SignalSet(self.pendingSet.0 &!(mask.0));
if set.0 == 0 {
return false
}
return true;
}
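// Take the highest-numbered pending signal not blocked by `mask`, letting any attached interval timer update the dequeued siginfo.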
pub fn Deque(&mut self, mask: SignalSet) -> Option<Box<SignalInfo>> {
let set = SignalSet(self.pendingSet.0 &!(mask.0));
if set.0 == 0 {
return None
}
let lastOne = set.TailingZero();
if lastOne < STD_SIGNAL_COUNT {
self.pendingSet.0 &=!(1 << lastOne);
let ps = self.stdSignals[lastOne].take();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
if self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Len() == 1 {
self.pendingSet.0 &=!(1 << lastOne);
}
let ps = self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Deque();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
pub fn Discard(&mut self, sig: Signal) {
self.pendingSet.0 &=!(1 << sig.Index());
| New | identifier_name |
SignalDef.rs |
/* Return frame for iretq */
pub rip: u64, | pub rsp: u64,
pub ss: u64,
/* top of stack page */
}
impl PtRegs {
pub fn Set(&mut self, ctx: &SigContext) {
self.r15 = ctx.r15;
self.r14 = ctx.r14;
self.r13 = ctx.r13;
self.r12 = ctx.r12;
self.rbp = ctx.rbp;
self.rbx = ctx.rbx;
self.r11 = ctx.r11;
self.r10 = ctx.r10;
self.r9 = ctx.r9;
self.r8 = ctx.r8;
self.rax = ctx.rax;
self.rcx = ctx.rcx;
self.rdx = ctx.rdx;
self.rsi = ctx.rsi;
self.rdi = ctx.rdi;
self.orig_rax = ctx.rax;
self.rip = ctx.rip;
self.cs = ctx.cs as u64;
self.eflags = ctx.eflags;
self.rsp = ctx.rsp;
self.ss = ctx.ss as u64;
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigRetInfo {
pub sigInfoAddr: u64,
pub sigCtxAddr: u64,
pub ret: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct Kill {
pub pid: i32,
pub uid: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigTimer {
pub tid: i32,
pub overrun: i32,
pub sigval: u64,
pub sysPrivate: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigRt {
pub pid: i32,
pub uid: u32,
pub sigval: u64,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigChld {
pub pid: i32,
//child
pub uid: u32,
//sender's uid
pub status: i32,
//Exit code
pub uTime: i32,
pub sTime: i32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFault {
pub addr: u64,
pub lsb: u16,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct SignalInfo {
pub Signo: i32,
// Signal number
pub Errno: i32,
// Errno value
pub Code: i32,
// Signal code
pub _r: u32,
pub fields: [u8; 128 - 16],
}
impl<'a> Default for SignalInfo {
fn default() -> Self {
return Self {
Signo: 0,
Errno: 0,
Code: 0,
_r: 0,
fields: [0; 128 - 16]
}
}
}
impl core::fmt::Debug for SignalInfo {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SignalInfo")
.field("Signo", &self.Signo)
.field("Errno", &self.Errno)
.field("Code", &self.Code)
.finish()
}
}
impl SignalInfo {
pub fn SignalInfoPriv(sig: Signal) -> Self {
return Self {
Signo: sig.0,
Code: Self::SIGNAL_INFO_KERNEL,
..Default::default()
}
}
// FixSignalCodeForUser fixes up si_code.
//
// The si_code we get from Linux may contain the kernel-specific code in the
// top 16 bits if it's positive (e.g., from ptrace). Linux's
// copy_siginfo_to_user does
// err |= __put_user((short)from->si_code, &to->si_code);
// to mask out those bits and we need to do the same.
pub fn FixSignalCodeForUser(&mut self) {
if self.Code > 0 {
self.Code &= 0xffff;
}
}
pub fn Kill(&self) -> &mut Kill {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut Kill)
}
}
pub fn SigTimer(&mut self) -> &mut SigTimer {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigTimer)
}
}
pub fn SigRt(&mut self) -> &mut SigRt {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigRt)
}
}
pub fn SigChld(&mut self) -> &mut SigChld {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigChld)
}
}
pub fn SigFault(&self) -> &mut SigFault {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigFault)
}
}
// SignalInfoUser (properly SI_USER) indicates that a signal was sent from
// a kill() or raise() syscall.
pub const SIGNAL_INFO_USER: i32 = 0;
// SignalInfoKernel (properly SI_KERNEL) indicates that the signal was sent
// by the kernel.
pub const SIGNAL_INFO_KERNEL: i32 = 0x80;
// SignalInfoTimer (properly SI_TIMER) indicates that the signal was sent
// by an expired timer.
pub const SIGNAL_INFO_TIMER: i32 = -2;
// SignalInfoTkill (properly SI_TKILL) indicates that the signal was sent
// from a tkill() or tgkill() syscall.
pub const SIGNAL_INFO_TKILL: i32 = -6;
// CLD_* codes are only meaningful for SIGCHLD.
// CLD_EXITED indicates that a task exited.
pub const CLD_EXITED: i32 = 1;
// CLD_KILLED indicates that a task was killed by a signal.
pub const CLD_KILLED: i32 = 2;
// CLD_DUMPED indicates that a task was killed by a signal and then dumped
// core.
pub const CLD_DUMPED: i32 = 3;
// CLD_TRAPPED indicates that a task was stopped by ptrace.
pub const CLD_TRAPPED: i32 = 4;
// CLD_STOPPED indicates that a thread group completed a group stop.
pub const CLD_STOPPED: i32 = 5;
// CLD_CONTINUED indicates that a group-stopped thread group was continued.
pub const CLD_CONTINUED: i32 = 6;
// SYS_* codes are only meaningful for SIGSYS.
// SYS_SECCOMP indicates that a signal originates from seccomp.
pub const SYS_SECCOMP: i32 = 1;
// TRAP_* codes are only meaningful for SIGTRAP.
// TRAP_BRKPT indicates a breakpoint trap.
pub const TRAP_BRKPT: i32 = 1;
}
pub const UC_FP_XSTATE: u64 = 1;
pub const UC_SIGCONTEXT_SS: u64 = 2;
pub const UC_STRICT_RESTORE_SS: u64 = 4;
// https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/ucontext.h#L5
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct UContext {
pub Flags: u64,
pub Link: u64,
pub Stack: SignalStack,
pub MContext: SigContext,
pub Sigset: u64,
}
impl UContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64, alt: &SignalStack) -> Self {
return Self {
Flags: 2,
Link: 0,
Stack: alt.clone(),
MContext: SigContext::New(ptRegs, oldMask, cr2, fpstate),
Sigset: 0,
}
}
}
// https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/sigcontext.h#L284
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigContext {
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
pub rdi: u64,
pub rsi: u64,
pub rbp: u64,
pub rbx: u64,
pub rdx: u64,
pub rax: u64,
pub rcx: u64,
pub rsp: u64,
pub rip: u64,
pub eflags: u64,
pub cs: u16,
pub gs: u16,
// always 0 on amd64.
pub fs: u16,
// always 0 on amd64.
pub ss: u16,
// only restored if _UC_STRICT_RESTORE_SS (unsupported).
pub err: u64,
pub trapno: u64,
pub oldmask: u64,
pub cr2: u64,
// Pointer to a struct _fpstate.
pub fpstate: u64,
pub reserved: [u64; 8],
}
impl SigContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64) -> Self {
return Self {
r8: ptRegs.r8,
r9: ptRegs.r9,
r10: ptRegs.r10,
r11: ptRegs.r11,
r12: ptRegs.r12,
r13: ptRegs.r13,
r14: ptRegs.r14,
r15: ptRegs.r15,
rdi: ptRegs.rdi,
rsi: ptRegs.rsi,
rbp: ptRegs.rbp,
rbx: ptRegs.rbx,
rdx: ptRegs.rdx,
rax: ptRegs.rax,
rcx: ptRegs.rcx,
rsp: ptRegs.rsp,
rip: ptRegs.rip,
eflags: ptRegs.eflags,
cs: ptRegs.cs as u16,
gs: 0,
fs: 0,
ss: ptRegs.ss as u16,
err: 0,
trapno: 0,
oldmask: oldMask,
cr2: cr2,
fpstate: fpstate,
..Default::default()
}
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFlag(pub u64);
impl SigFlag {
pub const SIGNAL_FLAG_NO_CLD_STOP: u64 = 0x00000001;
pub const SIGNAL_FLAG_NO_CLD_WAIT: u64 = 0x00000002;
pub const SIGNAL_FLAG_SIG_INFO: u64 = 0x00000004;
pub const SIGNAL_FLAG_RESTORER: u64 = 0x04000000;
pub const SIGNAL_FLAG_ON_STACK: u64 = 0x08000000;
pub const SIGNAL_FLAG_RESTART: u64 = 0x10000000;
pub const SIGNAL_FLAG_INTERRUPT: u64 = 0x20000000;
pub const SIGNAL_FLAG_NO_DEFER: u64 = 0x40000000;
pub const SIGNAL_FLAG_RESET_HANDLER: u64 = 0x80000000;
pub fn IsNoCldStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP!= 0;
}
pub fn IsNoCldWait(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_WAIT!= 0;
}
pub fn IsSigInfo(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_SIG_INFO!= 0;
}
pub fn IsNoDefer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_DEFER!= 0;
}
pub fn IsRestart(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTART!= 0;
}
pub fn IsResetHandler(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESET_HANDLER!= 0;
}
pub fn IsOnStack(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_ON_STACK!= 0;
}
pub fn HasRestorer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTORER!= 0;
}
pub fn IsNoChildStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP!= 0
}
}
// https://github.com/lattera/glibc/blob/master/sysdeps/unix/sysv/linux/kernel_sigaction.h
#[derive(Copy, Clone, Default)]
#[repr(C)]
pub struct SigAct {
pub handler: u64,
pub flags: SigFlag,
pub restorer: u64,
pub mask: u64,
}
impl fmt::Debug for SigAct {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SigAction {{ \n\
handler: {:x}, \n\
flag : {:x}, \n \
flags::HasRestorer: {}, \n \
flags::IsOnStack: {}, \n \
flags::IsRestart: {}, \n \
flags::IsResetHandler: {}, \n \
flags::IsNoDefer: {}, \n \
flags::IsSigInfo: {}, \n \
restorer : {:x}, \n\
mask: {:x}, \n}}",
self.handler,
self.flags.0,
self.flags.HasRestorer(),
self.flags.IsOnStack(),
self.flags.IsRestart(),
self.flags.IsResetHandler(),
self.flags.IsNoDefer(),
self.flags.IsSigInfo(),
self.restorer,
self.mask
)
}
}
impl SigAct {
// SignalActDefault is SIG_DFL and specifies that the default behavior for
// a signal should be taken.
pub const SIGNAL_ACT_DEFAULT: u64 = 0;
// SignalActIgnore is SIG_IGN and specifies that a signal should be
// ignored.
pub const SIGNAL_ACT_IGNORE: u64 = 1;
}
pub const UNMASKABLE_MASK : u64 = 1 << (Signal::SIGKILL - 1) | 1 << (Signal::SIGSTOP - 1);
#[derive(Clone, Copy, Debug)]
pub struct SignalSet(pub u64);
impl Default for SignalSet {
fn default() -> Self {
return Self(0)
}
}
impl SignalSet {
pub fn New(sig: Signal) -> Self {
return SignalSet(1 << sig.Index())
}
pub fn Add(&mut self, sig: Signal) {
self.0 |= 1 << sig.Index()
}
pub fn Remove(&mut self, sig: Signal) {
self.0 &=!(1 << sig.0)
}
pub fn TailingZero(&self) -> usize {
for i in 0..64 {
let idx = 64 - i - 1;
if self.0 & (1 << idx)!= 0 {
return idx
}
}
return 64
}
pub fn MakeSignalSet(sigs: &[Signal]) -> Self {
let mut res = Self::default();
for sig in sigs {
res.Add(*sig)
}
return res;
}
pub fn ForEachSignal(&self, mut f: impl FnMut(Signal)) {
for i in 0..64 {
if self.0 & (1 << i)!= 0 {
f(Signal(i as i32 + 1))
}
}
}
}
#[derive(Debug, Clone, Default)]
pub struct SignalQueue {
signals: LinkedList<PendingSignal>,
}
impl SignalQueue {
pub const RT_SIG_CAP: usize = 32;
pub fn Len(&mut self) -> u64 {
return self.signals.len() as u64;
}
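// Returns false once RT_SIG_CAP entries are already queued, so additional real-time signals are not stored.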
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> bool {
if self.signals.len() == Self::RT_SIG_CAP {
return false
}
self.signals.push_back(PendingSignal {
sigInfo: info,
timer: timer,
});
return true
}
pub fn Deque(&mut self) -> Option<PendingSignal> {
return self.signals.pop_front();
}
pub fn Clear(&mut self) {
self.signals.clear();
}
}
pub const SIGNAL_COUNT: usize = 64;
pub const STD_SIGNAL_COUNT: usize = 31; // 1 ~ 31
pub const RT_SIGNAL_COUNT: usize = 33; // 32 ~ 64
pub const RT_SIGNAL_START: usize = 32; // 32 ~ 64
#[derive(Debug, Clone, Default)]
pub struct PendingSignal {
pub sigInfo: Box<SignalInfo>,
pub timer: Option<IntervalTimer>,
}
pub struct PendingSignals {
pub stdSignals: [Option<PendingSignal>; STD_SIGNAL_COUNT],
pub rtSignals: [SignalQueue; RT_SIGNAL_COUNT],
pub pendingSet: SignalSet,
}
impl fmt::Debug for PendingSignals {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PendingSignals")
.field("stdSignals", &self.stdSignals)
.field("rtSignals0", &self.rtSignals[0])
.field("rtSignals2", &self.rtSignals[32])
.field("pendingSet", &self.pendingSet)
.finish()
}
}
impl Default for PendingSignals {
fn default() -> Self {
return Self {
stdSignals : Default::default(),
rtSignals : [
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(),
],
pendingSet: Default::default(),
}
}
}
impl PendingSignals {
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> Result<bool> {
let sig = Signal(info.Signo);
if sig.IsStandard() {
match &self.stdSignals[sig.Index()] {
None => (),
_ => return Ok(false),
}
self.stdSignals[sig.Index()] = Some(PendingSignal {
sigInfo: info,
timer: timer,
});
self.pendingSet.Add(sig);
return Ok(true);
} else if sig.IsRealtime() {
let q = &mut self.rtSignals[sig.Index() - 31];
self.pendingSet.Add(sig);
return Ok(q.Enque(info, timer));
} else {
return Err(Error::InvalidInput)
}
}
pub fn HasSignal(&self, mask: SignalSet) -> bool {
let set = SignalSet(self.pendingSet.0 &!(mask.0));
if set.0 == 0 {
return false
}
return true;
}
pub fn Deque(&mut self, mask: SignalSet) -> Option<Box<SignalInfo>> {
let set = SignalSet(self.pendingSet.0 &!(mask.0));
if set.0 == 0 {
return None
}
let lastOne = set.TailingZero();
if lastOne < STD_SIGNAL_COUNT {
self.pendingSet.0 &=!(1 << lastOne);
let ps = self.stdSignals[lastOne].take();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
if self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Len() == 1 {
self.pendingSet.0 &=!(1 << lastOne);
}
let ps = self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Deque();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
pub fn Discard(&mut self, sig: Signal) {
self.pendingSet.0 &=!(1 << sig.Index());
| pub cs: u64,
pub eflags: u64, | random_line_split |
SignalDef.rs | .field("Signo", &self.Signo)
.field("Errno", &self.Errno)
.field("Code", &self.Code)
.finish()
}
}
impl SignalInfo {
pub fn SignalInfoPriv(sig: Signal) -> Self {
return Self {
Signo: sig.0,
Code: Self::SIGNAL_INFO_KERNEL,
..Default::default()
}
}
// FixSignalCodeForUser fixes up si_code.
//
// The si_code we get from Linux may contain the kernel-specific code in the
// top 16 bits if it's positive (e.g., from ptrace). Linux's
// copy_siginfo_to_user does
// err |= __put_user((short)from->si_code, &to->si_code);
// to mask out those bits and we need to do the same.
pub fn FixSignalCodeForUser(&mut self) {
if self.Code > 0 {
self.Code &= 0xffff;
}
}
pub fn Kill(&self) -> &mut Kill {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut Kill)
}
}
pub fn SigTimer(&mut self) -> &mut SigTimer {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigTimer)
}
}
pub fn SigRt(&mut self) -> &mut SigRt {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigRt)
}
}
pub fn SigChld(&mut self) -> &mut SigChld {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigChld)
}
}
pub fn SigFault(&self) -> &mut SigFault {
let addr = &self.fields[0] as *const _ as u64;
return unsafe {
&mut *(addr as *mut SigFault)
}
}
// SignalInfoUser (properly SI_USER) indicates that a signal was sent from
// a kill() or raise() syscall.
pub const SIGNAL_INFO_USER: i32 = 0;
// SignalInfoKernel (properly SI_KERNEL) indicates that the signal was sent
// by the kernel.
pub const SIGNAL_INFO_KERNEL: i32 = 0x80;
// SignalInfoTimer (properly SI_TIMER) indicates that the signal was sent
// by an expired timer.
pub const SIGNAL_INFO_TIMER: i32 = -2;
// SignalInfoTkill (properly SI_TKILL) indicates that the signal was sent
// from a tkill() or tgkill() syscall.
pub const SIGNAL_INFO_TKILL: i32 = -6;
// CLD_* codes are only meaningful for SIGCHLD.
// CLD_EXITED indicates that a task exited.
pub const CLD_EXITED: i32 = 1;
// CLD_KILLED indicates that a task was killed by a signal.
pub const CLD_KILLED: i32 = 2;
// CLD_DUMPED indicates that a task was killed by a signal and then dumped
// core.
pub const CLD_DUMPED: i32 = 3;
// CLD_TRAPPED indicates that a task was stopped by ptrace.
pub const CLD_TRAPPED: i32 = 4;
// CLD_STOPPED indicates that a thread group completed a group stop.
pub const CLD_STOPPED: i32 = 5;
// CLD_CONTINUED indicates that a group-stopped thread group was continued.
pub const CLD_CONTINUED: i32 = 6;
// SYS_* codes are only meaningful for SIGSYS.
// SYS_SECCOMP indicates that a signal originates from seccomp.
pub const SYS_SECCOMP: i32 = 1;
// TRAP_* codes are only meaningful for SIGTRAP.
// TRAP_BRKPT indicates a breakpoint trap.
pub const TRAP_BRKPT: i32 = 1;
}
pub const UC_FP_XSTATE: u64 = 1;
pub const UC_SIGCONTEXT_SS: u64 = 2;
pub const UC_STRICT_RESTORE_SS: u64 = 4;
// https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/ucontext.h#L5
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct UContext {
pub Flags: u64,
pub Link: u64,
pub Stack: SignalStack,
pub MContext: SigContext,
pub Sigset: u64,
}
impl UContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64, alt: &SignalStack) -> Self {
return Self {
Flags: 2,
Link: 0,
Stack: alt.clone(),
MContext: SigContext::New(ptRegs, oldMask, cr2, fpstate),
Sigset: 0,
}
}
}
// https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/sigcontext.h#L284
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigContext {
pub r8: u64,
pub r9: u64,
pub r10: u64,
pub r11: u64,
pub r12: u64,
pub r13: u64,
pub r14: u64,
pub r15: u64,
pub rdi: u64,
pub rsi: u64,
pub rbp: u64,
pub rbx: u64,
pub rdx: u64,
pub rax: u64,
pub rcx: u64,
pub rsp: u64,
pub rip: u64,
pub eflags: u64,
pub cs: u16,
pub gs: u16,
// always 0 on amd64.
pub fs: u16,
// always 0 on amd64.
pub ss: u16,
// only restored if _UC_STRICT_RESTORE_SS (unsupported).
pub err: u64,
pub trapno: u64,
pub oldmask: u64,
pub cr2: u64,
// Pointer to a struct _fpstate.
pub fpstate: u64,
pub reserved: [u64; 8],
}
impl SigContext {
pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64) -> Self {
return Self {
r8: ptRegs.r8,
r9: ptRegs.r9,
r10: ptRegs.r10,
r11: ptRegs.r11,
r12: ptRegs.r12,
r13: ptRegs.r13,
r14: ptRegs.r14,
r15: ptRegs.r15,
rdi: ptRegs.rdi,
rsi: ptRegs.rsi,
rbp: ptRegs.rbp,
rbx: ptRegs.rbx,
rdx: ptRegs.rdx,
rax: ptRegs.rax,
rcx: ptRegs.rcx,
rsp: ptRegs.rsp,
rip: ptRegs.rip,
eflags: ptRegs.eflags,
cs: ptRegs.cs as u16,
gs: 0,
fs: 0,
ss: ptRegs.ss as u16,
err: 0,
trapno: 0,
oldmask: oldMask,
cr2: cr2,
fpstate: fpstate,
..Default::default()
}
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
pub struct SigFlag(pub u64);
impl SigFlag {
pub const SIGNAL_FLAG_NO_CLD_STOP: u64 = 0x00000001;
pub const SIGNAL_FLAG_NO_CLD_WAIT: u64 = 0x00000002;
pub const SIGNAL_FLAG_SIG_INFO: u64 = 0x00000004;
pub const SIGNAL_FLAG_RESTORER: u64 = 0x04000000;
pub const SIGNAL_FLAG_ON_STACK: u64 = 0x08000000;
pub const SIGNAL_FLAG_RESTART: u64 = 0x10000000;
pub const SIGNAL_FLAG_INTERRUPT: u64 = 0x20000000;
pub const SIGNAL_FLAG_NO_DEFER: u64 = 0x40000000;
pub const SIGNAL_FLAG_RESET_HANDLER: u64 = 0x80000000;
pub fn IsNoCldStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0;
}
pub fn IsNoCldWait(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_WAIT != 0;
}
pub fn IsSigInfo(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_SIG_INFO != 0;
}
pub fn IsNoDefer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_DEFER != 0;
}
pub fn IsRestart(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTART != 0;
}
pub fn IsResetHandler(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESET_HANDLER != 0;
}
pub fn IsOnStack(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_ON_STACK != 0;
}
pub fn HasRestorer(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_RESTORER != 0;
}
pub fn IsNoChildStop(&self) -> bool {
return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0
}
}
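// --- Added illustrative sketch (not part of the original source) ---
// Decodes a sigaction flag word with the predicates above; the combination of
// SA_SIGINFO | SA_RESTART | SA_ONSTACK used here is an arbitrary example value.
#[cfg(test)]
mod sig_flag_example {
    use super::*;

    #[test]
    fn decodes_common_sa_flags() {
        let flags = SigFlag(
            SigFlag::SIGNAL_FLAG_SIG_INFO
                | SigFlag::SIGNAL_FLAG_RESTART
                | SigFlag::SIGNAL_FLAG_ON_STACK,
        );
        assert!(flags.IsSigInfo());
        assert!(flags.IsRestart());
        assert!(flags.IsOnStack());
        assert!(!flags.IsNoDefer());
        assert!(!flags.HasRestorer());
    }
}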
// https://github.com/lattera/glibc/blob/master/sysdeps/unix/sysv/linux/kernel_sigaction.h
#[derive(Copy, Clone, Default)]
#[repr(C)]
pub struct SigAct {
pub handler: u64,
pub flags: SigFlag,
pub restorer: u64,
pub mask: u64,
}
impl fmt::Debug for SigAct {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "SigAction {{ \n\
handler: {:x}, \n\
flag : {:x}, \n \
flags::HasRestorer: {}, \n \
flags::IsOnStack: {}, \n \
flags::IsRestart: {}, \n \
flags::IsResetHandler: {}, \n \
flags::IsNoDefer: {}, \n \
flags::IsSigInfo: {}, \n \
restorer : {:x}, \n\
mask: {:x}, \n}}",
self.handler,
self.flags.0,
self.flags.HasRestorer(),
self.flags.IsOnStack(),
self.flags.IsRestart(),
self.flags.IsResetHandler(),
self.flags.IsNoDefer(),
self.flags.IsSigInfo(),
self.restorer,
self.mask
)
}
}
impl SigAct {
// SignalActDefault is SIG_DFL and specifies that the default behavior for
// a signal should be taken.
pub const SIGNAL_ACT_DEFAULT: u64 = 0;
// SignalActIgnore is SIG_IGN and specifies that a signal should be
// ignored.
pub const SIGNAL_ACT_IGNORE: u64 = 1;
}
pub const UNMASKABLE_MASK: u64 = 1 << (Signal::SIGKILL - 1) | 1 << (Signal::SIGSTOP - 1);
#[derive(Clone, Copy, Debug)]
pub struct SignalSet(pub u64);
impl Default for SignalSet {
fn default() -> Self {
return Self(0)
}
}
impl SignalSet {
pub fn New(sig: Signal) -> Self {
return SignalSet(1 << sig.Index())
}
pub fn Add(&mut self, sig: Signal) {
self.0 |= 1 << sig.Index()
}
pub fn Remove(&mut self, sig: Signal) {
// Clear the same bit position that Add/New set (bit index = signal number - 1).
self.0 &= !(1 << sig.Index())
}
// Returns the bit index of the highest-numbered signal in the set, or 64 if the
// set is empty. (The name is historical; the scan runs from the top bit down.)
pub fn TailingZero(&self) -> usize {
for i in 0..64 {
let idx = 64 - i - 1;
if self.0 & (1 << idx) != 0 {
return idx
}
}
return 64
}
pub fn MakeSignalSet(sigs: &[Signal]) -> Self {
let mut res = Self::default();
for sig in sigs {
res.Add(*sig)
}
return res;
}
pub fn ForEachSignal(&self, mut f: impl FnMut(Signal)) {
for i in 0..64 {
if self.0 & (1 << i) != 0 {
f(Signal(i as i32 + 1))
}
}
}
}
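// --- Added illustrative sketch (not part of the original source) ---
// Builds a mask with the helpers above and walks it with ForEachSignal. Note that
// TailingZero reports the index of the highest set bit. Signal numbers are examples.
#[cfg(test)]
mod signal_set_example {
    use super::*;

    #[test]
    fn build_and_iterate_mask() {
        let mut set = SignalSet::MakeSignalSet(&[Signal(9), Signal(15)]); // SIGKILL, SIGTERM
        set.Add(Signal(2)); // SIGINT
        let mut count = 0;
        let mut highest = 0;
        set.ForEachSignal(|sig| {
            count += 1;
            highest = sig.0;
        });
        assert_eq!(count, 3);
        assert_eq!(highest, 15);
        // Signal 15 occupies bit index 14, the highest set bit in this mask.
        assert_eq!(set.TailingZero(), 14);
    }
}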
#[derive(Debug, Clone, Default)]
pub struct SignalQueue {
signals: LinkedList<PendingSignal>,
}
impl SignalQueue {
pub const RT_SIG_CAP: usize = 32;
pub fn Len(&mut self) -> u64 {
return self.signals.len() as u64;
}
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> bool {
if self.signals.len() == Self::RT_SIG_CAP {
return false
}
self.signals.push_back(PendingSignal {
sigInfo: info,
timer: timer,
});
return true
}
pub fn Deque(&mut self) -> Option<PendingSignal> {
return self.signals.pop_front();
}
pub fn Clear(&mut self) {
self.signals.clear();
}
}
pub const SIGNAL_COUNT: usize = 64;
pub const STD_SIGNAL_COUNT: usize = 31; // 1 ~ 31
pub const RT_SIGNAL_COUNT: usize = 33; // 32 ~ 64
pub const RT_SIGNAL_START: usize = 32; // 32 ~ 64
#[derive(Debug, Clone, Default)]
pub struct PendingSignal {
pub sigInfo: Box<SignalInfo>,
pub timer: Option<IntervalTimer>,
}
pub struct PendingSignals {
pub stdSignals: [Option<PendingSignal>; STD_SIGNAL_COUNT],
pub rtSignals: [SignalQueue; RT_SIGNAL_COUNT],
pub pendingSet: SignalSet,
}
impl fmt::Debug for PendingSignals {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PendingSignals")
.field("stdSignals", &self.stdSignals)
.field("rtSignals0", &self.rtSignals[0])
.field("rtSignals2", &self.rtSignals[32])
.field("pendingSet", &self.pendingSet)
.finish()
}
}
impl Default for PendingSignals {
fn default() -> Self {
return Self {
stdSignals : Default::default(),
rtSignals : [
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(),
SignalQueue::default(),
],
pendingSet: Default::default(),
}
}
}
impl PendingSignals {
pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> Result<bool> {
let sig = Signal(info.Signo);
if sig.IsStandard() {
match &self.stdSignals[sig.Index()] {
None => (),
_ => return Ok(false),
}
self.stdSignals[sig.Index()] = Some(PendingSignal {
sigInfo: info,
timer: timer,
});
self.pendingSet.Add(sig);
return Ok(true);
} else if sig.IsRealtime() {
// Realtime signal N (N >= 32) maps to queue N - RT_SIGNAL_START; Index() is N - 1.
let q = &mut self.rtSignals[sig.Index() + 1 - RT_SIGNAL_START];
self.pendingSet.Add(sig);
return Ok(q.Enque(info, timer));
} else {
return Err(Error::InvalidInput)
}
}
pub fn HasSignal(&self, mask: SignalSet) -> bool {
let set = SignalSet(self.pendingSet.0 & !(mask.0));
if set.0 == 0 {
return false
}
return true;
}
pub fn Deque(&mut self, mask: SignalSet) -> Option<Box<SignalInfo>> {
let set = SignalSet(self.pendingSet.0 & !(mask.0));
if set.0 == 0 {
return None
}
let lastOne = set.TailingZero();
if lastOne < STD_SIGNAL_COUNT {
self.pendingSet.0 &= !(1 << lastOne);
let ps = self.stdSignals[lastOne].take();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
if self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Len() == 1 {
self.pendingSet.0 &= !(1 << lastOne);
}
let ps = self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Deque();
if let Some(ps) = ps {
let mut sigInfo = ps.sigInfo;
match ps.timer {
None => (),
Some(timer) => {
timer.lock().updateDequeuedSignalLocked(&mut sigInfo)
}
}
return Some(sigInfo);
} else {
return None;
}
}
pub fn Discard(&mut self, sig: Signal) {
self.pendingSet.0 &= !(1 << sig.Index());
if sig.0 <= STD_SIGNAL_COUNT as i32 {
self.stdSignals[sig.Index()] = None;
return
}
self.rtSignals[sig.0 as usize - RT_SIGNAL_START].Clear()
}
}
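// --- Added illustrative sketch (not part of the original source) ---
// Exercises Enque/Deque: Deque delivers the highest-numbered unmasked pending
// signal (see SignalSet::TailingZero above), and a masked signal stays queued
// until the mask is dropped. Signal numbers are examples only.
#[cfg(test)]
mod pending_signals_example {
    use super::*;

    #[test]
    fn deque_respects_mask() {
        let mut pending = PendingSignals::default();
        assert_eq!(pending.Enque(Box::new(SignalInfoPriv(2)), None).ok(), Some(true)); // SIGINT
        assert_eq!(pending.Enque(Box::new(SignalInfoPriv(15)), None).ok(), Some(true)); // SIGTERM
        // With SIGTERM masked, SIGINT is the only deliverable signal.
        let masked = SignalSet::New(Signal(15));
        assert_eq!(pending.Deque(masked).unwrap().Signo, 2);
        // Once the mask is dropped, the remaining SIGTERM is delivered.
        assert_eq!(pending.Deque(SignalSet::default()).unwrap().Signo, 15);
        assert!(pending.Deque(SignalSet::default()).is_none());
    }
}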
#[derive(Default, Debug)]
pub struct SignalStruct {
pendingSignals: PendingSignals,
signalMask: SignalSet,
realSignalMask: SignalSet,
//sigtimedwait
groupStopPending: bool,
groupStopAck: bool,
trapStopPending: bool,
}
// https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/signal.h#L132
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct SignalStack {
pub addr: u64,
pub flags: u32,
pub size: u64,
}
impl Default for SignalStack {
fn default() -> Self {
return Self {
addr: 0,
flags: Self::FLAG_DISABLE,
size: 0,
}
}
}
impl SignalStack {
pub const FLAG_ON_STACK: u32 = 1;
pub const FLAG_DISABLE: u32 = 2;
pub fn Contains(&self, sp: u64) -> bool {
return self.addr < sp && sp <= self.addr + self.size
}
pub fn SetOnStack(&mut self) {
self.flags |= Self::FLAG_ON_STACK;
}
pub fn IsEnable(&self) -> bool {
return self.flags & Self::FLAG_DISABLE == 0
}
pub fn Top(&self) -> u64 {
return self.addr + self.size
}
}
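// --- Added illustrative sketch (not part of the original source) ---
// The alternate-stack bounds check above treats the stack as the half-open range
// (addr, addr + size]. The addresses below are example values.
#[cfg(test)]
mod signal_stack_example {
    use super::*;

    #[test]
    fn contains_uses_half_open_range() {
        let stack = SignalStack { addr: 0x7000_0000, flags: 0, size: 0x1000 };
        assert!(stack.IsEnable()); // flags == 0 means the alt stack is not disabled
        assert!(!stack.Contains(stack.addr)); // the base address itself is excluded
        assert!(stack.Contains(stack.Top())); // the top of the stack is included
        assert!(!stack.Contains(stack.Top() + 1));
    }
}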
pub struct SigHow {}
impl SigHow {
pub const SIG_BLOCK: u64 = 0;
pub const SIG_UNBLOCK: u64 = 1;
pub const SIG_SETMASK: u64 = 2;
}
pub fn SignalInfoPriv(sig: i32) -> SignalInfo {
return SignalInfo {
Signo: sig,
Code: SignalInfo::SIGNAL_INFO_KERNEL,
..Default::default()
}
}
// Sigevent represents struct sigevent.
#[repr(C)]
#[derive(Default, Copy, Clone)]
pub struct Sigevent {
pub Value: u64,
pub Signo: i32,
pub Notify: i32,
pub Tid: i32,
// struct sigevent here contains 48-byte union _sigev_un. However, only
// member _tid is significant to the kernel.
pub UnRemainder1: [u8; 32],
pub UnRemainder: [u8; 12],
}
pub const SIGEV_SIGNAL: i32 = 0;
pub const SIGEV_NONE: i32 = 1;
pub const SIGEV_THREAD: i32 = 2;
pub const SIGEV_THREAD_ID: i32 = 4;
// copyInSigSetWithSize copies in a structure as below
//
// struct {
// const sigset_t *ss; /* Pointer to signal set */
// size_t ss_len; /* Size (in bytes) of object pointed to by 'ss' */
// };
//
// and returns sigset_addr and size.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct SigMask {
pub addr: u64,
pub len: usize,
}
pub fn CopyInSigSetWithSize(task: &Task, addr: u64) -> Result<(u64, usize)> | {
let mask : SigMask = task.CopyInObj(addr)?;
return Ok((mask.addr, mask.len))
} | identifier_body |
|
lib.rs | , event: Self::TimerEvent);
fn access_device(&mut self) -> &mut Device;
}
pub struct MinstrelTimer {
timer: wlan_common::timer::Timer<()>,
current_timer: Option<common::timer::EventId>,
}
impl minstrel::TimerManager for MinstrelTimer {
fn schedule(&mut self, from_now: Duration) {
self.current_timer.replace(self.timer.schedule_after(from_now.into(), ()));
}
fn cancel(&mut self) {
self.current_timer.take();
}
}
type MinstrelWrapper = Arc<Mutex<minstrel::MinstrelRateSelector<MinstrelTimer>>>;
// We support a fake MLME internal representation that allows tests written in C++ to manually
// tweak the system time.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
enum MlmeHandleInternal {
Real {
join_handle: std::thread::JoinHandle<()>,
},
Fake {
executor: fasync::TestExecutor,
future: std::pin::Pin<Box<dyn futures::Future<Output = ()>>>,
},
}
/// MlmeHandle is the only access we have to our MLME after spinning it off into its own
/// event loop thread.
pub struct MlmeHandle {
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
internal: Option<MlmeHandleInternal>,
}
impl MlmeHandle {
pub fn stop(&mut self) {
if let Err(e) = self.driver_event_sink.unbounded_send(DriverEvent::Stop) {
error!("Cannot signal MLME event loop thread: {}", e);
}
match self.internal.take() {
Some(MlmeHandleInternal::Real { join_handle }) => {
// This unwrap will only fail if the thread panics.
if let Err(e) = join_handle.join() {
error!("MLME event loop thread panicked: {:?}", e);
}
}
Some(MlmeHandleInternal::Fake { mut executor, mut future }) => {
// Verify that our main thread would exit now.
assert!(executor.run_until_stalled(&mut future.as_mut()).is_ready());
}
None => warn!("Called stop on already stopped MLME"),
}
}
pub fn delete(mut self) {
if self.internal.is_some() {
warn!("Called delete on MlmeHandle before calling stop.");
self.stop()
}
}
pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> {
self.driver_event_sink
.unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() })
.map_err(|e| e.into())
}
// Fns used to interact with an MLME running in test mode.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
pub fn advance_fake_time(&mut self, nanos: i64) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called advance_fake_time on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let time = executor.now();
executor.set_fake_time(time + fasync::Duration::from_nanos(nanos));
executor.wake_expired_timers();
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called advance_fake_time on stopped MLME"),
}
}
pub fn run_until_stalled(&mut self) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called run_until_stalled on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called run_until_stalled on stopped MLME"),
}
}
}
// DriverEventSink is used by other devices to interact with our main loop thread. All
// events from our ethernet device or vendor device are converted to DriverEvents
// and sent through this sink, where they can then be handled serially. Multiple copies of
// DriverEventSink may be safely passed between threads, including one that is used by our
// vendor driver as the context for wlan_softmac_ifc_protocol_ops.
struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>);
// TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame.
pub enum DriverEvent {
// Indicates that the device is being removed and our main loop should exit.
Stop,
// TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest.
// Indicates receipt of a MAC frame from a peer.
MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo },
// Requests transmission of an ethernet frame over the air.
EthFrameTx { bytes: Vec<u8> },
// Reports a scan is complete.
ScanComplete { status: zx::Status, scan_id: u64 },
// Reports the result of an attempted frame transmission.
TxStatusReport { tx_status: banjo_common::WlanTxStatus },
// Reports the current status of the vendor driver.
Status { status: u32 },
}
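// --- Added illustrative sketch (not part of the driver itself) ---
// DriverEvents travel over an unbounded futures mpsc channel: any holder of the
// sender half can queue work for the MLME main loop without blocking. This uses
// only the channel and executor types already imported by this file; the frame
// bytes are arbitrary example data.
#[cfg(test)]
mod driver_event_channel_example {
    use super::*;
    use futures::StreamExt;

    #[test]
    fn eth_frame_tx_round_trip() {
        let mut exec = fasync::LocalExecutor::new().unwrap();
        let (sink, mut stream) = mpsc::unbounded::<DriverEvent>();
        assert!(sink.unbounded_send(DriverEvent::EthFrameTx { bytes: vec![0xde, 0xad] }).is_ok());
        exec.run_singlethreaded(async {
            match stream.next().await {
                Some(DriverEvent::EthFrameTx { bytes }) => assert_eq!(bytes, vec![0xde, 0xad]),
                _ => panic!("expected the EthFrameTx event we just queued"),
            }
        });
    }
}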
pub struct Mlme<T: MlmeImpl> {
mlme_impl: T,
minstrel: Option<MinstrelWrapper>,
// A stream of requests coming from the parent SME of this MLME.
mlme_request_stream: fidl_mlme::MlmeRequestStream,
// A stream of events initiated by C++ device drivers and then buffered here
// by our MlmeHandle.
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
time_stream: common::timer::TimeStream<T::TimerEvent>,
minstrel_time_stream: common::timer::TimeStream<()>,
}
fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool |
const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
// Remedy for fxbug.dev/8165 (fxbug.dev/33151)
// See |DATA_FRAME_INTERVAL_NANOS|
// in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs
// Ensure at least one probe frame (generated every 16 data frames)
// in every cycle:
// 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32.
const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83);
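// Worked check of the bound above (added for illustration): with an 83 ms update
// interval, 83 ms / 16 ≈ 5.19 ms and 83 ms / 32 ≈ 2.59 ms, so the stated invariant
// "16 <= data frames per update interval < 32" holds as long as the hw-sim
// data-frame interval (DATA_FRAME_INTERVAL_NANOS, defined in the hw-sim test)
// lies between roughly 2.6 ms and 5.2 ms per frame.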
// Require a static lifetime so we can move this MLME into an event loop task.
impl<T: 'static + MlmeImpl> Mlme<T> {
pub fn start(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> Result<MlmeHandle, Error> {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
// This sink is used both by the underlying iface to forward up driver events, as well
// as via the MlmeHandle to send ethernet frames and terminate MLME.
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, startup_receiver) = oneshot::channel();
// Everything else happens in a new thread so that we can switch into an async context
// without requiring all parts of MLME to impl Send.
let join_handle = std::thread::spawn(move || {
info!("Starting WLAN MLME main loop");
let mut executor = fasync::LocalExecutor::new().unwrap();
let future = Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
);
executor.run_singlethreaded(future);
});
let mut executor = fasync::LocalExecutor::new().unwrap();
let startup_result = executor.run_singlethreaded(startup_receiver);
match startup_result.map_err(|e| Error::from(e)) {
Ok(Ok(())) => Ok(MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Real { join_handle }),
}),
Err(err) | Ok(Err(err)) => match join_handle.join() {
Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err),
Err(panic_err) => {
bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err)
}
},
}
}
// Create an MLME in a test configuration. This MLME will never do anything unless it's progressed
// using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled.
pub fn start_test(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> MlmeHandle {
let executor = fasync::TestExecutor::new_with_fake_time().unwrap();
Self::start_test_with_executor(config, device, buf_provider, executor)
}
pub fn start_test_with_executor(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
mut executor: fasync::TestExecutor,
) -> MlmeHandle {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, mut startup_receiver) = oneshot::channel();
let mut future = Box::pin(Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
));
let _ = executor.run_until_stalled(&mut future.as_mut());
startup_receiver
.try_recv()
.unwrap()
.expect("Test MLME setup stalled.")
.expect("Test MLME setup failed.");
MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Fake { executor, future }),
}
}
async fn main_loop_thread(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
startup_sender: oneshot::Sender<Result<(), Error>>,
) {
let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink));
let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut());
// Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the
// driver before we start our MLME will be safely buffered in our driver_event_sink.
// Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine.
// The returned value is the MLME server end of the channel wlandevicemonitor created to connect MLME and SME.
let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) {
Ok(handle) => handle,
Err(e) => {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap();
return;
}
};
let channel = zx::Channel::from(mlme_protocol_handle_via_iface_creation);
let server = fidl::endpoints::ServerEnd::<fidl_mlme::MlmeMarker>::new(channel);
let (mlme_request_stream, control_handle) = match server.into_stream_and_control_handle() {
Ok(res) => res,
Err(e) => {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender
.send(Err(anyhow!("Failed to get MLME request stream: {}", e)))
.unwrap();
return;
}
};
let device_mac_sublayer_support = device.mac_sublayer_support();
let (minstrel_timer, minstrel_time_stream) = common::timer::create_timer();
let update_interval = if device_mac_sublayer_support.device.is_synthetic {
MINSTREL_UPDATE_INTERVAL_HW_SIM
} else {
MINSTREL_UPDATE_INTERVAL
};
let minstrel = if should_enable_minstrel(&device_mac_sublayer_support) {
let timer_manager = MinstrelTimer { timer: minstrel_timer, current_timer: None };
let probe_sequence = probe_sequence::ProbeSequence::random_new();
Some(Arc::new(Mutex::new(minstrel::MinstrelRateSelector::new(
timer_manager,
update_interval,
probe_sequence,
))))
} else {
None
};
let new_device = Device::new(device, minstrel.clone(), control_handle);
let (timer, time_stream) = common::timer::create_timer();
let mlme_impl = T::new(config, new_device, buf_provider, timer);
let mlme = Self {
mlme_impl,
minstrel,
mlme_request_stream,
driver_event_stream,
time_stream,
minstrel_time_stream,
};
// Startup is complete. Signal the main thread to proceed.
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Ok(())).unwrap();
let result = Self::run_main_loop(mlme).await;
match result {
Ok(()) => info!("MLME event loop exited gracefully."),
Err(e) => error!("MLME event loop exited with error: {:?}", e),
}
}
/// Begin processing MLME events.
/// Does not return until iface destruction is requested via DriverEvent::Stop, unless
/// a critical error occurs. Note that MlmeHandle::stop will work in either case.
pub async fn run_main_loop(mut self) -> Result<(), Error> {
let mut timer_stream =
common::timer::make_async_timed_event_stream(self.time_stream).fuse();
let mut minstrel_timer_stream =
common::timer::make_async_timed_event_stream(self.minstrel_time_stream).fuse();
loop {
select! {
// Process requests from SME.
mlme_request = self.mlme_request_stream.next() => match mlme_request {
Some(req) => {
match req {
Ok(req) => {
let method_name = req.method_name();
if let Err(e) = self.mlme_impl.handle_mlme_message(req) {
info!("Failed to handle mlme {} request: {}", method_name, e);
}
}
Err(e) => {
info!("Failure while receiving mlme request: {}", e);
}
}
}
None => bail!("MLME request stream terminated unexpectedly."),
},
// Process requests from our C++ drivers.
driver_event = self.driver_event_stream.next() => match driver_event {
Some(event) => match event {
// DriverEvent::Stop indicates a safe shutdown.
DriverEvent::Stop => return Ok(()),
DriverEvent::MacFrameRx { bytes, rx_info } => {
self.mlme_impl.handle_mac_frame_rx(&bytes[..], rx_info);
}
DriverEvent::EthFrameTx { bytes } => {
if let Err(e) = self.mlme_impl.handle_eth_frame_tx(&bytes[..]) {
// TODO(fxbug.dev/45464): Keep a counter of these failures.
info!("Failed to handle eth frame: {}", e);
}
}
DriverEvent::ScanComplete { status, scan_id } => {
self.mlme_impl.handle_scan_complete(status, scan_id)
},
DriverEvent::TxStatusReport { tx_status } => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_tx_status_report(&tx_status)
}
}
DriverEvent::Status { status } => {
self.mlme_impl.access_device().set_eth_status(status)
}
},
None => bail!("Driver event stream terminated unexpectedly."),
},
timed_event = timer_stream.select_next_some() => {
self.mlme_impl.handle_timeout(timed_event.id, timed_event.event);
}
_minstrel_timeout = minstrel_timer_stream.select_next_some() => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_timeout()
}
}
}
}
}
}
#[cfg(test)]
mod test_utils {
use {
super::*, banjo_fuchsia_hardware_wlan_associnfo as banjo_wlan_associnfo,
banjo_fuchsia_wlan_common as banjo_common, fidl::endpoints::RequestStream,
std::default::Default,
};
#[derive(Copy, Clone, Debug)]
pub struct MockWlanRxInfo {
pub rx_flags: banjo_wlan_softmac::WlanRxInfoFlags,
pub valid_fields: u32,
pub phy: banjo_common::WlanPhyType,
pub data_rate: u32,
pub channel: banjo_common::WlanChannel,
pub mcs: u8,
pub rssi_dbm: i8,
pub snr_dbh: i16,
}
impl Default for MockWlanRxInfo {
fn default() -> Self {
Self {
valid_fields: banjo_wlan_associnfo::WlanRxInfoValid::CHAN_WIDTH.0
| banjo_wlan_associnfo::WlanRxInfoValid::RSSI.0
| banjo_wlan_associnfo::WlanRxInfoValid::SNR.0,
channel: banjo_common::WlanChannel {
primary: 1,
cbw: banjo_common::ChannelBandwidth::CBW20,
secondary80: 0,
},
rssi_dbm: -40,
snr_dbh: 35,
// Default to 0 for these fields since there are no
// other reasonable values to mock.
rx_flags: banjo_wlan | {
mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported
} | identifier_body |
lib.rs | t mode.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
pub fn advance_fake_time(&mut self, nanos: i64) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called advance_fake_time on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let time = executor.now();
executor.set_fake_time(time + fasync::Duration::from_nanos(nanos));
executor.wake_expired_timers();
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called advance_fake_time on stopped MLME"),
}
}
pub fn run_until_stalled(&mut self) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called run_until_stalled on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called run_until_stalled on stopped MLME"),
}
}
}
// DriverEventSink is used by other devices to interact with our main loop thread. All
// events from our ethernet device or vendor device are converted to DriverEvents
// and sent through this sink, where they can then be handled serially. Multiple copies of
// DriverEventSink may be safely passed between threads, including one that is used by our
// vendor driver as the context for wlan_softmac_ifc_protocol_ops.
struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>);
// TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame.
pub enum DriverEvent {
// Indicates that the device is being removed and our main loop should exit.
Stop,
// TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest.
// Indicates receipt of a MAC frame from a peer.
MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo },
// Requests transmission of an ethernet frame over the air.
EthFrameTx { bytes: Vec<u8> },
// Reports a scan is complete.
ScanComplete { status: zx::Status, scan_id: u64 },
// Reports the result of an attempted frame transmission.
TxStatusReport { tx_status: banjo_common::WlanTxStatus },
// Reports the current status of the vendor driver.
Status { status: u32 },
}
pub struct Mlme<T: MlmeImpl> {
mlme_impl: T,
minstrel: Option<MinstrelWrapper>,
// A stream of requests coming from the parent SME of this MLME.
mlme_request_stream: fidl_mlme::MlmeRequestStream,
// A stream of events initiated by C++ device drivers and then buffered here
// by our MlmeHandle.
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
time_stream: common::timer::TimeStream<T::TimerEvent>,
minstrel_time_stream: common::timer::TimeStream<()>,
}
fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool {
mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported
}
const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
// Remedy for fxbug.dev/8165 (fxbug.dev/33151)
// See |DATA_FRAME_INTERVAL_NANOS|
// in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs
// Ensure at least one probe frame (generated every 16 data frames)
// in every cycle:
// 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32.
const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83);
// Require a static lifetime so we can move this MLME into an event loop task.
impl<T: 'static + MlmeImpl> Mlme<T> {
pub fn start(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> Result<MlmeHandle, Error> {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
// This sink is used both by the underlying iface to forward up driver events, as well
// as via the MlmeHandle to send ethernet frames and terminate MLME.
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, startup_receiver) = oneshot::channel();
// Everything else happens in a new thread so that we can switch into an async context
// without requiring all parts of MLME to impl Send.
let join_handle = std::thread::spawn(move || {
info!("Starting WLAN MLME main loop");
let mut executor = fasync::LocalExecutor::new().unwrap();
let future = Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
);
executor.run_singlethreaded(future);
});
let mut executor = fasync::LocalExecutor::new().unwrap();
let startup_result = executor.run_singlethreaded(startup_receiver);
match startup_result.map_err(|e| Error::from(e)) {
Ok(Ok(())) => Ok(MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Real { join_handle }),
}),
Err(err) | Ok(Err(err)) => match join_handle.join() {
Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err),
Err(panic_err) => {
bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err)
}
},
}
}
// Create an MLME in a test configuration. This MLME will never do anything unless it's progressed
// using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled.
pub fn start_test(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> MlmeHandle {
let executor = fasync::TestExecutor::new_with_fake_time().unwrap();
Self::start_test_with_executor(config, device, buf_provider, executor)
}
pub fn start_test_with_executor(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
mut executor: fasync::TestExecutor,
) -> MlmeHandle {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, mut startup_receiver) = oneshot::channel();
let mut future = Box::pin(Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
));
let _ = executor.run_until_stalled(&mut future.as_mut());
startup_receiver
.try_recv()
.unwrap()
.expect("Test MLME setup stalled.")
.expect("Test MLME setup failed.");
MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Fake { executor, future }),
}
}
async fn main_loop_thread(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
startup_sender: oneshot::Sender<Result<(), Error>>,
) {
let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink));
let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut());
// Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the
// driver before we start our MLME will be safely buffered in our driver_event_sink.
// Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine.
// The returned value is the MLME server end of the channel wlandevicemonitor created to connect MLME and SME.
let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) {
Ok(handle) => handle,
Err(e) => {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap();
return;
}
};
let channel = zx::Channel::from(mlme_protocol_handle_via_iface_creation);
let server = fidl::endpoints::ServerEnd::<fidl_mlme::MlmeMarker>::new(channel);
let (mlme_request_stream, control_handle) = match server.into_stream_and_control_handle() {
Ok(res) => res,
Err(e) => {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender
.send(Err(anyhow!("Failed to get MLME request stream: {}", e)))
.unwrap();
return;
}
};
let device_mac_sublayer_support = device.mac_sublayer_support();
let (minstrel_timer, minstrel_time_stream) = common::timer::create_timer();
let update_interval = if device_mac_sublayer_support.device.is_synthetic {
MINSTREL_UPDATE_INTERVAL_HW_SIM
} else {
MINSTREL_UPDATE_INTERVAL
};
let minstrel = if should_enable_minstrel(&device_mac_sublayer_support) {
let timer_manager = MinstrelTimer { timer: minstrel_timer, current_timer: None };
let probe_sequence = probe_sequence::ProbeSequence::random_new();
Some(Arc::new(Mutex::new(minstrel::MinstrelRateSelector::new(
timer_manager,
update_interval,
probe_sequence,
))))
} else {
None
};
let new_device = Device::new(device, minstrel.clone(), control_handle);
let (timer, time_stream) = common::timer::create_timer();
let mlme_impl = T::new(config, new_device, buf_provider, timer);
let mlme = Self {
mlme_impl,
minstrel,
mlme_request_stream,
driver_event_stream,
time_stream,
minstrel_time_stream,
};
// Startup is complete. Signal the main thread to proceed.
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Ok(())).unwrap();
let result = Self::run_main_loop(mlme).await;
match result {
Ok(()) => info!("MLME event loop exited gracefully."),
Err(e) => error!("MLME event loop exited with error: {:?}", e),
}
}
/// Begin processing MLME events.
/// Does not return until iface destruction is requested via DriverEvent::Stop, unless
/// a critical error occurs. Note that MlmeHandle::stop will work in either case.
pub async fn run_main_loop(mut self) -> Result<(), Error> {
let mut timer_stream =
common::timer::make_async_timed_event_stream(self.time_stream).fuse();
let mut minstrel_timer_stream =
common::timer::make_async_timed_event_stream(self.minstrel_time_stream).fuse();
loop {
select! {
// Process requests from SME.
mlme_request = self.mlme_request_stream.next() => match mlme_request {
Some(req) => {
match req {
Ok(req) => {
let method_name = req.method_name();
if let Err(e) = self.mlme_impl.handle_mlme_message(req) {
info!("Failed to handle mlme {} request: {}", method_name, e);
}
}
Err(e) => {
info!("Failure while receiving mlme request: {}", e);
}
}
}
None => bail!("MLME request stream terminated unexpectedly."),
},
// Process requests from our C++ drivers.
driver_event = self.driver_event_stream.next() => match driver_event {
Some(event) => match event {
// DriverEvent::Stop indicates a safe shutdown.
DriverEvent::Stop => return Ok(()),
DriverEvent::MacFrameRx { bytes, rx_info } => {
self.mlme_impl.handle_mac_frame_rx(&bytes[..], rx_info);
}
DriverEvent::EthFrameTx { bytes } => {
if let Err(e) = self.mlme_impl.handle_eth_frame_tx(&bytes[..]) {
// TODO(fxbug.dev/45464): Keep a counter of these failures.
info!("Failed to handle eth frame: {}", e);
}
}
DriverEvent::ScanComplete { status, scan_id } => {
self.mlme_impl.handle_scan_complete(status, scan_id)
},
DriverEvent::TxStatusReport { tx_status } => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_tx_status_report(&tx_status)
}
}
DriverEvent::Status { status } => {
self.mlme_impl.access_device().set_eth_status(status)
}
},
None => bail!("Driver event stream terminated unexpectedly."),
},
timed_event = timer_stream.select_next_some() => {
self.mlme_impl.handle_timeout(timed_event.id, timed_event.event);
}
_minstrel_timeout = minstrel_timer_stream.select_next_some() => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_timeout()
}
}
}
}
}
}
#[cfg(test)]
mod test_utils {
use {
super::*, banjo_fuchsia_hardware_wlan_associnfo as banjo_wlan_associnfo,
banjo_fuchsia_wlan_common as banjo_common, fidl::endpoints::RequestStream,
std::default::Default,
};
#[derive(Copy, Clone, Debug)]
pub struct MockWlanRxInfo {
pub rx_flags: banjo_wlan_softmac::WlanRxInfoFlags,
pub valid_fields: u32,
pub phy: banjo_common::WlanPhyType,
pub data_rate: u32,
pub channel: banjo_common::WlanChannel,
pub mcs: u8,
pub rssi_dbm: i8,
pub snr_dbh: i16,
}
impl Default for MockWlanRxInfo {
fn default() -> Self {
Self {
valid_fields: banjo_wlan_associnfo::WlanRxInfoValid::CHAN_WIDTH.0
| banjo_wlan_associnfo::WlanRxInfoValid::RSSI.0
| banjo_wlan_associnfo::WlanRxInfoValid::SNR.0,
channel: banjo_common::WlanChannel {
primary: 1,
cbw: banjo_common::ChannelBandwidth::CBW20,
secondary80: 0,
},
rssi_dbm: -40,
snr_dbh: 35,
// Default to 0 for these fields since there are no
// other reasonable values to mock.
rx_flags: banjo_wlan_softmac::WlanRxInfoFlags(0),
phy: banjo_common::WlanPhyType::DSSS,
data_rate: 0,
mcs: 0,
}
}
}
impl From<MockWlanRxInfo> for banjo_wlan_softmac::WlanRxInfo {
fn from(mock_rx_info: MockWlanRxInfo) -> banjo_wlan_softmac::WlanRxInfo {
banjo_wlan_softmac::WlanRxInfo {
rx_flags: mock_rx_info.rx_flags,
valid_fields: mock_rx_info.valid_fields,
phy: mock_rx_info.phy,
data_rate: mock_rx_info.data_rate,
channel: mock_rx_info.channel,
mcs: mock_rx_info.mcs,
rssi_dbm: mock_rx_info.rssi_dbm,
snr_dbh: mock_rx_info.snr_dbh,
}
}
}
pub(crate) fn fake_control_handle(
// We use this unused parameter to ensure that an executor exists.
_exec: &fuchsia_async::TestExecutor,
) -> (fidl_mlme::MlmeControlHandle, fuchsia_zircon::Channel) {
let (c1, c2) = fuchsia_zircon::Channel::create().unwrap();
let async_c1 = fidl::AsyncChannel::from_channel(c1).unwrap();
let request_stream = fidl_mlme::MlmeRequestStream::from_channel(async_c1);
let control_handle = request_stream.control_handle();
(control_handle, c2)
}
pub struct FakeMlme {
device: Device,
}
impl MlmeImpl for FakeMlme {
type Config = ();
type TimerEvent = ();
fn new(
_config: Self::Config,
device: Device,
_buf_provider: buffer::BufferProvider,
_scheduler: common::timer::Timer<Self::TimerEvent>,
) -> Self {
Self { device }
}
fn handle_mlme_message(&mut self, _msg: fidl_mlme::MlmeRequest) -> Result<(), Error> {
unimplemented!()
}
fn handle_mac_frame_rx(&mut self, _bytes: &[u8], _rx_info: banjo_wlan_softmac::WlanRxInfo) {
unimplemented!()
}
fn | handle_eth_frame_tx | identifier_name |
|
lib.rs | , event: Self::TimerEvent);
fn access_device(&mut self) -> &mut Device;
}
pub struct MinstrelTimer {
timer: wlan_common::timer::Timer<()>,
current_timer: Option<common::timer::EventId>,
}
impl minstrel::TimerManager for MinstrelTimer {
fn schedule(&mut self, from_now: Duration) {
self.current_timer.replace(self.timer.schedule_after(from_now.into(), ()));
}
fn cancel(&mut self) {
self.current_timer.take();
}
}
type MinstrelWrapper = Arc<Mutex<minstrel::MinstrelRateSelector<MinstrelTimer>>>;
// We support a fake MLME internal representation that allows tests written in C++ to manually
// tweak the system time.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
enum MlmeHandleInternal {
Real {
join_handle: std::thread::JoinHandle<()>,
},
Fake {
executor: fasync::TestExecutor,
future: std::pin::Pin<Box<dyn futures::Future<Output = ()>>>,
},
}
/// MlmeHandle is the only access we have to our MLME after spinning it off into its own
/// event loop thread.
pub struct MlmeHandle {
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
internal: Option<MlmeHandleInternal>,
}
impl MlmeHandle {
pub fn stop(&mut self) {
if let Err(e) = self.driver_event_sink.unbounded_send(DriverEvent::Stop) {
error!("Cannot signal MLME event loop thread: {}", e);
}
match self.internal.take() {
Some(MlmeHandleInternal::Real { join_handle }) => {
// This unwrap will only fail if the thread panics.
if let Err(e) = join_handle.join() {
error!("MLME event loop thread panicked: {:?}", e);
}
}
Some(MlmeHandleInternal::Fake { mut executor, mut future }) => {
// Verify that our main thread would exit now.
assert!(executor.run_until_stalled(&mut future.as_mut()).is_ready());
}
None => warn!("Called stop on already stopped MLME"),
}
}
pub fn delete(mut self) {
if self.internal.is_some() {
warn!("Called delete on MlmeHandle before calling stop.");
self.stop()
}
}
pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> {
self.driver_event_sink
.unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() })
.map_err(|e| e.into())
}
// Fns used to interact with an MLME running in test mode.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
pub fn advance_fake_time(&mut self, nanos: i64) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called advance_fake_time on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let time = executor.now();
executor.set_fake_time(time + fasync::Duration::from_nanos(nanos));
executor.wake_expired_timers();
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called advance_fake_time on stopped MLME"),
}
}
pub fn run_until_stalled(&mut self) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called run_until_stalled on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called run_until_stalled on stopped MLME"),
}
}
}
// DriverEventSink is used by other devices to interact with our main loop thread. All
// events from our ethernet device or vendor device are converted to DriverEvents
// and sent through this sink, where they can then be handled serially. Multiple copies of
// DriverEventSink may be safely passed between threads, including one that is used by our
// vendor driver as the context for wlan_softmac_ifc_protocol_ops.
struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>);
// TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame.
pub enum DriverEvent {
// Indicates that the device is being removed and our main loop should exit.
Stop,
// TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest.
// Indicates receipt of a MAC frame from a peer.
MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo },
// Requests transmission of an ethernet frame over the air.
EthFrameTx { bytes: Vec<u8> },
// Reports a scan is complete.
ScanComplete { status: zx::Status, scan_id: u64 },
// Reports the result of an attempted frame transmission.
TxStatusReport { tx_status: banjo_common::WlanTxStatus },
// Reports the current status of the vendor driver.
Status { status: u32 },
}
pub struct Mlme<T: MlmeImpl> {
mlme_impl: T,
minstrel: Option<MinstrelWrapper>,
// A stream of requests coming from the parent SME of this MLME.
mlme_request_stream: fidl_mlme::MlmeRequestStream,
// A stream of events initiated by C++ device drivers and then buffered here
// by our MlmeHandle.
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
time_stream: common::timer::TimeStream<T::TimerEvent>,
minstrel_time_stream: common::timer::TimeStream<()>,
}
fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool {
mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported
}
const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
// Remedy for fxbug.dev/8165 (fxbug.dev/33151)
// See |DATA_FRAME_INTERVAL_NANOS|
// in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs
// Ensure at least one probe frame (generated every 16 data frames)
// in every cycle:
// 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32.
const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83);
// Require a static lifetime so we can move this MLME into an event loop task.
impl<T: 'static + MlmeImpl> Mlme<T> {
pub fn start(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> Result<MlmeHandle, Error> {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
// This sink is used both by the underlying iface to forward up driver events, as well
// as via the MlmeHandle to send ethernet frames and terminate MLME.
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, startup_receiver) = oneshot::channel();
// Everything else happens in a new thread so that we can switch into an async context
// without requiring all parts of MLME to impl Send.
let join_handle = std::thread::spawn(move || {
info!("Starting WLAN MLME main loop");
let mut executor = fasync::LocalExecutor::new().unwrap();
let future = Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
);
executor.run_singlethreaded(future);
});
let mut executor = fasync::LocalExecutor::new().unwrap();
let startup_result = executor.run_singlethreaded(startup_receiver);
match startup_result.map_err(|e| Error::from(e)) {
Ok(Ok(())) => Ok(MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Real { join_handle }),
}),
Err(err) | Ok(Err(err)) => match join_handle.join() {
Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err),
Err(panic_err) => {
bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err)
}
},
}
}
// Create an MLME in a test configuration. This MLME will never do anything unless it's progressed
// using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled.
pub fn start_test(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> MlmeHandle {
let executor = fasync::TestExecutor::new_with_fake_time().unwrap();
Self::start_test_with_executor(config, device, buf_provider, executor)
}
pub fn start_test_with_executor(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
mut executor: fasync::TestExecutor,
) -> MlmeHandle {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, mut startup_receiver) = oneshot::channel();
let mut future = Box::pin(Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
));
let _ = executor.run_until_stalled(&mut future.as_mut());
startup_receiver
.try_recv()
.unwrap()
.expect("Test MLME setup stalled.")
.expect("Test MLME setup failed.");
MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Fake { executor, future }),
}
}
async fn main_loop_thread(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
startup_sender: oneshot::Sender<Result<(), Error>>,
) {
let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink));
let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut());
// Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the
// driver before we start our MLME will be safely buffered in our driver_event_sink.
// Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine.
// The returned value is the MLME server end of the channel wlandevicemonitor created to connect MLME and SME.
let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) {
Ok(handle) => handle,
Err(e) => |
};
let channel = zx::Channel::from(mlme_protocol_handle_via_iface_creation);
let server = fidl::endpoints::ServerEnd::<fidl_mlme::MlmeMarker>::new(channel);
let (mlme_request_stream, control_handle) = match server.into_stream_and_control_handle() {
Ok(res) => res,
Err(e) => {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender
.send(Err(anyhow!("Failed to get MLME request stream: {}", e)))
.unwrap();
return;
}
};
let device_mac_sublayer_support = device.mac_sublayer_support();
let (minstrel_timer, minstrel_time_stream) = common::timer::create_timer();
let update_interval = if device_mac_sublayer_support.device.is_synthetic {
MINSTREL_UPDATE_INTERVAL_HW_SIM
} else {
MINSTREL_UPDATE_INTERVAL
};
let minstrel = if should_enable_minstrel(&device_mac_sublayer_support) {
let timer_manager = MinstrelTimer { timer: minstrel_timer, current_timer: None };
let probe_sequence = probe_sequence::ProbeSequence::random_new();
Some(Arc::new(Mutex::new(minstrel::MinstrelRateSelector::new(
timer_manager,
update_interval,
probe_sequence,
))))
} else {
None
};
let new_device = Device::new(device, minstrel.clone(), control_handle);
let (timer, time_stream) = common::timer::create_timer();
let mlme_impl = T::new(config, new_device, buf_provider, timer);
let mlme = Self {
mlme_impl,
minstrel,
mlme_request_stream,
driver_event_stream,
time_stream,
minstrel_time_stream,
};
// Startup is complete. Signal the main thread to proceed.
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Ok(())).unwrap();
let result = Self::run_main_loop(mlme).await;
match result {
Ok(()) => info!("MLME event loop exited gracefully."),
Err(e) => error!("MLME event loop exited with error: {:?}", e),
}
}
/// Begin processing MLME events.
/// Does not return until iface destruction is requested via DriverEvent::Stop, unless
/// a critical error occurs. Note that MlmeHandle::stop will work in either case.
pub async fn run_main_loop(mut self) -> Result<(), Error> {
let mut timer_stream =
common::timer::make_async_timed_event_stream(self.time_stream).fuse();
let mut minstrel_timer_stream =
common::timer::make_async_timed_event_stream(self.minstrel_time_stream).fuse();
loop {
select! {
// Process requests from SME.
mlme_request = self.mlme_request_stream.next() => match mlme_request {
Some(req) => {
match req {
Ok(req) => {
let method_name = req.method_name();
if let Err(e) = self.mlme_impl.handle_mlme_message(req) {
info!("Failed to handle mlme {} request: {}", method_name, e);
}
}
Err(e) => {
info!("Failure while receiving mlme request: {}", e);
}
}
}
None => bail!("MLME request stream terminated unexpectedly."),
},
// Process requests from our C++ drivers.
driver_event = self.driver_event_stream.next() => match driver_event {
Some(event) => match event {
// DriverEvent::Stop indicates a safe shutdown.
DriverEvent::Stop => return Ok(()),
DriverEvent::MacFrameRx { bytes, rx_info } => {
self.mlme_impl.handle_mac_frame_rx(&bytes[..], rx_info);
}
DriverEvent::EthFrameTx { bytes } => {
if let Err(e) = self.mlme_impl.handle_eth_frame_tx(&bytes[..]) {
// TODO(fxbug.dev/45464): Keep a counter of these failures.
info!("Failed to handle eth frame: {}", e);
}
}
DriverEvent::ScanComplete { status, scan_id } => {
self.mlme_impl.handle_scan_complete(status, scan_id)
},
DriverEvent::TxStatusReport { tx_status } => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_tx_status_report(&tx_status)
}
}
DriverEvent::Status { status } => {
self.mlme_impl.access_device().set_eth_status(status)
}
},
None => bail!("Driver event stream terminated unexpectedly."),
},
timed_event = timer_stream.select_next_some() => {
self.mlme_impl.handle_timeout(timed_event.id, timed_event.event);
}
_minstrel_timeout = minstrel_timer_stream.select_next_some() => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_timeout()
}
}
}
}
}
}
#[cfg(test)]
mod test_utils {
use {
super::*, banjo_fuchsia_hardware_wlan_associnfo as banjo_wlan_associnfo,
banjo_fuchsia_wlan_common as banjo_common, fidl::endpoints::RequestStream,
std::default::Default,
};
#[derive(Copy, Clone, Debug)]
pub struct MockWlanRxInfo {
pub rx_flags: banjo_wlan_softmac::WlanRxInfoFlags,
pub valid_fields: u32,
pub phy: banjo_common::WlanPhyType,
pub data_rate: u32,
pub channel: banjo_common::WlanChannel,
pub mcs: u8,
pub rssi_dbm: i8,
pub snr_dbh: i16,
}
impl Default for MockWlanRxInfo {
fn default() -> Self {
Self {
valid_fields: banjo_wlan_associnfo::WlanRxInfoValid::CHAN_WIDTH.0
| banjo_wlan_associnfo::WlanRxInfoValid::RSSI.0
| banjo_wlan_associnfo::WlanRxInfoValid::SNR.0,
channel: banjo_common::WlanChannel {
primary: 1,
cbw: banjo_common::ChannelBandwidth::CBW20,
secondary80: 0,
},
rssi_dbm: -40,
snr_dbh: 35,
// Default to 0 for these fields since there are no
// other reasonable values to mock.
rx_flags: banjo_wlan | {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap();
return;
} | conditional_block |
lib.rs | EventId, event: Self::TimerEvent);
fn access_device(&mut self) -> &mut Device;
}
pub struct MinstrelTimer {
timer: wlan_common::timer::Timer<()>,
current_timer: Option<common::timer::EventId>,
}
impl minstrel::TimerManager for MinstrelTimer {
fn schedule(&mut self, from_now: Duration) {
self.current_timer.replace(self.timer.schedule_after(from_now.into(), ()));
}
fn cancel(&mut self) {
self.current_timer.take();
}
}
type MinstrelWrapper = Arc<Mutex<minstrel::MinstrelRateSelector<MinstrelTimer>>>;
// We support a fake MLME internal representation that allows tests written in C++ to manually
// tweak the system time.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
enum MlmeHandleInternal {
Real {
join_handle: std::thread::JoinHandle<()>,
},
Fake {
executor: fasync::TestExecutor,
future: std::pin::Pin<Box<dyn futures::Future<Output = ()>>>,
},
}
/// MlmeHandle is the only access we have to our MLME after spinning it off into its own
/// event loop thread.
pub struct MlmeHandle {
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
internal: Option<MlmeHandleInternal>,
}
impl MlmeHandle {
pub fn stop(&mut self) {
if let Err(e) = self.driver_event_sink.unbounded_send(DriverEvent::Stop) {
error!("Cannot signal MLME event loop thread: {}", e);
}
match self.internal.take() {
Some(MlmeHandleInternal::Real { join_handle }) => {
// This unwrap will only fail if the thread panics.
if let Err(e) = join_handle.join() {
error!("MLME event loop thread panicked: {:?}", e);
}
}
Some(MlmeHandleInternal::Fake { mut executor, mut future }) => {
// Verify that our main thread would exit now.
assert!(executor.run_until_stalled(&mut future.as_mut()).is_ready());
}
None => warn!("Called stop on already stopped MLME"),
}
}
pub fn delete(mut self) {
if self.internal.is_some() {
warn!("Called delete on MlmeHandle before calling stop.");
self.stop()
}
}
pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> {
self.driver_event_sink
.unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() })
.map_err(|e| e.into())
}
// Fns used to interact with an MLME running in test mode.
// TODO(fxbug.dev/45464): Remove when tests are all in Rust.
pub fn advance_fake_time(&mut self, nanos: i64) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called advance_fake_time on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let time = executor.now();
executor.set_fake_time(time + fasync::Duration::from_nanos(nanos));
executor.wake_expired_timers();
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called advance_fake_time on stopped MLME"),
}
}
pub fn run_until_stalled(&mut self) {
match &mut self.internal {
Some(MlmeHandleInternal::Real { .. }) => {
panic!("Called run_until_stalled on a real MLME")
}
Some(MlmeHandleInternal::Fake { executor, future }) => {
let _ = executor.run_until_stalled(&mut future.as_mut());
}
None => panic!("Called run_until_stalled on stopped MLME"),
}
}
}
// DriverEventSink is used by other devices to interact with our main loop thread. All
// events from our ethernet device or vendor device are converted to DriverEvents
// and sent through this sink, where they can then be handled serially. Multiple copies of
// DriverEventSink may be safely passed between threads, including one that is used by our
// vendor driver as the context for wlan_softmac_ifc_protocol_ops.
struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>);
// TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame.
pub enum DriverEvent {
// Indicates that the device is being removed and our main loop should exit.
Stop,
// TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest.
// Indicates receipt of a MAC frame from a peer.
MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo },
// Requests transmission of an ethernet frame over the air.
EthFrameTx { bytes: Vec<u8> },
// Reports a scan is complete.
ScanComplete { status: zx::Status, scan_id: u64 },
// Reports the result of an attempted frame transmission.
TxStatusReport { tx_status: banjo_common::WlanTxStatus },
// Reports the current status of the vendor driver.
Status { status: u32 },
}
pub struct Mlme<T: MlmeImpl> {
mlme_impl: T,
minstrel: Option<MinstrelWrapper>,
// A stream of requests coming from the parent SME of this MLME.
mlme_request_stream: fidl_mlme::MlmeRequestStream,
// A stream of events initiated by C++ device drivers and then buffered here
// by our MlmeHandle.
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
time_stream: common::timer::TimeStream<T::TimerEvent>,
minstrel_time_stream: common::timer::TimeStream<()>,
}
fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool {
mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported
}
const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100);
// Remedy for fxbug.dev/8165 (fxbug.dev/33151)
// See |DATA_FRAME_INTERVAL_NANOS|
// in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs
// Ensure at least one probe frame (generated every 16 data frames)
// in every cycle:
// 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32.
const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83);
// Require a static lifetime so we can move this MLME into an event loop task.
impl<T: 'static + MlmeImpl> Mlme<T> {
pub fn start(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> Result<MlmeHandle, Error> {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
// This sink is used both by the underlying iface to forward up driver events and via the
// MlmeHandle to send ethernet frames and terminate MLME.
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, startup_receiver) = oneshot::channel();
// Everything else happens in a new thread so that we can switch into an async context
// without requiring all parts of MLME to impl Send.
let join_handle = std::thread::spawn(move || {
info!("Starting WLAN MLME main loop");
let mut executor = fasync::LocalExecutor::new().unwrap();
let future = Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
);
executor.run_singlethreaded(future);
});
let mut executor = fasync::LocalExecutor::new().unwrap();
let startup_result = executor.run_singlethreaded(startup_receiver);
match startup_result.map_err(|e| Error::from(e)) {
Ok(Ok(())) => Ok(MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Real { join_handle }),
}),
Err(err) | Ok(Err(err)) => match join_handle.join() {
Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err),
Err(panic_err) => {
bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err)
}
},
}
}
// Create an MLME in a test configuration. This MLME will never do anything unless it's progressed
// using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled.
pub fn start_test(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
) -> MlmeHandle {
let executor = fasync::TestExecutor::new_with_fake_time().unwrap();
Self::start_test_with_executor(config, device, buf_provider, executor)
}
pub fn start_test_with_executor(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
mut executor: fasync::TestExecutor,
) -> MlmeHandle {
let (driver_event_sink, driver_event_stream) = mpsc::unbounded();
let driver_event_sink_clone = driver_event_sink.clone();
let (startup_sender, mut startup_receiver) = oneshot::channel();
let mut future = Box::pin(Self::main_loop_thread(
config,
device,
buf_provider,
driver_event_sink_clone,
driver_event_stream,
startup_sender,
));
let _ = executor.run_until_stalled(&mut future.as_mut());
startup_receiver
.try_recv()
.unwrap()
.expect("Test MLME setup stalled.")
.expect("Test MLME setup failed.");
MlmeHandle {
driver_event_sink,
internal: Some(MlmeHandleInternal::Fake { executor, future }),
}
}
async fn main_loop_thread(
config: T::Config,
device: DeviceInterface,
buf_provider: buffer::BufferProvider,
driver_event_sink: mpsc::UnboundedSender<DriverEvent>,
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>,
startup_sender: oneshot::Sender<Result<(), Error>>,
) {
let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink));
let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut());
// Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the
// driver before we start our MLME will be safely buffered in our driver_event_sink.
// Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine.
// The returned value is the MLME server end of the channel wlandevicemonitor created to connect MLME and SME.
let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) {
Ok(handle) => handle,
Err(e) => {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap();
return;
}
};
let channel = zx::Channel::from(mlme_protocol_handle_via_iface_creation);
let server = fidl::endpoints::ServerEnd::<fidl_mlme::MlmeMarker>::new(channel);
let (mlme_request_stream, control_handle) = match server.into_stream_and_control_handle() {
Ok(res) => res,
Err(e) => {
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender
.send(Err(anyhow!("Failed to get MLME request stream: {}", e)))
.unwrap();
return;
}
};
let device_mac_sublayer_support = device.mac_sublayer_support();
let (minstrel_timer, minstrel_time_stream) = common::timer::create_timer();
let update_interval = if device_mac_sublayer_support.device.is_synthetic {
MINSTREL_UPDATE_INTERVAL_HW_SIM
} else {
MINSTREL_UPDATE_INTERVAL
};
let minstrel = if should_enable_minstrel(&device_mac_sublayer_support) {
let timer_manager = MinstrelTimer { timer: minstrel_timer, current_timer: None };
let probe_sequence = probe_sequence::ProbeSequence::random_new();
Some(Arc::new(Mutex::new(minstrel::MinstrelRateSelector::new(
timer_manager,
update_interval,
probe_sequence,
))))
} else {
None
};
let new_device = Device::new(device, minstrel.clone(), control_handle);
let (timer, time_stream) = common::timer::create_timer();
let mlme_impl = T::new(config, new_device, buf_provider, timer);
let mlme = Self {
mlme_impl,
minstrel,
mlme_request_stream,
driver_event_stream,
time_stream,
minstrel_time_stream,
};
// Startup is complete. Signal the main thread to proceed.
// Failure to unwrap indicates a critical failure in the driver init thread.
startup_sender.send(Ok(())).unwrap();
let result = Self::run_main_loop(mlme).await;
match result {
Ok(()) => info!("MLME event loop exited gracefully."),
Err(e) => error!("MLME event loop exited with error: {:?}", e),
}
}
/// Begin processing MLME events.
/// Does not return until iface destruction is requested via DriverEvent::Stop, unless
/// a critical error occurs. Note that MlmeHandle::stop will work in either case.
pub async fn run_main_loop(mut self) -> Result<(), Error> {
let mut timer_stream =
common::timer::make_async_timed_event_stream(self.time_stream).fuse();
let mut minstrel_timer_stream =
common::timer::make_async_timed_event_stream(self.minstrel_time_stream).fuse();
loop {
select! {
// Process requests from SME.
mlme_request = self.mlme_request_stream.next() => match mlme_request {
Some(req) => {
match req {
Ok(req) => {
let method_name = req.method_name();
if let Err(e) = self.mlme_impl.handle_mlme_message(req) {
info!("Failed to handle mlme {} request: {}", method_name, e);
}
}
Err(e) => {
info!("Failure while receiving mlme request: {}", e);
}
}
}
None => bail!("MLME request stream terminated unexpectedly."),
},
// Process requests from our C++ drivers.
driver_event = self.driver_event_stream.next() => match driver_event {
Some(event) => match event {
// DriverEvent::Stop indicates a safe shutdown.
DriverEvent::Stop => return Ok(()),
DriverEvent::MacFrameRx { bytes, rx_info } => {
self.mlme_impl.handle_mac_frame_rx(&bytes[..], rx_info);
}
DriverEvent::EthFrameTx { bytes } => {
if let Err(e) = self.mlme_impl.handle_eth_frame_tx(&bytes[..]) {
// TODO(fxbug.dev/45464): Keep a counter of these failures.
info!("Failed to handle eth frame: {}", e);
}
}
DriverEvent::ScanComplete { status, scan_id } => {
self.mlme_impl.handle_scan_complete(status, scan_id)
},
DriverEvent::TxStatusReport { tx_status } => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_tx_status_report(&tx_status)
}
}
DriverEvent::Status { status } => {
self.mlme_impl.access_device().set_eth_status(status)
}
},
None => bail!("Driver event stream terminated unexpectedly."),
},
timed_event = timer_stream.select_next_some() => {
self.mlme_impl.handle_timeout(timed_event.id, timed_event.event);
}
_minstrel_timeout = minstrel_timer_stream.select_next_some() => {
if let Some(minstrel) = self.minstrel.as_ref() {
minstrel.lock().handle_timeout()
}
}
}
}
}
}
#[cfg(test)]
mod test_utils {
use {
super::*, banjo_fuchsia_hardware_wlan_associnfo as banjo_wlan_associnfo,
banjo_fuchsia_wlan_common as banjo_common, fidl::endpoints::RequestStream,
std::default::Default,
};
#[derive(Copy, Clone, Debug)]
pub struct MockWlanRxInfo {
pub rx_flags: banjo_wlan_softmac::WlanRxInfoFlags,
pub valid_fields: u32,
pub phy: banjo_common::WlanPhyType,
pub data_rate: u32,
pub channel: banjo_common::WlanChannel,
pub mcs: u8,
pub rssi_dbm: i8,
pub snr_dbh: i16,
}
impl Default for MockWlanRxInfo {
fn default() -> Self {
Self {
valid_fields: banjo_wlan_associnfo::WlanRxInfoValid::CHAN_WIDTH.0
| banjo_wlan_associnfo::WlanRxInfoValid::RSSI.0
| banjo_wlan_associnfo::WlanRxInfoValid::SNR.0,
channel: banjo_common::WlanChannel {
primary: 1,
cbw: banjo_common::ChannelBandwidth::CBW20,
secondary80: 0,
},
rssi_dbm: -40,
snr_dbh: 35,
// Default to 0 for these fields since there are no
// other reasonable values to mock.
rx_flags: banjo_wlan_softmac::WlanRxInfoFlags(0),
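// --- Illustrative sketch (not part of the MLME source above) ---
// The file above funnels every driver callback into one unbounded mpsc channel
// (`DriverEventSink`) and drains it serially in `run_main_loop`. A minimal version
// of that pattern, reduced to plain `futures` primitives, looks like this;
// `Event`, the payload values, and `main` are inventions of the sketch.
use futures::{channel::mpsc, executor::block_on, StreamExt};

enum Event {
    Stop,
    EthFrameTx { bytes: Vec<u8> },
}

fn main() {
    let (sink, mut stream) = mpsc::unbounded();
    // Any number of producers (e.g. a vendor-driver callback) only need a clone of `sink`.
    let producer = sink.clone();
    producer.unbounded_send(Event::EthFrameTx { bytes: vec![0u8; 14] }).unwrap();
    producer.unbounded_send(Event::Stop).unwrap();
    // A single consumer handles events strictly in arrival order, as run_main_loop does.
    block_on(async move {
        while let Some(event) = stream.next().await {
            match event {
                Event::EthFrameTx { bytes } => println!("would transmit {} bytes", bytes.len()),
                Event::Stop => break,
            }
        }
    });
}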
lib.rs
// Copyright (C) 2021 Subspace Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Pallet transporter used to move funds between chains.
#![cfg_attr(not(feature = "std"), no_std)]
#![forbid(unsafe_code)]
#![warn(rust_2018_idioms, missing_debug_implementations)]
use codec::{Decode, Encode};
use domain_runtime_primitives::{MultiAccountId, TryConvertBack};
use frame_support::traits::Currency;
pub use pallet::*;
use scale_info::TypeInfo;
use sp_messenger::messages::ChainId;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod weights;
/// Location that either sends or receives transfers between chains.
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
pub struct Location {
/// Unique identity of chain.
pub chain_id: ChainId,
/// Unique account on chain.
pub account_id: MultiAccountId,
}
/// Transfer of funds from one chain to another.
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
pub struct Transfer<Balance> {
/// Amount being transferred between entities.
pub amount: Balance,
/// Sender location of the transfer.
pub sender: Location,
/// Receiver location of the transfer.
pub receiver: Location,
}
/// Balance type used by the pallet.
pub(crate) type BalanceOf<T> =
<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
type MessageIdOf<T> = <<T as Config>::Sender as sp_messenger::endpoint::Sender<
<T as frame_system::Config>::AccountId,
>>::MessageId;
#[frame_support::pallet]
mod pallet {
use crate::weights::WeightInfo;
use crate::{BalanceOf, Location, MessageIdOf, MultiAccountId, Transfer, TryConvertBack};
use codec::{Decode, Encode};
use frame_support::pallet_prelude::*;
use frame_support::traits::{Currency, ExistenceRequirement, WithdrawReasons};
use frame_support::weights::Weight;
use frame_system::pallet_prelude::*;
use sp_messenger::endpoint::{
Endpoint, EndpointHandler as EndpointHandlerT, EndpointId, EndpointRequest,
EndpointResponse, Sender,
};
use sp_messenger::messages::ChainId;
use sp_runtime::traits::Convert;
use sp_std::vec;
#[pallet::config]
pub trait Config: frame_system::Config {
/// Event type for this pallet.
type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// Gets the chain_id of the current execution environment.
type SelfChainId: Get<ChainId>;
/// Gets the endpoint_id of this pallet in a given execution environment.
type SelfEndpointId: Get<EndpointId>;
/// Currency used by this pallet.
type Currency: Currency<Self::AccountId>;
/// Sender used to transfer funds.
type Sender: Sender<Self::AccountId>;
/// MultiAccountID <> T::AccountId converter.
type AccountIdConverter: TryConvertBack<Self::AccountId, MultiAccountId>;
/// Weight information for extrinsics in this pallet.
type WeightInfo: WeightInfo;
}
/// Pallet transporter to move funds between chains.
#[pallet::pallet]
#[pallet::without_storage_info]
pub struct Pallet<T>(_);
/// All the outgoing transfers on this execution environment.
#[pallet::storage]
#[pallet::getter(fn outgoing_transfers)]
pub(super) type OutgoingTransfers<T: Config> = StorageDoubleMap<
_,
Identity,
ChainId,
Identity,
MessageIdOf<T>,
Transfer<BalanceOf<T>>,
OptionQuery,
>;
/// Events emitted by pallet-transporter.
#[pallet::event]
#[pallet::generate_deposit(pub (super) fn deposit_event)]
pub enum Event<T: Config> {
/// Emits when there is a new outgoing transfer.
OutgoingTransferInitiated {
/// Destination chain the transfer is bound to.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
},
/// Emits when a given outgoing transfer failed on dst_chain.
OutgoingTransferFailed {
/// Destination chain the transfer is bound to.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
/// Error from dst_chain endpoint.
err: DispatchError,
},
/// Emits when a given outgoing transfer was successful.
OutgoingTransferSuccessful {
/// Destination chain the transfer is bound to.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
},
/// Emits when a given incoming transfer was successfully processed.
IncomingTransferSuccessful {
/// Source chain the transfer is coming from.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
},
}
/// Errors emitted by pallet-transporter.
#[pallet::error]
pub enum Error<T> {
/// Emits when the account has low balance to make a transfer.
LowBalance,
/// Failed to decode transfer payload.
InvalidPayload,
/// Emits when the request for a received response is missing.
MissingTransferRequest,
/// Emits when the request doesn't match the expected one.
InvalidTransferRequest,
/// Emits when the incoming message is not bound to this chain.
UnexpectedMessage,
/// Emits when the account id type is invalid.
InvalidAccountId,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Initiates transfer of funds from account on src_chain to account on dst_chain.
/// Funds are burned on src_chain first and are minted on dst_chain using Messenger.
#[pallet::call_index(0)]
#[pallet::weight((T::WeightInfo::transfer(), Pays::No))]
pub fn transfer(
origin: OriginFor<T>,
dst_location: Location,
amount: BalanceOf<T>,
) -> DispatchResult {
let sender = ensure_signed(origin)?;
// burn transfer amount
T::Currency::withdraw(
&sender,
amount,
WithdrawReasons::TRANSFER,
ExistenceRequirement::AllowDeath,
)
.map_err(|_| Error::<T>::LowBalance)?;
// initiate transfer
let dst_chain_id = dst_location.chain_id;
let transfer = Transfer {
amount,
sender: Location {
chain_id: T::SelfChainId::get(),
account_id: T::AccountIdConverter::convert(sender.clone()),
},
receiver: dst_location,
};
// send message
let message_id = T::Sender::send_message(
&sender,
dst_chain_id,
EndpointRequest {
src_endpoint: Endpoint::Id(T::SelfEndpointId::get()),
// destination endpoint must be transporter with same id
dst_endpoint: Endpoint::Id(T::SelfEndpointId::get()),
payload: transfer.encode(),
},
)?;
OutgoingTransfers::<T>::insert(dst_chain_id, message_id, transfer);
Self::deposit_event(Event::<T>::OutgoingTransferInitiated {
chain_id: dst_chain_id,
message_id,
});
Ok(())
}
}
/// Endpoint handler implementation for pallet transporter.
#[derive(Debug)]
pub struct EndpointHandler<T>(pub PhantomData<T>);
impl<T: Config> EndpointHandlerT<MessageIdOf<T>> for EndpointHandler<T> {
fn message(
&self,
src_chain_id: ChainId,
message_id: MessageIdOf<T>,
req: EndpointRequest,
) -> EndpointResponse {
// ensure the message is not from self
ensure!(
T::SelfChainId::get() != src_chain_id,
Error::<T>::InvalidTransferRequest
);
// check the endpoint id
ensure!(
req.dst_endpoint == Endpoint::Id(T::SelfEndpointId::get()),
Error::<T>::UnexpectedMessage
);
// decode payload and process message
let req = match Transfer::decode(&mut req.payload.as_slice()) {
Ok(req) => req,
Err(_) => return Err(Error::<T>::InvalidPayload.into()),
};
// mint the funds to dst_account
let account_id = T::AccountIdConverter::try_convert_back(req.receiver.account_id)
.ok_or(Error::<T>::InvalidAccountId)?;
T::Currency::deposit_creating(&account_id, req.amount);
frame_system::Pallet::<T>::deposit_event(Into::<<T as Config>::RuntimeEvent>::into(
Event::<T>::IncomingTransferSuccessful {
chain_id: src_chain_id,
message_id,
},
));
Ok(vec![])
}
fn message_weight(&self) -> Weight {
T::WeightInfo::message()
}
fn message_response(
&self,
dst_chain_id: ChainId,
message_id: MessageIdOf<T>,
req: EndpointRequest,
resp: EndpointResponse,
) -> DispatchResult {
// ensure request is valid
let transfer = OutgoingTransfers::<T>::take(dst_chain_id, message_id)
.ok_or(Error::<T>::MissingTransferRequest)?;
ensure!(
req.payload == transfer.encode(),
Error::<T>::InvalidTransferRequest
);
// process response
match resp {
Ok(_) => {
// transfer is successful
frame_system::Pallet::<T>::deposit_event(
Into::<<T as Config>::RuntimeEvent>::into(
Event::<T>::OutgoingTransferSuccessful {
chain_id: dst_chain_id,
message_id,
},
),
);
}
Err(err) => {
// transfer failed
// revert burned funds
let account_id =
T::AccountIdConverter::try_convert_back(transfer.sender.account_id)
.ok_or(Error::<T>::InvalidAccountId)?;
T::Currency::deposit_creating(&account_id, transfer.amount);
frame_system::Pallet::<T>::deposit_event(
Into::<<T as Config>::RuntimeEvent>::into(
Event::<T>::OutgoingTransferFailed {
chain_id: dst_chain_id,
message_id,
err,
},
),
);
}
}
Ok(())
}
fn message_response_weight(&self) -> Weight {
T::WeightInfo::message_response()
}
}
}
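// --- Illustrative sketch (not part of the pallet above) ---
// The transporter moves a `Transfer` across chains by SCALE-encoding it into the
// endpoint payload (`transfer.encode()`) and decoding it on the destination chain
// (`Transfer::decode`). A self-contained round-trip with parity-scale-codec (the
// `codec` crate with its "derive" feature) looks like this; the simplified fields
// below stand in for the real `Location`/`Balance` types and are assumptions.
use codec::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
struct Transfer {
    amount: u128,
    sender: Vec<u8>,
    receiver: Vec<u8>,
}

fn main() {
    let transfer = Transfer { amount: 1_000, sender: b"alice".to_vec(), receiver: b"bob".to_vec() };
    let payload = transfer.encode();
    // Same call shape as the pallet's `Transfer::decode(&mut req.payload.as_slice())`.
    let decoded = Transfer::decode(&mut payload.as_slice()).expect("valid payload");
    assert_eq!(decoded, transfer);
}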
local.rs
use super::{EntryType, ShrinkBehavior, GIGABYTES};
use std::collections::BinaryHeap;
use std::path::Path;
use std::sync::Arc;
use std::time::{self, Duration};
use bytes::Bytes;
use digest::{Digest as DigestTrait, FixedOutput};
use futures::future;
use hashing::{Digest, Fingerprint, EMPTY_DIGEST};
use lmdb::Error::NotFound;
use lmdb::{self, Cursor, Transaction};
use sha2::Sha256;
use sharded_lmdb::{ShardedLmdb, VersionedFingerprint, DEFAULT_LEASE_TIME};
#[derive(Clone)]
pub struct ByteStore {
inner: Arc<InnerStore>,
}
struct InnerStore {
// Store directories separately from files because:
// 1. They may have different lifetimes.
// 2. It's nice to know whether we should be able to parse something as a proto.
file_dbs: Result<Arc<ShardedLmdb>, String>,
directory_dbs: Result<Arc<ShardedLmdb>, String>,
executor: task_executor::Executor,
}
impl ByteStore {
pub fn new<P: AsRef<Path>>(
executor: task_executor::Executor,
path: P,
) -> Result<ByteStore, String> {
Self::new_with_lease_time(executor, path, DEFAULT_LEASE_TIME)
}
pub fn new_with_lease_time<P: AsRef<Path>>(
executor: task_executor::Executor,
path: P,
lease_time: Duration,
) -> Result<ByteStore, String> {
let root = path.as_ref();
let files_root = root.join("files");
let directories_root = root.join("directories");
Ok(ByteStore {
inner: Arc::new(InnerStore {
// We want these stores to be allowed to grow very large, in case we are on a system with
// large disks which doesn't want to GC a lot.
// It doesn't reflect space allocated on disk, or RAM allocated (it may be reflected in
// VIRT but not RSS). There is no practical upper bound on this number, so we set them
// ridiculously high.
// However! We set them lower than we'd otherwise choose because sometimes we see tests on
// travis fail because they can't allocate virtual memory, if there are multiple Stores
// in memory at the same time. We don't know why they're not efficiently garbage collected
// by python, but they're not, so...
file_dbs: ShardedLmdb::new(files_root, 100 * GIGABYTES, executor.clone(), lease_time)
.map(Arc::new),
directory_dbs: ShardedLmdb::new(
directories_root,
5 * GIGABYTES,
executor.clone(),
lease_time,
)
.map(Arc::new),
executor,
}),
})
}
pub fn executor(&self) -> &task_executor::Executor {
&self.inner.executor
}
pub async fn entry_type(&self, fingerprint: Fingerprint) -> Result<Option<EntryType>, String> {
if fingerprint == EMPTY_DIGEST.0 {
// Technically this is valid as both; choose Directory in case a caller is checking whether
// it _can_ be a Directory.
return Ok(Some(EntryType::Directory));
}
// In parallel, check for the given fingerprint in both databases.
let d_dbs = self.inner.directory_dbs.clone()?;
let is_dir = d_dbs.exists(fingerprint);
let f_dbs = self.inner.file_dbs.clone()?;
let is_file = f_dbs.exists(fingerprint);
// TODO: Could technically use select to return slightly more quickly with the first
// affirmative answer, but this is simpler.
match future::try_join(is_dir, is_file).await? {
(true, _) => Ok(Some(EntryType::Directory)),
(_, true) => Ok(Some(EntryType::File)),
(false, false) => Ok(None),
}
}
pub async fn lease_all(
&self,
digests: impl Iterator<Item = (Digest, EntryType)>,
) -> Result<(), String> {
// NB: Lease extension happens periodically in the background, so this code needn't be parallel.
for (digest, entry_type) in digests {
let dbs = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
dbs?
.lease(digest.0)
.await
.map_err(|err| format!("Error leasing digest {:?}: {}", digest, err))?;
}
Ok(())
}
///
/// Attempts to shrink the stored files to be no bigger than target_bytes
/// (excluding lmdb overhead).
///
/// Returns the size it was shrunk to, which may be larger than target_bytes.
///
/// TODO: Use LMDB database statistics when lmdb-rs exposes them.
///
pub fn shrink(
&self,
target_bytes: usize,
shrink_behavior: ShrinkBehavior,
) -> Result<usize, String> {
let mut used_bytes: usize = 0;
let mut fingerprints_by_expired_ago = BinaryHeap::new();
self.aged_fingerprints(
EntryType::File,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
self.aged_fingerprints(
EntryType::Directory,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
while used_bytes > target_bytes {
let aged_fingerprint = fingerprints_by_expired_ago
.pop()
.expect("lmdb corruption detected, sum of size of blobs exceeded stored blobs");
if aged_fingerprint.expired_seconds_ago == 0 {
// Ran out of expired blobs - everything remaining is leased and cannot be collected.
return Ok(used_bytes);
}
let lmdbs = match aged_fingerprint.entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
let (env, database, lease_database) = lmdbs.clone()?.get(&aged_fingerprint.fingerprint);
{
env
.begin_rw_txn()
.and_then(|mut txn| {
let key = VersionedFingerprint::new(
aged_fingerprint.fingerprint,
ShardedLmdb::schema_version(),
);
txn.del(database, &key, None)?;
txn
.del(lease_database, &key, None)
.or_else(|err| match err {
NotFound => Ok(()),
err => Err(err),
})?;
used_bytes -= aged_fingerprint.size_bytes;
txn.commit()
})
.map_err(|err| format!("Error garbage collecting: {}", err))?;
}
}
if shrink_behavior == ShrinkBehavior::Compact {
self.inner.file_dbs.clone()?.compact()?;
}
Ok(used_bytes)
}
fn aged_fingerprints(
&self,
entry_type: EntryType,
used_bytes: &mut usize,
fingerprints_by_expired_ago: &mut BinaryHeap<AgedFingerprint>,
) -> Result<(), String> {
let database = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
for &(ref env, ref database, ref lease_database) in &database?.all_lmdbs() {
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Error beginning transaction to garbage collect: {}", err))?;
let mut cursor = txn
.open_ro_cursor(*database)
.map_err(|err| format!("Failed to open lmdb read cursor: {}", err))?;
for (key, bytes) in cursor.iter() {
*used_bytes += bytes.len();
// Random access into the lease_database is slower than iterating, but hopefully garbage
// collection is rare enough that we can get away with this, rather than do two passes
// here (either to populate leases into pre-populated AgedFingerprints, or to read sizes
// when we delete from lmdb to track how much we've freed).
let lease_until_unix_timestamp = txn
.get(*lease_database, &key)
.map(|b| {
let mut array = [0_u8; 8];
array.copy_from_slice(b);
u64::from_le_bytes(array)
})
.unwrap_or_else(|e| match e {
NotFound => 0,
e => panic!("Error reading lease, probable lmdb corruption: {:?}", e),
});
let leased_until = time::UNIX_EPOCH + Duration::from_secs(lease_until_unix_timestamp);
let expired_seconds_ago = time::SystemTime::now()
.duration_since(leased_until)
.map(|t| t.as_secs())
// 0 indicates unleased.
.unwrap_or(0);
let v = VersionedFingerprint::from_bytes_unsafe(key);
let fingerprint = v.get_fingerprint();
fingerprints_by_expired_ago.push(AgedFingerprint {
expired_seconds_ago,
fingerprint,
size_bytes: bytes.len(),
entry_type,
});
}
}
Ok(())
}
pub async fn store_bytes(
&self,
entry_type: EntryType,
bytes: Bytes,
initial_lease: bool,
) -> Result<Digest, String> {
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
let bytes2 = bytes.clone();
let digest = self
.inner
.executor
.spawn_blocking(move || {
let fingerprint = {
let mut hasher = Sha256::default();
hasher.input(&bytes);
Fingerprint::from_bytes_unsafe(hasher.fixed_result().as_slice())
};
Digest(fingerprint, bytes.len())
})
.await;
dbs?.store_bytes(digest.0, bytes2, initial_lease).await?;
Ok(digest)
}
///
/// Loads bytes from the underlying LMDB store using the given function. Because the database is
/// blocking, this accepts a function that views a slice rather than returning a clone of the
/// data. The upshot is that the database is able to provide slices directly into shared memory.
///
/// The provided function is guaranteed to be called in a context where it is safe to block.
///
pub async fn load_bytes_with<T: Send + 'static, F: Fn(&[u8]) -> T + Send + Sync + 'static>(
&self,
entry_type: EntryType,
digest: Digest,
f: F,
) -> Result<Option<T>, String> {
if digest == EMPTY_DIGEST {
// Avoid I/O for this case. This allows some client-provided operations (like merging
// snapshots) to work without needing to first store the empty snapshot.
//
// To maintain the guarantee that the given function is called in a blocking context, we
// spawn it as a task.
return Ok(Some(self.executor().spawn_blocking(move || f(&[])).await));
}
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
dbs?.load_bytes_with(digest.0, move |bytes| {
if bytes.len() == digest.1 {
Ok(f(bytes))
} else {
Err(format!("Got hash collision reading from store - digest {:?} was requested, but retrieved bytes with that fingerprint had length {}. Congratulations, you may have broken sha256! Underlying bytes: {:?}", digest, bytes.len(), bytes))
}
}).await
}
pub fn all_digests(&self, entry_type: EntryType) -> Result<Vec<Digest>, String> {
let database = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
let mut digests = vec![];
for &(ref env, ref database, ref _lease_database) in &database?.all_lmdbs() {
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Error beginning transaction to garbage collect: {}", err))?;
let mut cursor = txn
.open_ro_cursor(*database)
.map_err(|err| format!("Failed to open lmdb read cursor: {}", err))?;
for (key, bytes) in cursor.iter() {
let v = VersionedFingerprint::from_bytes_unsafe(key);
let fingerprint = v.get_fingerprint();
digests.push(Digest(fingerprint, bytes.len()));
}
}
Ok(digests)
}
}
#[derive(Eq, PartialEq, Ord, PartialOrd)]
struct AgedFingerprint {
// expired_seconds_ago must be the first field for the Ord implementation.
expired_seconds_ago: u64,
fingerprint: Fingerprint,
size_bytes: usize,
entry_type: EntryType,
}
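// --- Illustrative sketch (not part of the store above) ---
// Why `expired_seconds_ago` must be the first field of `AgedFingerprint`:
// `#[derive(Ord)]` compares fields in declaration order, so a `BinaryHeap`
// (a max-heap) pops the longest-expired entry first, which is exactly the
// eviction order `shrink` wants. The struct and values below are made up.
use std::collections::BinaryHeap;

#[derive(Eq, PartialEq, Ord, PartialOrd, Debug)]
struct Aged {
    expired_seconds_ago: u64,
    size_bytes: usize,
}

fn main() {
    let mut heap = BinaryHeap::new();
    heap.push(Aged { expired_seconds_ago: 5, size_bytes: 10 });
    heap.push(Aged { expired_seconds_ago: 90, size_bytes: 1 });
    heap.push(Aged { expired_seconds_ago: 0, size_bytes: 999 }); // 0 == still leased
    assert_eq!(heap.pop().unwrap().expired_seconds_ago, 90); // evicted first
}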
local.rs | use super::{EntryType, ShrinkBehavior, GIGABYTES};
use std::collections::BinaryHeap;
use std::path::Path;
use std::sync::Arc;
use std::time::{self, Duration};
use bytes::Bytes;
use digest::{Digest as DigestTrait, FixedOutput};
use futures::future;
use hashing::{Digest, Fingerprint, EMPTY_DIGEST};
use lmdb::Error::NotFound;
use lmdb::{self, Cursor, Transaction};
use sha2::Sha256;
use sharded_lmdb::{ShardedLmdb, VersionedFingerprint, DEFAULT_LEASE_TIME};
#[derive(Clone)]
pub struct ByteStore {
inner: Arc<InnerStore>,
}
struct InnerStore {
// Store directories separately from files because:
// 1. They may have different lifetimes.
// 2. It's nice to know whether we should be able to parse something as a proto.
file_dbs: Result<Arc<ShardedLmdb>, String>,
directory_dbs: Result<Arc<ShardedLmdb>, String>,
executor: task_executor::Executor,
}
impl ByteStore {
pub fn new<P: AsRef<Path>>(
executor: task_executor::Executor,
path: P,
) -> Result<ByteStore, String> {
Self::new_with_lease_time(executor, path, DEFAULT_LEASE_TIME)
}
pub fn new_with_lease_time<P: AsRef<Path>>(
executor: task_executor::Executor,
path: P,
lease_time: Duration,
) -> Result<ByteStore, String> {
let root = path.as_ref();
let files_root = root.join("files");
let directories_root = root.join("directories");
Ok(ByteStore {
inner: Arc::new(InnerStore {
// We want these stores to be allowed to grow very large, in case we are on a system with
// large disks which doesn't want to GC a lot.
// It doesn't reflect space allocated on disk, or RAM allocated (it may be reflected in
// VIRT but not RSS). There is no practical upper bound on this number, so we set them
// ridiculously high.
// However! We set them lower than we'd otherwise choose because sometimes we see tests on
// travis fail because they can't allocate virtual memory, if there are multiple Stores
// in memory at the same time. We don't know why they're not efficiently garbage collected
// by python, but they're not, so...
file_dbs: ShardedLmdb::new(files_root, 100 * GIGABYTES, executor.clone(), lease_time)
.map(Arc::new),
directory_dbs: ShardedLmdb::new(
directories_root,
5 * GIGABYTES,
executor.clone(),
lease_time,
)
.map(Arc::new),
executor,
}),
})
}
pub fn executor(&self) -> &task_executor::Executor {
&self.inner.executor
}
pub async fn entry_type(&self, fingerprint: Fingerprint) -> Result<Option<EntryType>, String> {
if fingerprint == EMPTY_DIGEST.0 {
// Technically this is valid as both; choose Directory in case a caller is checking whether
// it _can_ be a Directory.
return Ok(Some(EntryType::Directory));
}
// In parallel, check for the given fingerprint in both databases.
let d_dbs = self.inner.directory_dbs.clone()?;
let is_dir = d_dbs.exists(fingerprint);
let f_dbs = self.inner.file_dbs.clone()?;
let is_file = f_dbs.exists(fingerprint);
// TODO: Could technically use select to return slightly more quickly with the first
// affirmative answer, but this is simpler.
match future::try_join(is_dir, is_file).await? {
(true, _) => Ok(Some(EntryType::Directory)),
(_, true) => Ok(Some(EntryType::File)),
(false, false) => Ok(None),
}
}
pub async fn lease_all(
&self,
digests: impl Iterator<Item = (Digest, EntryType)>,
) -> Result<(), String> {
// NB: Lease extension happens periodically in the background, so this code needn't be parallel.
for (digest, entry_type) in digests {
let dbs = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
dbs?
.lease(digest.0)
.await
.map_err(|err| format!("Error leasing digest {:?}: {}", digest, err))?;
}
Ok(())
}
///
/// Attempts to shrink the stored files to be no bigger than target_bytes
/// (excluding lmdb overhead).
///
/// Returns the size it was shrunk to, which may be larger than target_bytes.
///
/// TODO: Use LMDB database statistics when lmdb-rs exposes them.
///
pub fn shrink(
&self,
target_bytes: usize,
shrink_behavior: ShrinkBehavior,
) -> Result<usize, String> {
let mut used_bytes: usize = 0;
let mut fingerprints_by_expired_ago = BinaryHeap::new();
self.aged_fingerprints(
EntryType::File,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
self.aged_fingerprints(
EntryType::Directory,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
while used_bytes > target_bytes {
let aged_fingerprint = fingerprints_by_expired_ago
.pop()
.expect("lmdb corruption detected, sum of size of blobs exceeded stored blobs");
if aged_fingerprint.expired_seconds_ago == 0 {
// Ran out of expired blobs - everything remaining is leased and cannot be collected.
return Ok(used_bytes);
}
let lmdbs = match aged_fingerprint.entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
let (env, database, lease_database) = lmdbs.clone()?.get(&aged_fingerprint.fingerprint);
{
env
.begin_rw_txn()
.and_then(|mut txn| {
let key = VersionedFingerprint::new(
aged_fingerprint.fingerprint,
ShardedLmdb::schema_version(),
);
txn.del(database, &key, None)?;
txn
.del(lease_database, &key, None)
.or_else(|err| match err {
NotFound => Ok(()),
err => Err(err),
})?;
used_bytes -= aged_fingerprint.size_bytes;
txn.commit()
})
.map_err(|err| format!("Error garbage collecting: {}", err))?;
}
}
if shrink_behavior == ShrinkBehavior::Compact {
self.inner.file_dbs.clone()?.compact()?;
}
Ok(used_bytes)
}
fn aged_fingerprints(
&self,
entry_type: EntryType,
used_bytes: &mut usize,
fingerprints_by_expired_ago: &mut BinaryHeap<AgedFingerprint>,
) -> Result<(), String> {
let database = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
for &(ref env, ref database, ref lease_database) in &database?.all_lmdbs() {
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Error beginning transaction to garbage collect: {}", err))?;
let mut cursor = txn
.open_ro_cursor(*database)
.map_err(|err| format!("Failed to open lmdb read cursor: {}", err))?;
for (key, bytes) in cursor.iter() {
*used_bytes += bytes.len();
// Random access into the lease_database is slower than iterating, but hopefully garbage
// collection is rare enough that we can get away with this, rather than do two passes
// here (either to populate leases into pre-populated AgedFingerprints, or to read sizes
// when we delete from lmdb to track how much we've freed).
let lease_until_unix_timestamp = txn
.get(*lease_database, &key)
.map(|b| {
let mut array = [0_u8; 8];
array.copy_from_slice(b);
u64::from_le_bytes(array)
})
.unwrap_or_else(|e| match e {
NotFound => 0,
e => panic!("Error reading lease, probable lmdb corruption: {:?}", e),
});
let leased_until = time::UNIX_EPOCH + Duration::from_secs(lease_until_unix_timestamp);
let expired_seconds_ago = time::SystemTime::now()
.duration_since(leased_until)
.map(|t| t.as_secs())
// 0 indicates unleased.
.unwrap_or(0);
let v = VersionedFingerprint::from_bytes_unsafe(key);
let fingerprint = v.get_fingerprint();
fingerprints_by_expired_ago.push(AgedFingerprint {
expired_seconds_ago,
fingerprint,
size_bytes: bytes.len(),
entry_type,
});
}
}
Ok(())
}
pub async fn store_bytes(
&self,
entry_type: EntryType,
bytes: Bytes,
initial_lease: bool,
) -> Result<Digest, String> {
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
let bytes2 = bytes.clone();
let digest = self
.inner
.executor
.spawn_blocking(move || {
let fingerprint = {
let mut hasher = Sha256::default();
hasher.input(&bytes);
Fingerprint::from_bytes_unsafe(hasher.fixed_result().as_slice())
};
Digest(fingerprint, bytes.len())
})
.await;
dbs?.store_bytes(digest.0, bytes2, initial_lease).await?;
Ok(digest)
}
///
/// Loads bytes from the underlying LMDB store using the given function. Because the database is
/// blocking, this accepts a function that views a slice rather than returning a clone of the
/// data. The upshot is that the database is able to provide slices directly into shared memory.
///
/// The provided function is guaranteed to be called in a context where it is safe to block.
///
  pub async fn | <T: Send + 'static, F: Fn(&[u8]) -> T + Send + Sync + 'static>(
&self,
entry_type: EntryType,
digest: Digest,
f: F,
) -> Result<Option<T>, String> {
if digest == EMPTY_DIGEST {
// Avoid I/O for this case. This allows some client-provided operations (like merging
// snapshots) to work without needing to first store the empty snapshot.
//
// To maintain the guarantee that the given function is called in a blocking context, we
// spawn it as a task.
return Ok(Some(self.executor().spawn_blocking(move || f(&[])).await));
}
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
dbs?.load_bytes_with(digest.0, move |bytes| {
if bytes.len() == digest.1 {
Ok(f(bytes))
} else {
Err(format!("Got hash collision reading from store - digest {:?} was requested, but retrieved bytes with that fingerprint had length {}. Congratulations, you may have broken sha256! Underlying bytes: {:?}", digest, bytes.len(), bytes))
}
}).await
}
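  /// Hypothetical convenience helper (not part of the original source): a minimal sketch of
  /// how a caller can use the slice-viewing loader above when an owned copy of the bytes is
  /// actually needed. The closure runs in the blocking context and copies the slice out.
  pub async fn load_bytes_owned(
    &self,
    entry_type: EntryType,
    digest: Digest,
  ) -> Result<Option<Vec<u8>>, String> {
    self
      .load_bytes_with(entry_type, digest, |bytes: &[u8]| bytes.to_vec())
      .await
  }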
pub fn all_digests(&self, entry_type: EntryType) -> Result<Vec<Digest>, String> {
let database = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
let mut digests = vec![];
for &(ref env, ref database, ref _lease_database) in &database?.all_lmdbs() {
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Error beginning transaction to garbage collect: {}", err))?;
let mut cursor = txn
.open_ro_cursor(*database)
.map_err(|err| format!("Failed to open lmdb read cursor: {}", err))?;
for (key, bytes) in cursor.iter() {
let v = VersionedFingerprint::from_bytes_unsafe(key);
let fingerprint = v.get_fingerprint();
digests.push(Digest(fingerprint, bytes.len()));
}
}
Ok(digests)
}
}
#[derive(Eq, PartialEq, Ord, PartialOrd)]
struct AgedFingerprint {
// expired_seconds_ago must be the first field for the Ord implementation.
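  // derive(Ord) compares fields lexicographically in declaration order, so the BinaryHeap
  // built in aged_fingerprints yields the most-expired (safest to delete) entries first.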
expired_seconds_ago: u64,
fingerprint: Fingerprint,
size_bytes: usize,
entry_type: EntryType,
}
| load_bytes_with | identifier_name |
runtime.rs | //#![allow(dead_code)]
use std::sync::{Arc};
use std::path::{PathBuf};
use cgmath::{Vector2, Point2};
use input::{Input, Button, Key, ButtonState, ButtonArgs};
use window::{Window, WindowSettings};
use slog::{Logger};
use calcium_flowy::FlowyRenderer;
use flowy::{Ui, Element};
use flowy::style::{Style, Position, Size, SideH, SideV};
use palette::pixel::{Srgb};
use rusttype::{FontCollection};
use tiled;
use calcium_game::{LoopTimer};
use calcium_rendering::{Error};
use calcium_rendering::texture::{Texture};
use calcium_rendering_2d::render_data::{RenderBatch, ShaderMode, Rectangle, Projection, RenderData, RenderSet, UvMode};
use calcium_rendering_2d::{Renderer2DTarget};
use calcium_rendering_context::{Runtime, Context};
use calcium_rendering::raw::{RendererRaw};
use model::{Map};
use view::{MapRenderer};
struct FriendlyUnit<R: RendererRaw> {
name: String,
tex: Arc<Texture<R>>,
selecttex: Arc<Texture<R>>,
position: Point2<f32>,
size: Vector2<f32>,
speed: f32,
selected: bool,
tabrel: f32,
}
impl <R: RendererRaw> FriendlyUnit<R> {
pub fn new(name: String, tex: Arc<Texture<R>>, selecttex: Arc<Texture<R>>, position: Point2<f32>, size: Vector2<f32>, speed: f32) -> FriendlyUnit<R> {
        FriendlyUnit { name, tex, selecttex, position, size, speed, selected: false, tabrel: 0.0 }
}
pub fn update(&mut self, delta: f32, selected: bool, pinput: &PlayerInput) {
/* do update-y things */
self.tabrel -= delta;
if self.tabrel <= 0.0 && pinput.tab {
//println!("I am {}, Selection Status: {}.", self.name, selected);
self.tabrel = 0.1;
self.selected = selected;
}
if self.selected |
}
pub fn render(&mut self, batches: &mut Vec<RenderBatch<R>>) {
//let mut batches = Vec::new();
let mut normaltexture = RenderBatch::new(
ShaderMode::Texture(self.tex.clone()), UvMode::YDown
);
normaltexture.push_rectangle_full_texture(
// position is centered in the texture
Rectangle::new(self.position + -self.size/2.0, self.position + self.size/2.0)
);
batches.push(normaltexture);
if self.selected {
let mut selectiontexture = RenderBatch::new(
ShaderMode::Texture(self.selecttex.clone()), UvMode::YDown
);
selectiontexture.push_rectangle_full_texture(
Rectangle::new(self.position + -self.size, self.position + self.size)
);
batches.push(selectiontexture);
}
}
pub fn get_position(&mut self) -> Point2<f32> {
self.position
}
pub fn get_name(&mut self) -> &String {
&self.name
}
}
struct PlayerInput {
pub w: bool,
pub a: bool,
pub s: bool,
pub d: bool,
pub tab: bool,
}
pub struct StaticRuntime {
pub log: Logger,
}
impl Runtime for StaticRuntime {
fn run<C: Context>(self, context: C) -> Result<(), Error> {
info!(self.log, "Loading program");
// Set up everything we need to render
let window_settings = WindowSettings::new("RPG Game", [1280, 720]);
let (mut renderer, mut window) =
context.renderer(Some(self.log.clone()), &window_settings)?;
let mut simple2d_renderer = context.simple2d_renderer(&mut renderer)?;
let mut simple2d_render_target = Renderer2DTarget::new(
true, &renderer, &simple2d_renderer
);
let mut ui_renderer = FlowyRenderer::new(&mut renderer)?;
let mut ui = Ui::new();
let root_id = ui.elements.root_id();
let font = FontCollection::from_bytes(
::ttf_noto_sans::REGULAR
).into_font().unwrap();
ui.fonts.push(font);
let fps = Element::new(Style {
position: Position::Relative(Point2::new(0.0, 0.0), SideH::Right, SideV::Top),
size: Size::units(120.0, 14.0),
text_color: Srgb::new(1.0, 1.0, 1.0).into(),
text_size: 14.0,
.. Style::new()
});
let fps_id = ui.elements.add_child(fps, root_id);
{
let fpso = &mut ui.elements[fps_id];
            fpso.set_text("test text".to_string());
}
// Units data
let friendly_texture = Texture::new()
.from_file("./assets/friendly.png")
.with_nearest_sampling()
.build(&mut renderer)?;
let selection_texture = Texture::new()
.from_file("./assets/selection.png")
.with_nearest_sampling()
.build(&mut renderer)?;
// Set up the game map's tiles
let map_path = PathBuf::from("./assets/test_map.tmx");
let tmap = tiled::parse_file(&map_path).unwrap();
let map = Map::new(&tmap, &self.log);
let map_renderer = MapRenderer::new(&tmap, &map_path, &mut renderer)?;
let mut players_units = Vec::new();
let alfred = FriendlyUnit::new(String::from("Alfred"), friendly_texture.clone(), selection_texture.clone(), Point2::new(200.0,200.0), Vector2::new(32.0,32.0), 256.0 );
let bertil = FriendlyUnit::new(String::from("Bertil"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,300.0), Vector2::new(32.0,32.0), 256.0 );
let carl = FriendlyUnit::new(String::from("Carl"), friendly_texture.clone(), selection_texture.clone(), Point2::new(400.0,400.0), Vector2::new(32.0,32.0), 256.0 );
let dagobert = FriendlyUnit::new(String::from("Dagobert"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,500.0), Vector2::new(32.0,32.0), 256.0 );
players_units.push(alfred);
players_units.push(bertil);
players_units.push(carl);
players_units.push(dagobert);
let (mut selected_unit, mut tabrelease) = (3,0.1);
let (mut left_pressed, mut right_pressed, mut up_pressed, mut down_pressed, mut tab_pressed) =
(false, false, false, false, false);
// Run the actual game loop
let mut timer = LoopTimer::start();
info!(self.log, "Finished loading, starting main loop");
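        // Main loop: poll window events into input flags, update the units and UI, build the
        // 2D render batches for the map, units and UI, then draw and present the frame.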
        while !window.should_close() {
let delta = timer.tick();
// Handle input
while let Some(event) = window.poll_event() {
// Let the context handle anything needed
context.handle_event(&event, &mut renderer, &mut window);
match event {
Input::Button(ButtonArgs {state, button, scancode: _scancode}) => {
let press = state == ButtonState::Press;
match button {
Button::Keyboard(Key::A) =>
left_pressed = press,
Button::Keyboard(Key::D) =>
right_pressed = press,
Button::Keyboard(Key::W) =>
up_pressed = press,
Button::Keyboard(Key::S) =>
down_pressed = press,
Button::Keyboard(Key::Tab) =>
tab_pressed = press,
_ => {},
}
},
_ => {},
}
}
let pinput = PlayerInput {w: up_pressed, a: left_pressed, s: down_pressed, d: right_pressed, tab: tab_pressed};
{
let fpso = &mut ui.elements[fps_id];
fpso.style_mut().position = Position::Relative(players_units[selected_unit].get_position(), SideH::Left, SideV::Top);
fpso.set_text(players_units[selected_unit].get_name().clone());
}
// TODO: kill this
tabrelease -= delta;
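            // Cycle the selection through the four units (equivalent to
            // selected_unit = (selected_unit + 1) % players_units.len()), with a small
            // cooldown so a held Tab key does not skip several units per frame.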
if tabrelease <= 0.0 && tab_pressed {
if selected_unit == 3 {
selected_unit = 0;
}
else {
selected_unit+=1;
}
tabrelease = 0.1;
println!("selected unit is now {}", selected_unit);
}
// Update the player units
for (i, unit) in players_units.iter_mut().enumerate() {
unit.update(delta, i == selected_unit, &pinput);
}
// Set up the rendering data we'll need
let mut render_data = RenderData::new();
let mut world_batches = Vec::new();
let camera_size = renderer.size().cast();
// Render the tiles
map_renderer.render(&map, &mut world_batches, camera_size);
// Render the player units
for unit in &mut players_units {
unit.render(&mut world_batches);
}
// Submit the world render data
//let camera = Camera::new(32.0, Point2::new(0.0, 0.0));
//Projection::Camera(camera)
render_data.render_sets.push(RenderSet::new(Projection::Pixels, world_batches));
// Render the UI
let mut ui_batches = Vec::new();
ui_renderer.render(&mut ui, &mut ui_batches, camera_size, &mut renderer)?;
render_data.render_sets.push(RenderSet::new(Projection::Pixels, ui_batches));
// Finally do the 2D rendering itself
let mut frame = renderer.start_frame();
simple2d_renderer.render(
&render_data, &mut frame, &mut simple2d_render_target, &mut renderer
);
renderer.finish_frame(frame);
window.swap_buffers();
}
Ok(())
}
}
| {
if pinput.w {self.position.y -= self.speed * delta;}
if pinput.a {self.position.x -= self.speed * delta;}
if pinput.s {self.position.y += self.speed * delta;}
if pinput.d {self.position.x += self.speed * delta;}
} | conditional_block |
runtime.rs | //#![allow(dead_code)]
use std::sync::{Arc};
use std::path::{PathBuf};
use cgmath::{Vector2, Point2};
use input::{Input, Button, Key, ButtonState, ButtonArgs};
use window::{Window, WindowSettings};
use slog::{Logger};
use calcium_flowy::FlowyRenderer;
use flowy::{Ui, Element};
use flowy::style::{Style, Position, Size, SideH, SideV};
use palette::pixel::{Srgb};
use rusttype::{FontCollection};
use tiled;
use calcium_game::{LoopTimer};
use calcium_rendering::{Error};
use calcium_rendering::texture::{Texture};
use calcium_rendering_2d::render_data::{RenderBatch, ShaderMode, Rectangle, Projection, RenderData, RenderSet, UvMode};
use calcium_rendering_2d::{Renderer2DTarget};
use calcium_rendering_context::{Runtime, Context};
use calcium_rendering::raw::{RendererRaw};
use model::{Map};
use view::{MapRenderer};
struct FriendlyUnit<R: RendererRaw> {
name: String,
tex: Arc<Texture<R>>,
selecttex: Arc<Texture<R>>,
position: Point2<f32>,
size: Vector2<f32>,
speed: f32,
selected: bool,
tabrel: f32,
}
impl <R: RendererRaw> FriendlyUnit<R> {
pub fn new(name: String, tex: Arc<Texture<R>>, selecttex: Arc<Texture<R>>, position: Point2<f32>, size: Vector2<f32>, speed: f32) -> FriendlyUnit<R> {
        FriendlyUnit { name, tex, selecttex, position, size, speed, selected: false, tabrel: 0.0 }
}
pub fn update(&mut self, delta: f32, selected: bool, pinput: &PlayerInput) {
/* do update-y things */
self.tabrel -= delta;
if self.tabrel <= 0.0 && pinput.tab {
//println!("I am {}, Selection Status: {}.", self.name, selected);
self.tabrel = 0.1;
self.selected = selected;
}
if self.selected {
if pinput.w {self.position.y -= self.speed * delta;}
if pinput.a {self.position.x -= self.speed * delta;}
if pinput.s {self.position.y += self.speed * delta;}
if pinput.d {self.position.x += self.speed * delta;}
}
}
pub fn render(&mut self, batches: &mut Vec<RenderBatch<R>>) {
//let mut batches = Vec::new();
let mut normaltexture = RenderBatch::new(
ShaderMode::Texture(self.tex.clone()), UvMode::YDown
);
normaltexture.push_rectangle_full_texture(
// position is centered in the texture
Rectangle::new(self.position + -self.size/2.0, self.position + self.size/2.0)
);
batches.push(normaltexture);
if self.selected {
let mut selectiontexture = RenderBatch::new(
ShaderMode::Texture(self.selecttex.clone()), UvMode::YDown
);
selectiontexture.push_rectangle_full_texture(
Rectangle::new(self.position + -self.size, self.position + self.size)
);
batches.push(selectiontexture);
}
}
pub fn | (&mut self) -> Point2<f32> {
self.position
}
pub fn get_name(&mut self) -> &String {
&self.name
}
}
struct PlayerInput {
pub w: bool,
pub a: bool,
pub s: bool,
pub d: bool,
pub tab: bool,
}
pub struct StaticRuntime {
pub log: Logger,
}
impl Runtime for StaticRuntime {
fn run<C: Context>(self, context: C) -> Result<(), Error> {
info!(self.log, "Loading program");
// Set up everything we need to render
let window_settings = WindowSettings::new("RPG Game", [1280, 720]);
let (mut renderer, mut window) =
context.renderer(Some(self.log.clone()), &window_settings)?;
let mut simple2d_renderer = context.simple2d_renderer(&mut renderer)?;
let mut simple2d_render_target = Renderer2DTarget::new(
true, &renderer, &simple2d_renderer
);
let mut ui_renderer = FlowyRenderer::new(&mut renderer)?;
let mut ui = Ui::new();
let root_id = ui.elements.root_id();
let font = FontCollection::from_bytes(
::ttf_noto_sans::REGULAR
).into_font().unwrap();
ui.fonts.push(font);
let fps = Element::new(Style {
position: Position::Relative(Point2::new(0.0, 0.0), SideH::Right, SideV::Top),
size: Size::units(120.0, 14.0),
text_color: Srgb::new(1.0, 1.0, 1.0).into(),
text_size: 14.0,
.. Style::new()
});
let fps_id = ui.elements.add_child(fps, root_id);
{
let fpso = &mut ui.elements[fps_id];
            fpso.set_text("test text".to_string());
}
// Units data
let friendly_texture = Texture::new()
.from_file("./assets/friendly.png")
.with_nearest_sampling()
.build(&mut renderer)?;
let selection_texture = Texture::new()
.from_file("./assets/selection.png")
.with_nearest_sampling()
.build(&mut renderer)?;
// Set up the game map's tiles
let map_path = PathBuf::from("./assets/test_map.tmx");
let tmap = tiled::parse_file(&map_path).unwrap();
let map = Map::new(&tmap, &self.log);
let map_renderer = MapRenderer::new(&tmap, &map_path, &mut renderer)?;
let mut players_units = Vec::new();
let alfred = FriendlyUnit::new(String::from("Alfred"), friendly_texture.clone(), selection_texture.clone(), Point2::new(200.0,200.0), Vector2::new(32.0,32.0), 256.0 );
let bertil = FriendlyUnit::new(String::from("Bertil"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,300.0), Vector2::new(32.0,32.0), 256.0 );
let carl = FriendlyUnit::new(String::from("Carl"), friendly_texture.clone(), selection_texture.clone(), Point2::new(400.0,400.0), Vector2::new(32.0,32.0), 256.0 );
let dagobert = FriendlyUnit::new(String::from("Dagobert"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,500.0), Vector2::new(32.0,32.0), 256.0 );
players_units.push(alfred);
players_units.push(bertil);
players_units.push(carl);
players_units.push(dagobert);
let (mut selected_unit, mut tabrelease) = (3,0.1);
let (mut left_pressed, mut right_pressed, mut up_pressed, mut down_pressed, mut tab_pressed) =
(false, false, false, false, false);
// Run the actual game loop
let mut timer = LoopTimer::start();
info!(self.log, "Finished loading, starting main loop");
        while !window.should_close() {
let delta = timer.tick();
// Handle input
while let Some(event) = window.poll_event() {
// Let the context handle anything needed
context.handle_event(&event, &mut renderer, &mut window);
match event {
Input::Button(ButtonArgs {state, button, scancode: _scancode}) => {
let press = state == ButtonState::Press;
match button {
Button::Keyboard(Key::A) =>
left_pressed = press,
Button::Keyboard(Key::D) =>
right_pressed = press,
Button::Keyboard(Key::W) =>
up_pressed = press,
Button::Keyboard(Key::S) =>
down_pressed = press,
Button::Keyboard(Key::Tab) =>
tab_pressed = press,
_ => {},
}
},
_ => {},
}
}
let pinput = PlayerInput {w: up_pressed, a: left_pressed, s: down_pressed, d: right_pressed, tab: tab_pressed};
{
let fpso = &mut ui.elements[fps_id];
fpso.style_mut().position = Position::Relative(players_units[selected_unit].get_position(), SideH::Left, SideV::Top);
fpso.set_text(players_units[selected_unit].get_name().clone());
}
// TODO: kill this
tabrelease -= delta;
if tabrelease <= 0.0 && tab_pressed {
if selected_unit == 3 {
selected_unit = 0;
}
else {
selected_unit+=1;
}
tabrelease = 0.1;
println!("selected unit is now {}", selected_unit);
}
// Update the player units
for (i, unit) in players_units.iter_mut().enumerate() {
unit.update(delta, i == selected_unit, &pinput);
}
// Set up the rendering data we'll need
let mut render_data = RenderData::new();
let mut world_batches = Vec::new();
let camera_size = renderer.size().cast();
// Render the tiles
map_renderer.render(&map, &mut world_batches, camera_size);
// Render the player units
for unit in &mut players_units {
unit.render(&mut world_batches);
}
// Submit the world render data
//let camera = Camera::new(32.0, Point2::new(0.0, 0.0));
//Projection::Camera(camera)
render_data.render_sets.push(RenderSet::new(Projection::Pixels, world_batches));
// Render the UI
let mut ui_batches = Vec::new();
ui_renderer.render(&mut ui, &mut ui_batches, camera_size, &mut renderer)?;
render_data.render_sets.push(RenderSet::new(Projection::Pixels, ui_batches));
// Finally do the 2D rendering itself
let mut frame = renderer.start_frame();
simple2d_renderer.render(
&render_data, &mut frame, &mut simple2d_render_target, &mut renderer
);
renderer.finish_frame(frame);
window.swap_buffers();
}
Ok(())
}
}
| get_position | identifier_name |