Dataset columns:

| column | type | stats |
|---|---|---|
| file_name | large_string | lengths 4–69 |
| prefix | large_string | lengths 0–26.7k |
| suffix | large_string | lengths 0–24.8k |
| middle | large_string | lengths 0–2.12k |
| fim_type | large_string | 4 classes |
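Each row stores one fill-in-the-middle (FIM) example. A minimal sketch of how a row's pieces recombine into the original source file (the struct below is a hypothetical mirror of the columns, written in the same Rust as the examples that follow):

struct FimRow {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

// The ground-truth file is prefix + middle + suffix; a model is shown the
// prefix and suffix and asked to generate the middle span.
fn reassemble(row: &FimRow) -> String {
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}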
main.rs (fim_type: conditional_block)

#[macro_use]
extern crate log;
extern crate simplelog;
use futures::future;
use futures::future::{BoxFuture, FutureExt};
use reqwest as request;
use base64::encode;
use dirs::home_dir;
use futures::io::SeekFrom;
use regex::Regex;
use request::header::{HeaderMap, AUTHORIZATION, CONTENT_TYPE, USER_AGENT};
use scraper::{Html, Selector};
use serde::{Deserialize, Serialize};
use simplelog::*;
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{self, BufRead, LineWriter, Seek, Write};
use std::str;
const SURE_USER_AGENT: &str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Safari/605.1.15";
const TWILIO_BASE_URL: &str = "https://api.twilio.com/2010-04-01";
#[tokio::main]
async fn main() -> SureResult<()> {
init_logging()?;
let client = request::Client::new();
let sess_id = get_session_id(&client).await?;
let mut listings = get_listings(&client, &sess_id, 0).await?;
remove_duplicates(&mut listings);
if listings.markers.len() > 0 {
let listings_map = scrape_listings(&client, &listings).await?;
let desired_listings = get_desired_listings(&listings_map);
if desired_listings.len() > 0 {
let listing_message = build_listing_message(&desired_listings);
send_messages(&client, &listing_message).await?;
}
}
Ok(())
}
fn init_logging() -> SureResult<()> {
let log_file = OpenOptions::new()
.append(true)
.create(true)
.open(&get_sure_filepath("sure.log"))?;
let config = ConfigBuilder::new()
.set_time_format_str("%c")
.set_time_to_local(true)
.build();
CombinedLogger::init(vec![WriteLogger::new(LevelFilter::Info, config, log_file)]).unwrap();
Ok(())
}
async fn get_session_id(client: &request::Client) -> SureResult<String> {
let re = Regex::new(r#"(PHPSESSID=[\w\S]+);"#).unwrap();
let res = client
.get("https://www.utahrealestate.com/index/public.index")
.header(USER_AGENT, SURE_USER_AGENT)
.send()
.await?;
let sessid = res.headers().get("set-cookie").unwrap().to_str().unwrap();
let mut id = String::from("");
for cap in re.captures_iter(sessid) {
id = String::from(&cap[1]);
}
if id == "" {
panic!("unable to find session id");
}
Ok(id)
}
fn get_listings<'a>(
client: &'a request::Client,
session_id: &'a str,
retry_count: usize,
) -> BoxFuture<'a, SureResult<UreData>> {
if retry_count > 3 {
error!("exceeded retry count - URE must be down");
std::process::exit(0);
}
async move {
let params = get_ure_search_params();
let mut headers = HeaderMap::new();
headers.insert(USER_AGENT, SURE_USER_AGENT.parse().unwrap());
headers.insert(
CONTENT_TYPE,
"application/x-www-form-urlencoded".parse().unwrap(),
);
headers.insert("PHPSESSID", session_id.parse().unwrap());
let res = client
.post("https://www.utahrealestate.com/search/chained.update/param_reset/county_code,o_county_code,city,o_city,zip,o_zip,geometry,o_geometry/count/false/criteria/false/pg/1/limit/50/dh/1190")
.headers(headers)
.body(params)
.send()
.await?;
let res_text = res.text().await?;
match serde_json::from_str(&res_text) {
Ok(v) => Ok(v),
Err(_) => {
error!("failed to parse text, retrying");
Ok(get_listings(client, session_id, retry_count + 1).await?)
}
}
}.boxed()
}
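// Aside: the boxing dance above is the standard way to write a recursive
// async fn in Rust. A plain `async fn` calling itself would have an
// infinitely-sized future type, so the body goes in an `async move` block
// boxed into a `BoxFuture` via `FutureExt::boxed`. A minimal sketch of the
// same pattern, for illustration only:
#[allow(dead_code)]
fn countdown(n: usize) -> BoxFuture<'static, usize> {
    async move {
        if n == 0 {
            0
        } else {
            countdown(n - 1).await
        }
    }
    .boxed()
}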
async fn scrape_listings(
client: &request::Client,
data: &UreData,
) -> SureResult<HashMap<String, Html>> {
let mut raw_futures = vec![];
for (index, marker) in data.markers.iter().enumerate() {
raw_futures.push(get_listing(&client, &marker.id, index));
}
let unpin_futures: Vec<_> = raw_futures.into_iter().map(Box::pin).collect();
let mut mut_futures = unpin_futures;
let mut documents: HashMap<String, Html> = HashMap::new();
let mut size: usize = 0;
let mut current: f32 = 0.0;
let total: usize = mut_futures.len();
while !mut_futures.is_empty() {
match future::select_all(mut_futures).await {
(Ok((id, _idx, document, content_length)), _index, remaining) => {
current += 1.0;
let percentage = (((current / total as f32) * 100.0) / 2.0) as usize;
io::stdout()
.write(
format!(
"\rdownloading listings {}/{}: [{}>{}]",
current,
total,
"=".repeat(percentage),
" ".repeat(50 - percentage),
)
.as_bytes(),
)
.unwrap();
io::stdout().flush().unwrap();
size += content_length;
documents.insert(id, document);
mut_futures = remaining;
}
(Err(_e), _index, remaining) => {
error!("document failed");
mut_futures = remaining;
}
}
}
println!("\n");
info!(
"downloaded {:.2?}MB from {} listings\n\t\t\t\t└──{:?}{}",
size as f32 / 1000000.0,
total,
documents.iter().map(|v| v.0).collect::<Vec<&String>>(),
" ".repeat(50)
);
Ok(documents)
}
fn get_desired_listings(listing_map: &HashMap<String, Html>) -> Vec<DesiredListing> {
let selector = Selector::parse(".facts___list___items.facts___item").unwrap();
let mut desired_listings: Vec<DesiredListing> = vec![];
for (key, value) in listing_map {
let mut dl = DesiredListing::new();
let div = value.select(&selector).collect::<Vec<_>>();
for node in div {
let mut node_vec = node
.text()
.collect::<Vec<&str>>()
.iter()
.map(|&v| v.trim())
.collect::<Vec<&str>>();
node_vec.retain(|&v| v != "");
if node_vec[0] == "Days on URE"
&& (node_vec[1] == "Just Listed"
|| node_vec[1].to_string().parse::<usize>().unwrap() >= 20)
{
dl.interested = true;
}
if node_vec[0] == "Status" && node_vec[1] == "Active" {
dl.active = true;
}
}
if dl.is_desired() {
dl.mls = String::from(key);
desired_listings.push(dl);
}
}
desired_listings
}
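// A hypothetical test of the extraction logic above; the markup is a
// synthetic stand-in for a real listing page (class names assumed to match
// the selector).
#[cfg(test)]
mod desired_listing_tests {
    use super::*;
    #[test]
    fn active_listing_with_enough_days_is_desired() {
        let html = r#"
            <div class="facts___list___items facts___item">Days on URE <span>25</span></div>
            <div class="facts___list___items facts___item">Status <span>Active</span></div>
        "#;
        let mut map = HashMap::new();
        map.insert(String::from("1234567"), Html::parse_document(html));
        let desired = get_desired_listings(&map);
        assert_eq!(desired.len(), 1);
        assert_eq!(desired[0].mls, "1234567");
    }
}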
fn remove_duplicates(listings: &mut UreData) {
let mut dup_idx: Vec<usize> = vec![];
let mut existing = get_checked_listings();
for (idx, listing) in listings.markers.iter().enumerate() {
if existing.contains(&listing.id) {
dup_idx.push(idx);
}
}
if dup_idx.len() > 0 {
for i in dup_idx.into_iter().rev() {
listings.markers.remove(i);
}
}
if listings.markers.len() > 0 {
for listing in listings.markers.iter() {
existing.push(listing.id.clone());
}
write_checked_listings(&existing).unwrap();
} else {
info!("no new listings");
}
}
fn build_listing_message(listings: &Vec<DesiredListing>) -> String {
let mut message_str = String::from("");
for listing in listings {
message_str.push_str(&format!(
"https://www.utahrealestate.com/{}\n\n",
listing.mls
));
}
message_str
}
async fn send_messages(client: &request::Client, message: &str) -> SureResult<()> {
let credentials = get_twilio_credentials();
let mut raw_futures = vec![];
for number in credentials.alert_numbers.iter() {
raw_futures.push(send_message(&client, &message, number))
}
let unpin_futures: Vec<_> = raw_futures.into_iter().map(Box::pin).collect();
let mut mut_futures = unpin_futures;
while !mut_futures.is_empty() {
match future::select_all(mut_futures).await {
(Ok(_res), _index, remaining) => mut_futures = remaining,
(Err(_e), _index, remaining) => mut_futures = remaining,
}
}
Ok(())
}
async fn get_listing(
client: &request::Client,
id: &str,
index: usize,
) -> SureResult<(String, usize, Html, usize)> {
let url = format!("https://www.utahrealestate.com/{}", id);
let res = client
.get(&url)
.header(USER_AGENT, SURE_USER_AGENT)
.send()
.await?;
let body = res.text().await?;
let document = Html::parse_document(&body);
Ok((String::from(id), index, document, body.len()))
}
async fn send_message(client: &request::Client, message: &str, to: &str) -> SureResult<()> {
let credentials = get_twilio_credentials();
let message_url = format!(
"{}/Accounts/{}/Messages.json",
TWILIO_BASE_URL, credentials.sid
);
let mut headers = HeaderMap::new();
headers.insert(
AUTHORIZATION,
format!("Basic {}", credentials.basic_auth())
.parse()
.unwrap(),
);
let params = [
("From", &credentials.number),
("Body", &message.to_string()),
("To", &to.to_string()),
];
let res = client
.post(&message_url)
.headers(headers)
.form(&params)
.send()
.await?;
if res.status() == 201 {
info!("message sent");
} else {
error!(
"error sending message: {:?}\n\t└──{}\n\t└──{:?}",
res.status(),
res.text().await?,
params
)
}
Ok(())
}
///
/// Utility Functions
///
fn get_checked_listings() -> Vec<String> {
let mut checked_mls: Vec<String> = vec![];
if let Ok(lines) = read_lines(&get_sure_filepath("listings.txt")) {
for line in lines {
if let Ok(l) = line {
checked_mls.push(String::from(l.trim()))
}
}
}
checked_mls
}
fn write_checked_listings(checked: &Vec<String>) -> SureResult<()> {
let mut contents = String::from("");
let mut file = OpenOptions::new()
.write(true)
.create(true)
.open(&get_sure_filepath("listings.txt"))?;
file.set_len(0)?;
file.seek(SeekFrom::Start(0))?;
let mut file = LineWriter::new(file);
let mut sorted = checked
.iter()
.map(|v| v.parse::<usize>().unwrap())
.collect::<Vec<usize>>();
sorted.sort();
for c in sorted {
contents.push_str(&format!("{}\n", c));
}
file.write_all(contents.as_bytes())?;
Ok(())
}
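// Note that ids are parsed as integers before sorting, so the file is kept in
// numeric rather than lexicographic order ("900" before "10000"). A worked
// example of just that step:
#[cfg(test)]
mod sort_order_tests {
    #[test]
    fn ids_sort_numerically() {
        let checked = vec![String::from("10000"), String::from("900")];
        let mut sorted: Vec<usize> = checked.iter().map(|v| v.parse().unwrap()).collect();
        sorted.sort();
        assert_eq!(sorted, vec![900, 10000]);
    }
}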
fn get_ure_search_params() -> String {
let mut param_encoded = String::from("");
if let Ok(lines) = read_lines(&get_sure_filepath("queries.env")) {
for line in lines {
if let Ok(l) = line {
param_encoded.push_str(&format!("{}&", l));
}
}
}
String::from(param_encoded)
}
fn get_twilio_credentials() -> TwilioAuth {
let mut auth = TwilioAuth::new();
if let Ok(lines) = read_lines(&get_sure_filepath("twilio.env")) {
for line in lines {
if let Ok(i) = line {
let config_item: Vec<&str> = i.split('=').collect();
if config_item[0] == "AccountSID" {
auth.sid = String::from(config_item[1]);
}
if config_item[0] == "AuthToken" {
auth.auth_token = String::from(config_item[1]);
}
if config_item[0] == "TwilioNumber" {
auth.number = String::from(config_item[1]);
}
if config_item[0] == "AlertNumbers" {
let numbers: Vec<String> = config_item[1]
.split(",")
.into_iter()
.map(String::from)
.collect();
auth.alert_numbers = numbers;
}
}
}
}
auth
}
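// For reference, the key=value layout this parser implies for
// ~/.sure/twilio.env (the values below are hypothetical placeholders):
//
//   AccountSID=ACxxxxxxxxxxxxxxxx
//   AuthToken=xxxxxxxxxxxxxxxx
//   TwilioNumber=+15550001111
//   AlertNumbers=+15550002222,+15550003333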
fn read_lines(filename: &str) -> io::Result<io::Lines<io::BufReader<File>>> {
let file = File::open(filename)?;
Ok(io::BufReader::new(file).lines())
}
fn get_sure_filepath(filename: &str) -> String {
let mut home_path = home_dir().unwrap();
home_path.push(".sure");
home_path.push(filename);
String::from(home_path.to_str().unwrap())
}
///
///
/// Definitions and Implementations
///
///
///
/// DesiredListing
///
#[derive(Debug)]
struct DesiredListing {
active: bool,
interested: bool,
mls: String,
}
impl DesiredListing {
fn new() -> DesiredListing {
Default::default()
}
fn is_desired(&self) -> bool {
self.active && self.interested
}
}
impl Default for DesiredListing {
fn default() -> Self {
DesiredListing {
active: false,
interested: false,
mls: String::from(""),
}
}
}
///
/// Twilio
///
pub struct TwilioAuth {
sid: String,
auth_token: String,
number: String,
alert_numbers: Vec<String>,
}
impl TwilioAuth {
fn new() -> TwilioAuth {
Default::default()
}
fn basic_auth(&self) -> String {
encode(format!("{}:{}", &self.sid, &self.auth_token).as_bytes())
}
}
impl Default for TwilioAuth {
fn default() -> Self {
TwilioAuth {
sid: String::from(""),
auth_token: String::from(""),
number: String::from(""),
alert_numbers: vec![],
}
}
}
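// A sanity check for `basic_auth`: HTTP Basic credentials are the base64
// encoding of "sid:token", so decoding must round-trip. This assumes the
// free `base64::decode` that pairs with the `encode` imported at the top.
#[cfg(test)]
mod twilio_auth_tests {
    use super::*;
    #[test]
    fn basic_auth_is_base64_of_sid_and_token() {
        let auth = TwilioAuth {
            sid: String::from("AC123"),
            auth_token: String::from("secret"),
            number: String::from(""),
            alert_numbers: vec![],
        };
        assert_eq!(
            base64::decode(auth.basic_auth()).unwrap(),
            b"AC123:secret".to_vec()
        );
    }
}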
#[derive(Debug, Serialize, Deserialize)]
struct TwilioResponse {
error_code: String,
status: String,
}
///
/// SureResult and SureError
///
type SureResult<T> = Result<T, SureError>;
#[derive(Debug)]
enum SureError {
IoError(std::io::Error),
ReqwestError(request::Error),
StdError(Box<dyn std::error::Error>),
JsonError(serde_json::Error),
}
impl From<std::io::Error> for SureError {
fn from(error: std::io::Error) -> Self {
SureError::IoError(error)
}
}
impl From<reqwest::Error> for SureError {
fn from(error: reqwest::Error) -> Self {
SureError::ReqwestError(error)
}
}
impl From<Box<dyn std::error::Error>> for SureError {
fn from(error: Box<dyn std::error::Error>) -> Self {
SureError::StdError(error)
}
}
impl From<serde_json::Error> for SureError {
fn from(error: serde_json::Error) -> Self {
SureError::JsonError(error)
}
}
///
/// UreData
/// └── Vec<Marker>
///
#[derive(Debug, Serialize, Deserialize)]
struct UreData {
markers: Vec<Marker>,
}
#[derive(Debug, Serialize, Deserialize)]
struct Marker {
price: String,
id: String,
}
main.rs (fim_type: identifier_body)

// Copyright 2020 Xavier Gillard
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This example shows how to implement a solver for the maximum independent set problem
//! using ddo. It is a fairly simple example but it features most of the aspects you will
//! want to copy when implementing your own solver.
use std::{cell::RefCell, path::Path, fs::File, io::{BufReader, BufRead}, num::ParseIntError, time::{Duration, Instant}};
use bit_set::BitSet;
use clap::Parser;
use ddo::*;
use regex::Regex;
#[cfg(test)]
mod tests;
/// This structure represents an instance of the Maximum Independent Set Problem.
/// It is this structure that implements a simple dynamic programming model for the
/// MISP. In that model, the state is simply a bitset where each bit represents
/// a node that may be kept or left out of the MIS.
pub struct Misp {
/// The number of variables in the problem instance
nb_vars: usize,
/// For each vertex 'i' of the original graph, the field 'neighbors[i]' contains
/// a bitmask representing the COMPLEMENT of the adjacency list of i in the
/// original graph. While this may seem a complicated take on the representation
/// of this problem instance, using the complement is helpful as it makes it
/// possible to remove all the neighbors of a vertex from a state very efficiently.
neighbors: Vec<BitSet>,
/// For each vertex 'i', the value of 'weight[i]' denotes the weight associated
/// to vertex i in the problem instance. The goal of MISP is to select the nodes
/// from the underlying graph such that the resulting set is an independent set
/// where the sum of the weights of selected vertices is maximum.
weight: Vec<isize>,
}
/// A constant to mean take the node in the independent set.
const YES: isize = 1;
/// A constant to mean leave the node out of the independent set.
const NO: isize = 0;
/// The Misp class implements the 'Problem' trait. This means Misp is the definition
/// of the DP model. That DP model is pretty straightforward, but you might still want
/// to check the implementation of the branching heuristic (next_variable method)
/// since it does some interesting stuff.
impl Problem for Misp {
type State = BitSet;
fn nb_variables(&self) -> usize {
self.nb_vars
}
fn initial_state(&self) -> Self::State {
(0..self.nb_variables()).collect()
}
fn initial_value(&self) -> isize {
0
}
fn transition(&self, state: &Self::State, decision: Decision) -> Self::State {
let mut res = state.clone();
res.remove(decision.variable.id());
if decision.value == YES {
// intersect with complement of the neighbors for fast set difference
res.intersect_with(&self.neighbors[decision.variable.id()]);
}
res
}
fn transition_cost(&self, _: &Self::State, decision: Decision) -> isize {
if decision.value == NO {
0
} else {
self.weight[decision.variable.id()]
}
}
fn for_each_in_domain(&self, variable: Variable, state: &Self::State, f: &mut dyn DecisionCallback) {
if state.contains(variable.id()) {
f.apply(Decision{variable, value: YES});
f.apply(Decision{variable, value: NO });
} else {
f.apply(Decision{variable, value: NO });
}
}
/// This method is (apparently) a bit more hairy. What it does is simply decide to branch on
/// the variable that occurs in the least number of states present in the next layer. The intuition
/// here is to limit the max width as much as possible when developing the layers, since all
/// nodes that are not impacted by the change on the selected vertex are simply copied over to the
/// next layer.
fn next_variable(&self, _: usize, next_layer: &mut dyn Iterator<Item = &Self::State>) -> Option<Variable> {
// The thread local stuff is possibly one of the most surprising bits of this code. It declares
// a static variable called VAR_HEURISTIC storing the counts of each vertex in the next layer.
// The fact that it is static means that it will not be re-created (re-allocated) upon each
// pass. The fact that it is declared within a thread-local block means that this static var
// will be created with potentially mutable access for each thread.
thread_local! {
static VAR_HEURISTIC: RefCell<Vec<usize>> = RefCell::new(vec![]);
}
VAR_HEURISTIC.with(|heu| {
let mut heu = heu.borrow_mut();
let heu: &mut Vec<usize> = heu.as_mut();
// initialize
heu.reserve_exact(self.nb_variables());
if heu.is_empty() {
for _ in 0..self.nb_variables() { heu.push(0); }
} else {
heu.iter_mut().for_each(|i| *i = 0);
}
// count the occurrence of each var
for s in next_layer {
for sit in s.iter() {
heu[sit] += 1;
}
}
// take the one occurring the least often
heu.iter().copied().enumerate()
.filter(|(_, v)| *v > 0)
.min_by_key(|(_, v)| *v)
.map(|(x, _)| Variable(x))
})
}
fn is_impacted_by(&self, var: Variable, state: &Self::State) -> bool {
state.contains(var.id())
}
}
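// A hypothetical test of the transition semantics: on the path graph 0-1-2,
// taking vertex 1 also discards its neighbors 0 and 2, while leaving it out
// keeps them available. Recall that `neighbors` stores the COMPLEMENT of
// each adjacency list.
#[cfg(test)]
mod misp_model_tests {
    use super::*;
    fn path3() -> Misp {
        Misp {
            nb_vars: 3,
            neighbors: vec![
                [0usize, 2].iter().copied().collect(), // complement of adj(0) = {1}
                [1usize].iter().copied().collect(),    // complement of adj(1) = {0, 2}
                [0usize, 2].iter().copied().collect(), // complement of adj(2) = {1}
            ],
            weight: vec![1; 3],
        }
    }
    #[test]
    fn taking_a_vertex_drops_its_neighbors() {
        let pb = path3();
        let root = pb.initial_state();
        let take = pb.transition(&root, Decision { variable: Variable(1), value: YES });
        assert!(take.is_empty());
        let skip = pb.transition(&root, Decision { variable: Variable(1), value: NO });
        assert_eq!(skip.iter().collect::<Vec<_>>(), vec![0, 2]);
    }
}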
/// In addition to a dynamic programming (DP) model of the problem you want to solve,
/// the branch and bound with MDD algorithm (and thus ddo) requires that you provide
/// an additional relaxation that controls the maximum amount of space used by
/// the decision diagrams that are compiled.
///
/// That relaxation requires two operations: one to merge several nodes into one
/// merged node that acts as an over approximation of the other nodes. The second
/// operation is used to possibly offset some weight that would otherwise be lost
/// to the arcs entering the newly created merged node.
///
/// The role of this very simple structure is simply to provide an implementation
/// of that relaxation.
///
/// # Note:
/// In addition to the aforementioned two operations, the MispRelax structure implements
/// an optional `fast_upper_bound` method, which provides a useful bound to
/// prune some portions of the state-space as the decision diagrams are compiled
/// (aka rough upper bound pruning).
pub struct MispRelax<'a>{pb: &'a Misp}
impl Relaxation for MispRelax<'_> {
type State = BitSet;
fn merge(&self, states: &mut dyn Iterator<Item = &Self::State>) -> Self::State {
let mut state = BitSet::with_capacity(self.pb.nb_variables());
for s in states {
state.union_with(s);
}
state
}
fn relax(
&self,
_source: &Self::State,
_dest: &Self::State,
_new: &Self::State,
_decision: Decision,
cost: isize,
) -> isize {
cost
}
fn fast_upper_bound(&self, state: &Self::State) -> isize {
state.iter().map(|x| self.pb.weight[x]).sum()
}
}
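// A hypothetical check of the relaxation: merging two states takes their
// union, and the rough upper bound sums the weights of all still-selectable
// vertices.
#[cfg(test)]
mod misp_relax_tests {
    use super::*;
    #[test]
    fn merge_is_union_and_bound_sums_weights() {
        let pb = Misp {
            nb_vars: 3,
            neighbors: vec![BitSet::new(); 3],
            weight: vec![3, 5, 7],
        };
        let relax = MispRelax { pb: &pb };
        let states: Vec<BitSet> = vec![
            [0usize, 1].iter().copied().collect(),
            [1usize, 2].iter().copied().collect(),
        ];
        let merged = relax.merge(&mut states.iter());
        assert_eq!(merged.iter().collect::<Vec<_>>(), vec![0, 1, 2]);
        assert_eq!(relax.fast_upper_bound(&merged), 15);
    }
}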
/// The last bit of information which we need to provide when implementing a ddo-based
/// solver is a `StateRanking`. This is a heuristic which is used to select the most
/// and least promising nodes as a means to only delete/merge the *least* promising nodes
/// when compiling restricted and relaxed DDs.
pub struct MispRanking;
impl StateRanking for MispRanking {
type State = BitSet;
fn compare(&self, a: &Self::State, b: &Self::State) -> std::cmp::Ordering {
a.len().cmp(&b.len())
.then_with(|| a.cmp(b))
}
}
// #########################################################################################
// # THE INFORMATION BEYOND THIS LINE IS NOT DIRECTLY RELATED TO THE IMPLEMENTATION OF #
// # A SOLVER BASED ON DDO. INSTEAD, THAT PORTION OF THE CODE CONTAINS GENERIC FUNCTION #
// # THAT ARE USED TO READ AN INSTANCE FROM FILE, PROCESS COMMAND LINE ARGUMENTS, AND #
// # THE MAIN FUNCTION. THESE ARE THUS NOT REQUIRED 'PER-SE', BUT I BELIEVE IT IS USEFUL #
// # TO SHOW HOW IT CAN BE DONE IN AN EXAMPLE. #
// #########################################################################################
/// This structure uses `clap-derive` annotations and defines the arguments that can
/// be passed on to the executable solver.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The path to the instance file
fname: String,
/// The number of concurrent threads
#[clap(short, long, default_value = "8")]
threads: usize,
/// The maximum amount of time you would like this solver to run
#[clap(short, long)]
duration: Option<u64>,
/// The maximum number of nodes per layer
#[clap(short, long)]
width: Option<usize>,
}
/// This enumeration simply groups the kinds of errors that might occur when parsing a
/// misp instance from file. There can be io errors (file unavailable?), format errors
/// (e.g. the file is not an instance but contains the text of your next paper),
/// or parse int errors (which are actually a variant of the format error since they tell
/// you that the parser expected an integer number but got... something else).
#[derive(Debug, thiserror::Error)]
enum Error {
/// There was an io related error
#[error("io error {0}")]
Io(#[from] std::io::Error),
/// The parser expected to read something that was an integer but got some garbage
#[error("parse int {0}")]
ParseInt(#[from] ParseIntError),
/// The file was not properly formatted.
#[error("ill formed instance")]
Format,
}
/// This function is used to read a misp instance from file. It returns either a
/// misp instance if everything went well or an error describing the problem.
fn read_instance<P: AsRef<Path>>(fname: P) -> Result<Misp, Error> {
let f = File::open(fname)?;
let f = BufReader::new(f);
let comment = Regex::new(r"^c\s.*$").unwrap();
let pb_decl = Regex::new(r"^p\s+edge\s+(?P<vars>\d+)\s+(?P<edges>\d+)$").unwrap();
let node_decl = Regex::new(r"^n\s+(?P<node>\d+)\s+(?P<weight>-?\d+)").unwrap();
let edge_decl = Regex::new(r"^e\s+(?P<src>\d+)\s+(?P<dst>\d+)").unwrap();
let mut g = Misp{nb_vars: 0, neighbors: vec![], weight: vec![]};
for line in f.lines() {
let line = line?;
let line = line.trim();
if line.is_empty() {
continue;
}
if comment.is_match(line) {
continue;
}
if let Some(caps) = pb_decl.captures(line) {
let n = caps["vars"].to_string().parse::<usize>()?;
let full = (0..n).collect();
g.nb_vars = n;
g.neighbors = vec![full; n];
g.weight = vec![1; n];
continue;
}
if let Some(caps) = node_decl.captures(line) {
let n = caps["node"].to_string().parse::<usize>()?;
let w = caps["weight"].to_string().parse::<isize>()?;
let n = n - 1;
g.weight[n] = w;
continue;
}
if let Some(caps) = edge_decl.captures(line) {
let src = caps["src"].to_string().parse::<usize>()?;
let dst = caps["dst"].to_string().parse::<usize>()?;
let src = src-1;
let dst = dst-1;
g.neighbors[src].remove(dst);
g.neighbors[dst].remove(src);
continue;
}
// any other line means the instance is ill-formed
return Err(Error::Format)
}
Ok(g)
}
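// A hypothetical round-trip of the DIMACS-flavoured format handled above;
// the temp-file path handling is an assumption of this sketch.
#[cfg(test)]
mod read_instance_tests {
    use super::*;
    #[test]
    fn parses_a_tiny_instance() {
        let path = std::env::temp_dir().join("misp_tiny_example.clq");
        std::fs::write(&path, "c tiny instance\np edge 3 2\nn 1 10\ne 1 2\ne 2 3\n").unwrap();
        let g = read_instance(&path).unwrap();
        assert_eq!(g.nb_vars, 3);
        assert_eq!(g.weight, vec![10, 1, 1]);
        // vertices 1 and 2 are adjacent, so 2 is absent from the complement list of 1
        assert!(!g.neighbors[0].contains(1));
        // vertices 1 and 3 are not adjacent
        assert!(g.neighbors[0].contains(2));
    }
}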
/// A utility function to return a max width heuristic that can either be a fixed width
/// policy (if w is fixed) or an adaptive policy returning the number of unassigned variables
/// in the overall problem.
fn max_width<P: Problem>(p: &P, w: Option<usize>) -> Box<dyn WidthHeuristic<P::State> + Send + Sync> {
if let Some(w) = w {
Box::new(FixedWidth(w))
} else {
Box::new(NbUnassignedWidth(p.nb_variables()))
}
}
/// A utility function to return a cutoff heuristic that can either be a time budget policy
/// (if a timeout is fixed) or no cutoff policy.
fn cutoff(timeout: Option<u64>) -> Box<dyn Cutoff + Send + Sync> {
if let Some(t) = timeout {
Box::new(TimeBudget::new(Duration::from_secs(t)))
} else {
Box::new(NoCutoff)
}
}
/// This is your executable's entry point. It is the place where all the pieces are put together
/// to create a fast and effective solver for the misp problem.
fn main() {
let args = Args::parse();
let fname = &args.fname;
let problem = read_instance(fname).unwrap();
let relaxation = MispRelax {pb: &problem};
let ranking = MispRanking;
let width = max_width(&problem, args.width);
let dominance = EmptyDominanceChecker::default();
let cutoff = cutoff(args.duration);
let mut fringe = NoDupFringe::new(MaxUB::new(&ranking));
// This solver compiles DDs that allow the definition of long arcs spanning several layers.
let mut solver = ParNoBarrierSolverLel::custom(
&problem,
&relaxation,
&ranking,
width.as_ref(),
&dominance,
cutoff.as_ref(),
&mut fringe,
args.threads,
);
let start = Instant::now();
let Completion{ is_exact, best_value } = solver.maximize();
let duration = start.elapsed();
let upper_bound = solver.best_upper_bound();
let lower_bound = solver.best_lower_bound();
let gap = solver.gap();
let best_solution: Option<Vec<_>> = solver.best_solution().map(|mut decisions|{
decisions.sort_unstable_by_key(|d| d.variable.id());
decisions.iter()
.filter(|d| d.value == 1)
.map(|d| d.variable.id())
.collect()
});
// check solution
if let Some(bs) = best_solution.as_ref() {
for (i, a) in bs.iter().copied().enumerate() {
for b in bs.iter().copied().skip(i+1) {
if !problem.neighbors[a].contains(b) {
println!("not a solution! {a} -- {b}");
}
}
}
}
println!("Duration: {:.3} seconds", duration.as_secs_f32());
println!("Objective: {}", best_value.unwrap_or(-1));
println!("Upper Bnd: {}", upper_bound);
println!("Lower Bnd: {}", lower_bound);
println!("Gap: {:.3}", gap);
println!("Aborted: {}", !is_exact);
println!("Solution: {:?}", best_solution.unwrap_or_default());
}
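// Example invocation (the instance file name is hypothetical; the flags
// follow the `Args` definition above):
//
//   $ cargo run --release -- instance.clq --threads 8 --duration 60 --width 100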
main.rs | // Copyright 2020 Xavier Gillard
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This example show how to implement a solver for the maximum independent set problem
//! using ddo. It is a fairly simple example but it features most of the aspects you will
//! want to copy when implementing your own solver.
use std::{cell::RefCell, path::Path, fs::File, io::{BufReader, BufRead}, num::ParseIntError, time::{Duration, Instant}};
use bit_set::BitSet;
use clap::Parser;
use ddo::*;
use regex::Regex;
#[cfg(test)]
mod tests;
/// This structure represents an instance of the Maximum Independent Set Problem.
/// It is this structure that implements a simple dynamic programming model for the
/// MISP. In that model, the state is simply a bitset where each bit represents
/// a node that may be kept or left out of the MIS.
pub struct Misp {
/// The number of variables in the problem instance
nb_vars: usize,
/// For each vertex 'i' of the original graph, the field 'neighbors[i]' contains
/// a bitmask representing the COMPLEMENT of the adjacency list of i in the
/// original graph. While this may seem a complicated take on the representation
/// of this problem instance, using the complement is helpful as it allows to
/// easily remove all the neighbors of a vertex from a state very efficiently.
neighbors: Vec<BitSet>,
/// For each vertex 'i', the value of 'weight[i]' denotes the weight associated
/// to vertex i in the problem instance. The goal of MISP is to select the nodes
/// from the underlying graph such that the resulting set is an independent set
/// where the sum of the weights of selected vertices is maximum.
weight: Vec<isize>,
}
/// A constant to mean take the node in the independent set.
const YES: isize = 1;
/// A constant to mean leave the node out of the independent set.
const NO: isize = 0;
/// The Misp class implements the 'Problem' trait. This means Misp is the definition
/// of the DP model. That DP model is pretty straightforward, still you might want
/// to check the implementation of the branching heuristic (next_variable method)
/// since it does interesting stuffs.
impl Problem for Misp {
type State = BitSet;
fn nb_variables(&self) -> usize {
self.nb_vars
}
fn initial_state(&self) -> Self::State {
(0..self.nb_variables()).collect()
}
fn initial_value(&self) -> isize {
0
}
fn transition(&self, state: &Self::State, decision: Decision) -> Self::State {
let mut res = state.clone();
res.remove(decision.variable.id());
if decision.value == YES {
// intersect with complement of the neighbors for fast set difference
res.intersect_with(&self.neighbors[decision.variable.id()]);
}
res
}
fn transition_cost(&self, _: &Self::State, decision: Decision) -> isize {
if decision.value == NO {
0
} else {
self.weight[decision.variable.id()]
}
}
fn for_each_in_domain(&self, variable: Variable, state: &Self::State, f: &mut dyn DecisionCallback) {
if state.contains(variable.id()) {
f.apply(Decision{variable, value: YES});
f.apply(Decision{variable, value: NO });
} else {
f.apply(Decision{variable, value: NO });
}
}
/// This method is (apparently) a bit more hairy. What it does is it simply decides to branch on
/// the variable that occurs in the least number of states present in the next layer. The intuition
/// here is to limit the max width as much as possible when developing the layers since all
/// nodes that are not impacted by the change on the selectd vertex are simply copied over to the
/// next layer.
fn next_variable(&self, _: usize, next_layer: &mut dyn Iterator<Item = &Self::State>) -> Option<Variable> {
// The thread local stuff is possibly one of the most surprising bits of this code. It declares
// a static variable called VAR_HEURISTIC storing the counts of each vertex in the next layer.
// The fact that it is static means that it will not be re-created (re allocated) upon each
// pass. The fact that it is declared within a thread local block, means that this static var
// will be created with a potentially mutable access for each thread.
thread_local! {
static VAR_HEURISTIC: RefCell<Vec<usize>> = RefCell::new(vec![]);
}
VAR_HEURISTIC.with(|heu| {
let mut heu = heu.borrow_mut();
let heu: &mut Vec<usize> = heu.as_mut();
// initialize
heu.reserve_exact(self.nb_variables());
if heu.is_empty() {
for _ in 0..self.nb_variables() { heu.push(0); }
} else {
heu.iter_mut().for_each(|i| *i = 0);
}
// count the occurrence of each var
for s in next_layer {
for sit in s.iter() {
heu[sit] += 1;
}
}
// take the one occurring the least often
heu.iter().copied().enumerate()
.filter(|(_, v)| *v > 0)
.min_by_key(|(_, v)| *v)
.map(|(x, _)| Variable(x))
})
}
fn is_impacted_by(&self, var: Variable, state: &Self::State) -> bool {
state.contains(var.id())
}
}
/// In addition to a dynamic programming (DP) model of the problem you want to solve,
/// the branch and bound with MDD algorithm (and thus ddo) requires that you provide
/// an additional relaxation allowing to control the maximum amount of space used by
/// the decision diagrams that are compiled.
///
/// That relaxation requires two operations: one to merge several nodes into one
/// merged node that acts as an over approximation of the other nodes. The second
/// operation is used to possibly offset some weight that would otherwise be lost
/// to the arcs entering the newly created merged node.
///
/// The role of this very simple structure is simply to provide an implementation
/// of that relaxation.
///
/// # Note:
/// In addition to the aforementioned two operations, the MispRelax structure implements
/// an optional `fast_upper_bound` method. Which one provides a useful bound to
/// prune some portions of the state-space as the decision diagrams are compiled.
/// (aka rough upper bound pruning).
pub struct MispRelax<'a>{pb: &'a Misp}
impl Relaxation for MispRelax<'_> {
type State = BitSet;
fn merge(&self, states: &mut dyn Iterator<Item = &Self::State>) -> Self::State {
let mut state = BitSet::with_capacity(self.pb.nb_variables());
for s in states {
state.union_with(s);
}
state
}
fn relax(
&self,
_source: &Self::State,
_dest: &Self::State,
_new: &Self::State,
_decision: Decision,
cost: isize,
) -> isize {
cost
}
fn fast_upper_bound(&self, state: &Self::State) -> isize {
state.iter().map(|x| self.pb.weight[x]).sum()
}
}
/// The last bit of information which we need to provide when implementing a ddo-based
/// solver is a `StateRanking`. This is an heuristic which is used to select the most
/// and least promising nodes as a means to only delete/merge the *least* promising nodes
/// when compiling restricted and relaxed DDs.
pub struct MispRanking;
impl StateRanking for MispRanking {
type State = BitSet;
fn compare(&self, a: &Self::State, b: &Self::State) -> std::cmp::Ordering {
a.len().cmp(&b.len())
.then_with(|| a.cmp(b))
}
}
// #########################################################################################
// # THE INFORMATION BEYOND THIS LINE IS NOT DIRECTLY RELATED TO THE IMPLEMENTATION OF #
// # A SOLVER BASED ON DDO. INSTEAD, THAT PORTION OF THE CODE CONTAINS GENERIC FUNCTION #
// # THAT ARE USED TO READ AN INSTANCE FROM FILE, PROCESS COMMAND LINE ARGUMENTS, AND #
// # THE MAIN FUNCTION. THESE ARE THUS NOT REQUIRED 'PER-SE', BUT I BELIEVE IT IS USEFUL #
// # TO SHOW HOW IT CAN BE DONE IN AN EXAMPLE. #
// #########################################################################################
/// This structure uses `clap-derive` annotations and define the arguments that can
/// be passed on to the executable solver.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The path to the instance file
fname: String,
/// The number of concurrent threads
#[clap(short, long, default_value = "8")]
threads: usize,
/// The maximum amount of time you would like this solver to run
#[clap(short, long)]
duration: Option<u64>,
/// The maximum number of nodes per layer
#[clap(short, long)]
width: Option<usize>,
}
/// This enumeration simply groups the kind of errors that might occur when parsing a
/// misp instance from file. There can be io errors (file unavailable?), format error
/// (e.g. the file is not an instance but contains the text of your next paper),
/// or parse int errors (which are actually a variant of the format error since it tells
/// you that the parser expected an integer number but got... something else).
#[derive(Debug, thiserror::Error)]
enum Error {
/// There was an io related error
#[error("io error {0}")]
Io(#[from] std::io::Error),
/// The parser expected to read something that was an integer but got some garbage
#[error("parse int {0}")]
ParseInt(#[from] ParseIntError),
/// The file was not properly formatted.
#[error("ill formed instance")]
Format,
}
/// This function is used to read a misp instance from file. It returns either a
/// misp instance if everything went on well or an error describing the problem.
fn read_instance<P: AsRef<Path>>(fname: P) -> Result<Misp, Error> {
let f = File::open(fname)?;
let f = BufReader::new(f);
let comment = Regex::new(r"^c\s.*$").unwrap();
let pb_decl = Regex::new(r"^p\s+edge\s+(?P<vars>\d+)\s+(?P<edges>\d+)$").unwrap();
let node_decl = Regex::new(r"^n\s+(?P<node>\d+)\s+(?P<weight>-?\d+)").unwrap();
let edge_decl = Regex::new(r"^e\s+(?P<src>\d+)\s+(?P<dst>\d+)").unwrap();
let mut g = Misp{nb_vars: 0, neighbors: vec![], weight: vec![]};
for line in f.lines() {
let line = line?;
let line = line.trim();
if line.is_empty() |
if comment.is_match(line) {
continue;
}
if let Some(caps) = pb_decl.captures(line) {
let n = caps["vars"].to_string().parse::<usize>()?;
let full = (0..n).collect();
g.nb_vars = n;
g.neighbors = vec![full; n];
g.weight = vec![1; n];
continue;
}
if let Some(caps) = node_decl.captures(line) {
let n = caps["node"].to_string().parse::<usize>()?;
let w = caps["weight"].to_string().parse::<isize>()?;
let n = n - 1;
g.weight[n] = w;
continue;
}
if let Some(caps) = edge_decl.captures(line) {
let src = caps["src"].to_string().parse::<usize>()?;
let dst = caps["dst"].to_string().parse::<usize>()?;
let src = src-1;
let dst = dst-1;
g.neighbors[src].remove(dst);
g.neighbors[dst].remove(src);
continue;
}
// skip
return Err(Error::Format)
}
Ok(g)
}
/// An utility function to return an max width heuristic that can either be a fixed width
/// policy (if w is fixed) or an adaptive policy returning the number of unassigned variables
/// in the overall problem.
fn max_width<P: Problem>(p: &P, w: Option<usize>) -> Box<dyn WidthHeuristic<P::State> + Send + Sync> {
if let Some(w) = w {
Box::new(FixedWidth(w))
} else {
Box::new(NbUnassignedWidth(p.nb_variables()))
}
}
/// An utility function to return a cutoff heuristic that can either be a time budget policy
/// (if timeout is fixed) or no cutoff policy.
fn cutoff(timeout: Option<u64>) -> Box<dyn Cutoff + Send + Sync> {
if let Some(t) = timeout {
Box::new(TimeBudget::new(Duration::from_secs(t)))
} else {
Box::new(NoCutoff)
}
}
/// This is your executable's entry point. It is the place where all the pieces are put together
/// to create a fast an effective solver for the misp problem.
fn main() {
let args = Args::parse();
let fname = &args.fname;
let problem = read_instance(fname).unwrap();
let relaxation = MispRelax {pb: &problem};
let ranking = MispRanking;
let width = max_width(&problem, args.width);
let dominance = EmptyDominanceChecker::default();
let cutoff = cutoff(args.duration);
let mut fringe = NoDupFringe::new(MaxUB::new(&ranking));
// This solver compile DD that allow the definition of long arcs spanning over several layers.
let mut solver = ParNoBarrierSolverLel::custom(
&problem,
&relaxation,
&ranking,
width.as_ref(),
&dominance,
cutoff.as_ref(),
&mut fringe,
args.threads,
);
let start = Instant::now();
let Completion{ is_exact, best_value } = solver.maximize();
let duration = start.elapsed();
let upper_bound = solver.best_upper_bound();
let lower_bound = solver.best_lower_bound();
let gap = solver.gap();
let best_solution: Option<Vec<_>> = solver.best_solution().map(|mut decisions|{
decisions.sort_unstable_by_key(|d| d.variable.id());
decisions.iter()
.filter(|d| d.value == 1)
.map(|d| d.variable.id())
.collect()
});
// check solution
if let Some(bs) = best_solution.as_ref() {
for (i, a) in bs.iter().copied().enumerate() {
for b in bs.iter().copied().skip(i+1) {
if!problem.neighbors[a].contains(b) {
println!("not a solution! {a} -- {b}");
}
}
}
}
println!("Duration: {:.3} seconds", duration.as_secs_f32());
println!("Objective: {}", best_value.unwrap_or(-1));
println!("Upper Bnd: {}", upper_bound);
println!("Lower Bnd: {}", lower_bound);
println!("Gap: {:.3}", gap);
println!("Aborted: {}", !is_exact);
println!("Solution: {:?}", best_solution.unwrap_or_default());
}
| {
continue;
} | conditional_block |
main.rs | // Copyright 2020 Xavier Gillard
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This example show how to implement a solver for the maximum independent set problem
//! using ddo. It is a fairly simple example but it features most of the aspects you will
//! want to copy when implementing your own solver.
use std::{cell::RefCell, path::Path, fs::File, io::{BufReader, BufRead}, num::ParseIntError, time::{Duration, Instant}};
use bit_set::BitSet;
use clap::Parser;
use ddo::*;
use regex::Regex;
#[cfg(test)]
mod tests;
/// This structure represents an instance of the Maximum Independent Set Problem.
/// It is this structure that implements a simple dynamic programming model for the
/// MISP. In that model, the state is simply a bitset where each bit represents
/// a node that may be kept or left out of the MIS.
pub struct Misp {
/// The number of variables in the problem instance
nb_vars: usize,
/// For each vertex 'i' of the original graph, the field 'neighbors[i]' contains
/// a bitmask representing the COMPLEMENT of the adjacency list of i in the
/// original graph. While this may seem a complicated take on the representation
/// of this problem instance, using the complement is helpful as it allows to
/// easily remove all the neighbors of a vertex from a state very efficiently.
neighbors: Vec<BitSet>,
/// For each vertex 'i', the value of 'weight[i]' denotes the weight associated
/// to vertex i in the problem instance. The goal of MISP is to select the nodes
/// from the underlying graph such that the resulting set is an independent set
/// where the sum of the weights of selected vertices is maximum.
weight: Vec<isize>,
}
/// A constant to mean take the node in the independent set.
const YES: isize = 1;
/// A constant to mean leave the node out of the independent set.
const NO: isize = 0;
/// The Misp class implements the 'Problem' trait. This means Misp is the definition
/// of the DP model. That DP model is pretty straightforward, still you might want
/// to check the implementation of the branching heuristic (next_variable method)
/// since it does interesting stuffs.
impl Problem for Misp {
type State = BitSet;
|
fn initial_state(&self) -> Self::State {
(0..self.nb_variables()).collect()
}
fn initial_value(&self) -> isize {
0
}
fn transition(&self, state: &Self::State, decision: Decision) -> Self::State {
let mut res = state.clone();
res.remove(decision.variable.id());
if decision.value == YES {
// intersect with complement of the neighbors for fast set difference
res.intersect_with(&self.neighbors[decision.variable.id()]);
}
res
}
fn transition_cost(&self, _: &Self::State, decision: Decision) -> isize {
if decision.value == NO {
0
} else {
self.weight[decision.variable.id()]
}
}
fn for_each_in_domain(&self, variable: Variable, state: &Self::State, f: &mut dyn DecisionCallback) {
if state.contains(variable.id()) {
f.apply(Decision{variable, value: YES});
f.apply(Decision{variable, value: NO });
} else {
f.apply(Decision{variable, value: NO });
}
}
/// This method is (apparently) a bit more hairy. What it does is it simply decides to branch on
/// the variable that occurs in the least number of states present in the next layer. The intuition
/// here is to limit the max width as much as possible when developing the layers since all
/// nodes that are not impacted by the change on the selectd vertex are simply copied over to the
/// next layer.
fn next_variable(&self, _: usize, next_layer: &mut dyn Iterator<Item = &Self::State>) -> Option<Variable> {
// The thread local stuff is possibly one of the most surprising bits of this code. It declares
// a static variable called VAR_HEURISTIC storing the counts of each vertex in the next layer.
// The fact that it is static means that it will not be re-created (re allocated) upon each
// pass. The fact that it is declared within a thread local block, means that this static var
// will be created with a potentially mutable access for each thread.
thread_local! {
static VAR_HEURISTIC: RefCell<Vec<usize>> = RefCell::new(vec![]);
}
VAR_HEURISTIC.with(|heu| {
let mut heu = heu.borrow_mut();
let heu: &mut Vec<usize> = heu.as_mut();
// initialize
heu.reserve_exact(self.nb_variables());
if heu.is_empty() {
for _ in 0..self.nb_variables() { heu.push(0); }
} else {
heu.iter_mut().for_each(|i| *i = 0);
}
// count the occurrence of each var
for s in next_layer {
for sit in s.iter() {
heu[sit] += 1;
}
}
// take the one occurring the least often
heu.iter().copied().enumerate()
.filter(|(_, v)| *v > 0)
.min_by_key(|(_, v)| *v)
.map(|(x, _)| Variable(x))
})
}
fn is_impacted_by(&self, var: Variable, state: &Self::State) -> bool {
state.contains(var.id())
}
}
/// In addition to a dynamic programming (DP) model of the problem you want to solve,
/// the branch and bound with MDD algorithm (and thus ddo) requires that you provide
/// an additional relaxation allowing to control the maximum amount of space used by
/// the decision diagrams that are compiled.
///
/// That relaxation requires two operations: one to merge several nodes into one
/// merged node that acts as an over approximation of the other nodes. The second
/// operation is used to possibly offset some weight that would otherwise be lost
/// to the arcs entering the newly created merged node.
///
/// The role of this very simple structure is simply to provide an implementation
/// of that relaxation.
///
/// # Note:
/// In addition to the aforementioned two operations, the MispRelax structure implements
/// an optional `fast_upper_bound` method. Which one provides a useful bound to
/// prune some portions of the state-space as the decision diagrams are compiled.
/// (aka rough upper bound pruning).
pub struct MispRelax<'a>{pb: &'a Misp}
impl Relaxation for MispRelax<'_> {
type State = BitSet;
fn merge(&self, states: &mut dyn Iterator<Item = &Self::State>) -> Self::State {
let mut state = BitSet::with_capacity(self.pb.nb_variables());
for s in states {
state.union_with(s);
}
state
}
fn relax(
&self,
_source: &Self::State,
_dest: &Self::State,
_new: &Self::State,
_decision: Decision,
cost: isize,
) -> isize {
cost
}
fn fast_upper_bound(&self, state: &Self::State) -> isize {
state.iter().map(|x| self.pb.weight[x]).sum()
}
}
/// The last bit of information which we need to provide when implementing a ddo-based
/// solver is a `StateRanking`. This is an heuristic which is used to select the most
/// and least promising nodes as a means to only delete/merge the *least* promising nodes
/// when compiling restricted and relaxed DDs.
pub struct MispRanking;
impl StateRanking for MispRanking {
type State = BitSet;
fn compare(&self, a: &Self::State, b: &Self::State) -> std::cmp::Ordering {
a.len().cmp(&b.len())
.then_with(|| a.cmp(b))
}
}
// #########################################################################################
// # THE INFORMATION BEYOND THIS LINE IS NOT DIRECTLY RELATED TO THE IMPLEMENTATION OF #
// # A SOLVER BASED ON DDO. INSTEAD, THAT PORTION OF THE CODE CONTAINS GENERIC FUNCTION #
// # THAT ARE USED TO READ AN INSTANCE FROM FILE, PROCESS COMMAND LINE ARGUMENTS, AND #
// # THE MAIN FUNCTION. THESE ARE THUS NOT REQUIRED 'PER-SE', BUT I BELIEVE IT IS USEFUL #
// # TO SHOW HOW IT CAN BE DONE IN AN EXAMPLE. #
// #########################################################################################
/// This structure uses `clap-derive` annotations and define the arguments that can
/// be passed on to the executable solver.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The path to the instance file
fname: String,
/// The number of concurrent threads
#[clap(short, long, default_value = "8")]
threads: usize,
/// The maximum amount of time you would like this solver to run
#[clap(short, long)]
duration: Option<u64>,
/// The maximum number of nodes per layer
#[clap(short, long)]
width: Option<usize>,
}
/// This enumeration simply groups the kind of errors that might occur when parsing a
/// misp instance from file. There can be io errors (file unavailable?), format error
/// (e.g. the file is not an instance but contains the text of your next paper),
/// or parse int errors (which are actually a variant of the format error since it tells
/// you that the parser expected an integer number but got... something else).
#[derive(Debug, thiserror::Error)]
enum Error {
/// There was an io related error
#[error("io error {0}")]
Io(#[from] std::io::Error),
/// The parser expected to read something that was an integer but got some garbage
#[error("parse int {0}")]
ParseInt(#[from] ParseIntError),
/// The file was not properly formatted.
#[error("ill formed instance")]
Format,
}
/// This function is used to read a misp instance from file. It returns either a
/// misp instance if everything went on well or an error describing the problem.
fn read_instance<P: AsRef<Path>>(fname: P) -> Result<Misp, Error> {
let f = File::open(fname)?;
let f = BufReader::new(f);
let comment = Regex::new(r"^c\s.*$").unwrap();
let pb_decl = Regex::new(r"^p\s+edge\s+(?P<vars>\d+)\s+(?P<edges>\d+)$").unwrap();
let node_decl = Regex::new(r"^n\s+(?P<node>\d+)\s+(?P<weight>-?\d+)").unwrap();
let edge_decl = Regex::new(r"^e\s+(?P<src>\d+)\s+(?P<dst>\d+)").unwrap();
let mut g = Misp{nb_vars: 0, neighbors: vec![], weight: vec![]};
for line in f.lines() {
let line = line?;
let line = line.trim();
if line.is_empty() {
continue;
}
if comment.is_match(line) {
continue;
}
if let Some(caps) = pb_decl.captures(line) {
let n = caps["vars"].to_string().parse::<usize>()?;
let full = (0..n).collect();
g.nb_vars = n;
g.neighbors = vec![full; n];
g.weight = vec![1; n];
continue;
}
if let Some(caps) = node_decl.captures(line) {
let n = caps["node"].to_string().parse::<usize>()?;
let w = caps["weight"].to_string().parse::<isize>()?;
let n = n - 1;
g.weight[n] = w;
continue;
}
if let Some(caps) = edge_decl.captures(line) {
let src = caps["src"].to_string().parse::<usize>()?;
let dst = caps["dst"].to_string().parse::<usize>()?;
let src = src-1;
let dst = dst-1;
g.neighbors[src].remove(dst);
g.neighbors[dst].remove(src);
continue;
}
// skip
return Err(Error::Format)
}
Ok(g)
}
/// An utility function to return an max width heuristic that can either be a fixed width
/// policy (if w is fixed) or an adaptive policy returning the number of unassigned variables
/// in the overall problem.
fn max_width<P: Problem>(p: &P, w: Option<usize>) -> Box<dyn WidthHeuristic<P::State> + Send + Sync> {
if let Some(w) = w {
Box::new(FixedWidth(w))
} else {
Box::new(NbUnassignedWidth(p.nb_variables()))
}
}
/// An utility function to return a cutoff heuristic that can either be a time budget policy
/// (if timeout is fixed) or no cutoff policy.
fn cutoff(timeout: Option<u64>) -> Box<dyn Cutoff + Send + Sync> {
if let Some(t) = timeout {
Box::new(TimeBudget::new(Duration::from_secs(t)))
} else {
Box::new(NoCutoff)
}
}
/// This is your executable's entry point. It is the place where all the pieces are put together
/// to create a fast an effective solver for the misp problem.
fn main() {
let args = Args::parse();
let fname = &args.fname;
let problem = read_instance(fname).unwrap();
let relaxation = MispRelax {pb: &problem};
let ranking = MispRanking;
let width = max_width(&problem, args.width);
let dominance = EmptyDominanceChecker::default();
let cutoff = cutoff(args.duration);
let mut fringe = NoDupFringe::new(MaxUB::new(&ranking));
// This solver compile DD that allow the definition of long arcs spanning over several layers.
let mut solver = ParNoBarrierSolverLel::custom(
&problem,
&relaxation,
&ranking,
width.as_ref(),
&dominance,
cutoff.as_ref(),
&mut fringe,
args.threads,
);
let start = Instant::now();
let Completion{ is_exact, best_value } = solver.maximize();
let duration = start.elapsed();
let upper_bound = solver.best_upper_bound();
let lower_bound = solver.best_lower_bound();
let gap = solver.gap();
let best_solution: Option<Vec<_>> = solver.best_solution().map(|mut decisions|{
decisions.sort_unstable_by_key(|d| d.variable.id());
decisions.iter()
.filter(|d| d.value == 1)
.map(|d| d.variable.id())
.collect()
});
// check solution
if let Some(bs) = best_solution.as_ref() {
for (i, a) in bs.iter().copied().enumerate() {
for b in bs.iter().copied().skip(i+1) {
if!problem.neighbors[a].contains(b) {
println!("not a solution! {a} -- {b}");
}
}
}
}
println!("Duration: {:.3} seconds", duration.as_secs_f32());
println!("Objective: {}", best_value.unwrap_or(-1));
println!("Upper Bnd: {}", upper_bound);
println!("Lower Bnd: {}", lower_bound);
println!("Gap: {:.3}", gap);
println!("Aborted: {}", !is_exact);
println!("Solution: {:?}", best_solution.unwrap_or_default());
} | fn nb_variables(&self) -> usize {
self.nb_vars
} | random_line_split |
main.rs | // Copyright 2020 Xavier Gillard
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This example show how to implement a solver for the maximum independent set problem
//! using ddo. It is a fairly simple example but it features most of the aspects you will
//! want to copy when implementing your own solver.
use std::{cell::RefCell, path::Path, fs::File, io::{BufReader, BufRead}, num::ParseIntError, time::{Duration, Instant}};
use bit_set::BitSet;
use clap::Parser;
use ddo::*;
use regex::Regex;
#[cfg(test)]
mod tests;
/// This structure represents an instance of the Maximum Independent Set Problem.
/// It is this structure that implements a simple dynamic programming model for the
/// MISP. In that model, the state is simply a bitset where each bit represents
/// a node that may be kept or left out of the MIS.
pub struct Misp {
/// The number of variables in the problem instance
nb_vars: usize,
/// For each vertex 'i' of the original graph, the field 'neighbors[i]' contains
/// a bitmask representing the COMPLEMENT of the adjacency list of i in the
/// original graph. While this may seem a complicated take on the representation
/// of this problem instance, using the complement is helpful as it allows to
/// easily remove all the neighbors of a vertex from a state very efficiently.
neighbors: Vec<BitSet>,
/// For each vertex 'i', the value of 'weight[i]' denotes the weight associated
/// to vertex i in the problem instance. The goal of MISP is to select the nodes
/// from the underlying graph such that the resulting set is an independent set
/// where the sum of the weights of selected vertices is maximum.
weight: Vec<isize>,
}
/// A constant to mean take the node in the independent set.
const YES: isize = 1;
/// A constant to mean leave the node out of the independent set.
const NO: isize = 0;
/// The Misp class implements the 'Problem' trait. This means Misp is the definition
/// of the DP model. That DP model is pretty straightforward, still you might want
/// to check the implementation of the branching heuristic (next_variable method)
/// since it does interesting stuffs.
impl Problem for Misp {
type State = BitSet;
fn nb_variables(&self) -> usize {
self.nb_vars
}
fn initial_state(&self) -> Self::State {
(0..self.nb_variables()).collect()
}
fn initial_value(&self) -> isize {
0
}
fn transition(&self, state: &Self::State, decision: Decision) -> Self::State {
let mut res = state.clone();
res.remove(decision.variable.id());
if decision.value == YES {
// intersect with complement of the neighbors for fast set difference
res.intersect_with(&self.neighbors[decision.variable.id()]);
}
res
}
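// Illustrative example (not in the original): in a 4-vertex graph where vertex 0
// is adjacent to vertices 1 and 2, neighbors[0] = {0, 3} (the complement).
// Taking vertex 0 (value YES) from state {0, 1, 2, 3} first removes 0, giving
// {1, 2, 3}, then intersects with {0, 3}: the result {3} rules out both
// neighbors of vertex 0 in a single bitset operation.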
fn transition_cost(&self, _: &Self::State, decision: Decision) -> isize {
if decision.value == NO {
0
} else {
self.weight[decision.variable.id()]
}
}
fn | (&self, variable: Variable, state: &Self::State, f: &mut dyn DecisionCallback) {
if state.contains(variable.id()) {
f.apply(Decision{variable, value: YES});
f.apply(Decision{variable, value: NO });
} else {
f.apply(Decision{variable, value: NO });
}
}
/// This method is (apparently) a bit more hairy. It simply decides to branch on
/// the variable that occurs in the least number of states present in the next layer. The intuition
/// here is to limit the max width as much as possible when developing the layers, since all
/// nodes that are not impacted by the change on the selected vertex are simply copied over to the
/// next layer.
fn next_variable(&self, _: usize, next_layer: &mut dyn Iterator<Item = &Self::State>) -> Option<Variable> {
// The thread local stuff is possibly one of the most surprising bits of this code. It declares
// a static variable called VAR_HEURISTIC storing the counts of each vertex in the next layer.
// The fact that it is static means that it will not be re-created (re-allocated) upon each
// pass. The fact that it is declared within a thread_local block means that this static var
// will be created with a potentially mutable access for each thread.
thread_local! {
static VAR_HEURISTIC: RefCell<Vec<usize>> = RefCell::new(vec![]);
}
VAR_HEURISTIC.with(|heu| {
let mut heu = heu.borrow_mut();
let heu: &mut Vec<usize> = heu.as_mut();
// initialize
heu.reserve_exact(self.nb_variables());
if heu.is_empty() {
for _ in 0..self.nb_variables() { heu.push(0); }
} else {
heu.iter_mut().for_each(|i| *i = 0);
}
// count the occurrence of each var
for s in next_layer {
for sit in s.iter() {
heu[sit] += 1;
}
}
// take the one occurring the least often
heu.iter().copied().enumerate()
.filter(|(_, v)| *v > 0)
.min_by_key(|(_, v)| *v)
.map(|(x, _)| Variable(x))
})
}
fn is_impacted_by(&self, var: Variable, state: &Self::State) -> bool {
state.contains(var.id())
}
}
/// In addition to a dynamic programming (DP) model of the problem you want to solve,
/// the branch and bound with MDD algorithm (and thus ddo) requires that you provide
/// an additional relaxation that lets you control the maximum amount of space used by
/// the decision diagrams that are compiled.
///
/// That relaxation requires two operations: one to merge several nodes into one
/// merged node that acts as an over-approximation of the other nodes. The second
/// operation is used to possibly offset some weight that would otherwise be lost
/// to the arcs entering the newly created merged node.
///
/// The role of this very simple structure is simply to provide an implementation
/// of that relaxation.
///
/// # Note:
/// In addition to the aforementioned two operations, the MispRelax structure implements
/// an optional `fast_upper_bound` method, which provides a useful bound to
/// prune some portions of the state space as the decision diagrams are compiled
/// (aka rough upper bound pruning).
pub struct MispRelax<'a>{pb: &'a Misp}
impl Relaxation for MispRelax<'_> {
type State = BitSet;
fn merge(&self, states: &mut dyn Iterator<Item = &Self::State>) -> Self::State {
let mut state = BitSet::with_capacity(self.pb.nb_variables());
for s in states {
state.union_with(s);
}
state
}
fn relax(
&self,
_source: &Self::State,
_dest: &Self::State,
_new: &Self::State,
_decision: Decision,
cost: isize,
) -> isize {
cost
}
fn fast_upper_bound(&self, state: &Self::State) -> isize {
state.iter().map(|x| self.pb.weight[x]).sum()
}
}
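// Illustrative example (not in the original): if the remaining state is {1, 3}
// with weight[1] = 4 and weight[3] = 2, fast_upper_bound returns 6 -- the best
// case in which every remaining vertex could still join the independent set.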
/// The last bit of information which we need to provide when implementing a ddo-based
/// solver is a `StateRanking`. This is a heuristic which is used to select the most
/// and least promising nodes as a means to only delete/merge the *least* promising nodes
/// when compiling restricted and relaxed DDs.
pub struct MispRanking;
impl StateRanking for MispRanking {
type State = BitSet;
fn compare(&self, a: &Self::State, b: &Self::State) -> std::cmp::Ordering {
a.len().cmp(&b.len())
.then_with(|| a.cmp(b))
}
}
// #########################################################################################
// # THE INFORMATION BEYOND THIS LINE IS NOT DIRECTLY RELATED TO THE IMPLEMENTATION OF #
// # A SOLVER BASED ON DDO. INSTEAD, THAT PORTION OF THE CODE CONTAINS GENERIC FUNCTIONS #
// # THAT ARE USED TO READ AN INSTANCE FROM FILE, PROCESS COMMAND LINE ARGUMENTS, AND #
// # THE MAIN FUNCTION. THESE ARE THUS NOT REQUIRED 'PER-SE', BUT I BELIEVE IT IS USEFUL #
// # TO SHOW HOW IT CAN BE DONE IN AN EXAMPLE. #
// #########################################################################################
/// This structure uses `clap-derive` annotations and defines the arguments that can
/// be passed on to the executable solver.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The path to the instance file
fname: String,
/// The number of concurrent threads
#[clap(short, long, default_value = "8")]
threads: usize,
/// The maximum amount of time you would like this solver to run
#[clap(short, long)]
duration: Option<u64>,
/// The maximum number of nodes per layer
#[clap(short, long)]
width: Option<usize>,
}
/// This enumeration simply groups the kind of errors that might occur when parsing a
/// misp instance from file. There can be io errors (file unavailable?), format error
/// (e.g. the file is not an instance but contains the text of your next paper),
/// or parse int errors (which are actually a variant of the format error since it tells
/// you that the parser expected an integer number but got... something else).
#[derive(Debug, thiserror::Error)]
enum Error {
/// There was an io related error
#[error("io error {0}")]
Io(#[from] std::io::Error),
/// The parser expected to read something that was an integer but got some garbage
#[error("parse int {0}")]
ParseInt(#[from] ParseIntError),
/// The file was not properly formatted.
#[error("ill formed instance")]
Format,
}
/// This function is used to read a misp instance from file. It returns either a
/// misp instance if everything went well or an error describing the problem.
fn read_instance<P: AsRef<Path>>(fname: P) -> Result<Misp, Error> {
let f = File::open(fname)?;
let f = BufReader::new(f);
let comment = Regex::new(r"^c\s.*$").unwrap();
let pb_decl = Regex::new(r"^p\s+edge\s+(?P<vars>\d+)\s+(?P<edges>\d+)$").unwrap();
let node_decl = Regex::new(r"^n\s+(?P<node>\d+)\s+(?P<weight>-?\d+)").unwrap();
let edge_decl = Regex::new(r"^e\s+(?P<src>\d+)\s+(?P<dst>\d+)").unwrap();
let mut g = Misp{nb_vars: 0, neighbors: vec![], weight: vec![]};
for line in f.lines() {
let line = line?;
let line = line.trim();
if line.is_empty() {
continue;
}
if comment.is_match(line) {
continue;
}
if let Some(caps) = pb_decl.captures(line) {
let n = caps["vars"].to_string().parse::<usize>()?;
let full = (0..n).collect();
g.nb_vars = n;
g.neighbors = vec![full; n];
g.weight = vec![1; n];
continue;
}
if let Some(caps) = node_decl.captures(line) {
let n = caps["node"].to_string().parse::<usize>()?;
let w = caps["weight"].to_string().parse::<isize>()?;
let n = n - 1;
g.weight[n] = w;
continue;
}
if let Some(caps) = edge_decl.captures(line) {
let src = caps["src"].to_string().parse::<usize>()?;
let dst = caps["dst"].to_string().parse::<usize>()?;
let src = src-1;
let dst = dst-1;
g.neighbors[src].remove(dst);
g.neighbors[dst].remove(src);
continue;
}
// any other non-empty line is a format error
return Err(Error::Format)
}
Ok(g)
}
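// Illustrative instance in the expected DIMACS-like format (not from the
// original sources):
//
//   c an example with 3 vertices and 2 edges
//   p edge 3 2
//   n 1 5
//   e 1 2
//   e 2 3
//
// Vertices are 1-indexed in the file; vertex 1 gets weight 5 while the others
// keep the default weight of 1, and edges 1-2 and 2-3 are removed from the
// complement adjacency bitsets.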
/// A utility function to return a max width heuristic that can either be a fixed width
/// policy (if w is fixed) or an adaptive policy returning the number of unassigned variables
/// in the overall problem.
fn max_width<P: Problem>(p: &P, w: Option<usize>) -> Box<dyn WidthHeuristic<P::State> + Send + Sync> {
if let Some(w) = w {
Box::new(FixedWidth(w))
} else {
Box::new(NbUnassignedWidth(p.nb_variables()))
}
}
/// A utility function to return a cutoff heuristic that can either be a time budget policy
/// (if timeout is fixed) or no cutoff policy.
fn cutoff(timeout: Option<u64>) -> Box<dyn Cutoff + Send + Sync> {
if let Some(t) = timeout {
Box::new(TimeBudget::new(Duration::from_secs(t)))
} else {
Box::new(NoCutoff)
}
}
/// This is your executable's entry point. It is the place where all the pieces are put together
/// to create a fast and effective solver for the misp problem.
fn main() {
let args = Args::parse();
let fname = &args.fname;
let problem = read_instance(fname).unwrap();
let relaxation = MispRelax {pb: &problem};
let ranking = MispRanking;
let width = max_width(&problem, args.width);
let dominance = EmptyDominanceChecker::default();
let cutoff = cutoff(args.duration);
let mut fringe = NoDupFringe::new(MaxUB::new(&ranking));
// This solver compiles DDs that allow the definition of long arcs spanning several layers.
let mut solver = ParNoBarrierSolverLel::custom(
&problem,
&relaxation,
&ranking,
width.as_ref(),
&dominance,
cutoff.as_ref(),
&mut fringe,
args.threads,
);
let start = Instant::now();
let Completion{ is_exact, best_value } = solver.maximize();
let duration = start.elapsed();
let upper_bound = solver.best_upper_bound();
let lower_bound = solver.best_lower_bound();
let gap = solver.gap();
let best_solution: Option<Vec<_>> = solver.best_solution().map(|mut decisions|{
decisions.sort_unstable_by_key(|d| d.variable.id());
decisions.iter()
.filter(|d| d.value == 1)
.map(|d| d.variable.id())
.collect()
});
// check solution
if let Some(bs) = best_solution.as_ref() {
for (i, a) in bs.iter().copied().enumerate() {
for b in bs.iter().copied().skip(i+1) {
if !problem.neighbors[a].contains(b) {
println!("not a solution! {a} -- {b}");
}
}
}
}
println!("Duration: {:.3} seconds", duration.as_secs_f32());
println!("Objective: {}", best_value.unwrap_or(-1));
println!("Upper Bnd: {}", upper_bound);
println!("Lower Bnd: {}", lower_bound);
println!("Gap: {:.3}", gap);
println!("Aborted: {}", !is_exact);
println!("Solution: {:?}", best_solution.unwrap_or_default());
}
| for_each_in_domain | identifier_name |
main.rs | mod4
);
// Handling for the case where d is negative
let disc = if mod4 == 1 { d } else { 4 * d };
writeln_report!(r"$D = {}$となる.", disc);
disc
}
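// Illustrative example (not in the original): d = 5 satisfies d ≡ 1 (mod 4),
// so D = 5; d = -5 falls in the other branch, so D = 4 * (-5) = -20.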
fn is_int(x: f64) -> bool {
(x - x.round()).abs() < 1e-8
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_negative(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
// Determine the range of b (exclusive)
let maxb = {
let sqrt = (disc.abs() as f64 / 3.0).sqrt();
let is_int = is_int(sqrt);
// We want 1.3 -> 2, 2.9 -> 3, 4.0 -> 5.
//
// sqrt.floor() + 1.0 would also work, but the concern is that sqrt's limited
// precision could make an exact integer come out slightly small, shifting the
// floor result off by one.
let maxb = if is_int {
sqrt.round() + 1.0
} else {
sqrt.ceil()
} as i64;
writeln_report!(
r"\[ |b| \leqq \sqrt{{ \frac{{ |{disc}| }}{{ 3 }} }} = \sqrt{{ \frac{{ |{discabs}| }}{{ 3 }} }} {op} {maxb}. \]",
disc = disc,
discabs = disc.abs(),
op = if is_int { r"=" } else { "<" },
maxb = if is_int { maxb - 1 } else { maxb },
);
maxb
};
writeln_report!(
r"$4ac = b^2 + {}$より$b$は{}であるから,",
disc.abs(),
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = (0..maxb)
.filter(|x| x % 2 == disc.abs() % 2)
.flat_map(|x| vec![x, -x])
.dedup()
.collect_vec();
{
let nonzero = bs.iter().filter(|&&x| x > 0);
let has_zero = bs[0] == 0;
if bs.is_empty() {
writeln_report!(r"条件を満たす$b$はない.",);
return Err("no cands; is d = 1?".to_string());
}
if bs.len() == 1 {
writeln_report!(r"条件を満たす$b$は$b = 0$.",);
} else {
writeln_report!(
r"条件を満たす$b$は$b = {}$\pm {}$.", | }
}
writeln_report!();
writeln_report!(r"その上で$a \leqq c, c > 0$となるような$a, c$を求める.");
writeln_report!(r"\begin{{itemize}}");
// Find a and c satisfying the conditions.
let mut res = Vec::new();
for b in bs {
let do_report = b >= 0;
if do_report {
writeln_report!(
r"\item $b = {}{}$のとき \\",
if b != 0 { r"\pm " } else { "" },
b
);
}
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
if do_report {
writeln_report!(r"$4ac = {}$となり,これは整数解を持たない.", ac4);
}
continue;
}
let ac = ac4 / 4;
if do_report {
writeln_report!(r"$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!(r"よって$(a, c) = $");
}
let mut first = true;
for a in -ac..=ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
if a <= c && c > 0 {
if do_report {
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
}
res.push((a, b, c));
}
}
if do_report {
writeln_report!(".");
}
}
writeln_report!(r"\end{{itemize}}");
res.sort();
res.dedup();
res.sort_by_key(|&(a, b, c)| (a.abs(), b.abs(), c.abs()));
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// Check condition (B)
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = -a < b && b <= a && a < c;
let right = 0 <= b && b <= a && a == c;
if left {
writeln_report!(
r"これは左側の不等式${} < {} \leqq {} < {}$を満たす.",
-a,
b,
a,
c
);
return true;
}
if right {
writeln_report!(
r"これは右側の不等式$0 \leqq {} \leqq {} = {}$満たす.",
b,
a,
c
);
return true;
}
let left_failure = if !(-a < b) {
format!(r"$-a < b$について${} \not< {}$", -a, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a < c) {
format!(r"$a < c$について${} \not< {}$", a, c)
} else {
unreachable!()
};
let right_failure = if !(0 <= b) {
format!(r"$0 \leqq b$について${} \not\leqq {}$", 0, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a == c) {
format!(r"$a = c$について${} \neq {}$", a, c)
} else {
unreachable!()
};
writeln_report!("この組は左側の不等式では{}であり,右側の不等式では{}であるから,両方の不等式を満たさない.", left_failure, right_failure);
false
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_positive(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
assert!(disc > 0);
// Check condition (A)
// Get the candidate values of b (exclusive)
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
let minb = {
let sqrt = (disc as f64).sqrt();
// In principle this is only an integer when d = 1, so this case hardly needs thought.
let is_int = is_int(sqrt);
// We want 1.3 -> -2, -2.9 -> -3, 4.0 -> -4.
let minb = if is_int { -sqrt.round() } else { -sqrt.ceil() } as i64;
writeln_report!(
r"\[ 0 > b > -\sqrt{{ {disc} }} {op} {minb}. \]",
disc = disc,
op = if is_int { "=" } else { ">" },
minb = if is_int { minb - 1 } else { minb },
);
minb
};
writeln_report!(
r"$4ac = b^2 - {}$より$b$は{}であるから,",
disc,
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = ((minb + 1)..0).filter(|x| x.abs() % 2 == disc % 2);
if bs.clone().collect_vec().is_empty() {
writeln_report!(r"条件を満たす$b$はない.");
return Err("no cands".to_string());
}
writeln_report!(r"条件を満たす$b$は$b = $ ${}$.", bs.clone().format("$, $"));
// Find a and c
writeln_report!();
writeln_report!("その上で$a > 0, c < 0$となる$a, c$を求める.");
let mut res = Vec::new();
writeln_report!(r"\begin{{itemize}}");
for b in bs {
writeln_report!(r"\item $b = {}$のとき \\", b);
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
writeln_report!("$4ac = {}$となり,これは整数解を持たない.", ac4);
continue;
}
let ac = ac4 / 4;
writeln_report!("$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!("よって$(a, c) = $");
let mut first = true;
for a in 0..=-ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
assert!(c < 0);
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
res.push((a, b, c));
}
writeln_report!(".");
}
writeln_report!(r"\end{{itemize}}");
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// Check condition (B)
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = a + b + c < 0;
let leftopnot = if !left { r"\not" } else { "" };
let leftend = if left {
"を満たす."
} else {
"となるので不適."
};
let right = a - b + c > 0;
let rightopnot = if !right { r"\not" } else { "" };
let rightstart = if left && right {
"また"
} else {
"このとき"
};
let rightend = if right {
"を満たす."
} else {
"となるので不適."
};
if !left || (left && right) {
writeln_report!(
r"このとき$a + b + c = {} {:+} {:+} = {} {}< 0${}",
a,
b,
c,
a + b + c,
leftopnot,
leftend
);
}
if !right || (left && right) {
writeln_report!(
r"{}$a - b + c = {} {:+} {:+} = {} {}> 0${}",
rightstart,
a,
-b,
c,
a - b + c,
rightopnot,
rightend
);
}
left && right
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
// Check condition (C)
let res = remove_same_repeat(disc, &res);
writeln_report!();
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
fn remove_same_repeat(disc: i64, cands: &[(i64, i64, i64)]) -> Vec<(i64, i64, i64)> {
writeln_report!("");
writeln_report!("ここまでで得られた$(a, b, c)$の組は,");
writeln_report!(r"${:?}$.", cands.iter().format("$, $"));
writeln_report!(r"これを連分数展開し,循環節が同じものを除く.");
writeln_report!(r"連分数展開の途中に現れた分数を全て除けば良い.");
let cand_fracs = cands
.iter()
.map(|&(a, b, _)| Frac::from_abd(a, b, disc))
.collect_vec();
let map: HashMap<_, _> = cand_fracs
.iter()
.copied()
.zip(cands.iter().copied())
.collect();
let mut notfound: HashSet<_> = map.keys().collect();
let mut res = Vec::new();
for mut frac in cand_fracs {
if !notfound.contains(&frac) {
continue;
}
writeln_report!();
writeln_report!("${:?}$に対応する${}$を連分数展開する.", map[&frac], frac);
res.push(map[&frac]);
notfound.remove(&frac);
let mut obtained = HashSet::new();
while obtained.insert(frac) && !notfound.is_empty() {
write_report!(r"\[ {} = ", frac);
let int = frac.integer_part();
frac = frac.sub_int(int);
write_report!(r"{} + \left({}\right) = ", int, frac);
frac = frac.invert();
writeln_report!(r"{} + \frac{{ 1 }}{{ {} }}. \]", int, frac);
if notfound.contains(&frac) {
writeln_report!(
"${}$は${:?}$に対応するので,${:?}$は除く.",
frac,
map[&frac],
map[&frac]
);
notfound.remove(&frac);
}
}
if !notfound.is_empty() && obtained.contains(&frac) {
writeln_report!(
"ここで${}$は一度現れたので,この連分数はここから循環する.",
frac
);
}
}
res
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
struct Frac {
num: i64,
coeff: i64,
root: i64,
denom: i64,
}
impl fmt::Display for Frac {
fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result {
let coeff = if self.coeff == 1 {
"+".to_string()
} else if self.coeff == -1 {
"-".to_string()
} else {
format!("{:+}", self.coeff)
};
let num = format!(r"{} {}\sqrt{{ {} }}", self.num, coeff, self.root);
let frac = if self.denom == 1 {
num
} else {
format!(r"\frac{{ {} }}{{ {} }}", num, self.denom)
};
write!(b, "{}", frac)
}
}
impl Frac {
pub fn from_abd(a: i64, b: i64, disc: i64) -> Frac {
Frac::new(-b, 1, disc, 2 * a)
}
pub fn new(num: i64, coeff: i64, root: i64, denom: i64) -> Frac {
assert!(root > 0);
let mut f = Frac {
num,
coeff,
root,
denom,
};
f.normalize();
f
}
pub fn normalize(&mut self) {
self.normalize_root();
self.reduce();
if self.denom < 0 {
self.denom *= -1;
self.num *= -1;
self.coeff *= -1;
}
}
pub fn invert(self) -> Frac {
let denom = self.num * self.num - self.coeff * self.coeff * self.root;
let num = self.denom * self.num;
let coeff = -self.denom * self.coeff;
let root = self.root;
let mut res = Frac {
denom,
num,
coeff,
root,
};
res.normalize();
res
}
pub fn integer_part(self) -> i64 {
let num = self.num as f64 + self.coeff as f64 * (self.root as f64).sqrt();
let denom = self.denom as f64;
let float = num / denom;
if is_int(float) {
float.round() as i64
} else {
float.floor() as i64
}
}
pub fn sub_int(mut self, int: i64) -> Frac {
self.num -= int * self.denom;
self.normalize();
self
}
fn normalize_root(&mut self) {
while let Err(d) = has_square_factor(self.root) {
self.root /= d * d;
self.coeff *= d;
}
}
fn reduce(&mut self) {
let g = gcd(gcd(self.num, self.coeff), self.denom);
self.num /= g;
self.coeff /= g;
self.denom /= g;
}
}
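// Illustrative sketch (not in the original): expanding the golden ratio
// (1 + sqrt(5)) / 2 with this type. Frac::new(1, 1, 5, 2) has
// integer_part() == 1; sub_int(1) yields (-1 + sqrt(5)) / 2, and invert()
// maps that back to (1 + sqrt(5)) / 2, so the expansion is the purely
// periodic continued fraction [1; 1, 1, ...]. The same loop drives
// remove_same_repeat above.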
#[allow(clippy::collapsible_if)]
fn do_main(d: i64) -> Result<(), String> {
// if d.abs() > 999 {
// return Err(format!("input too large: {}", d));
// }
if d == 0 {
writeln_report!("$d = 0$ のときは考えない.");
return Err("d is zero".to_string());
}
if let Err(f) = has_square_factor(d) {
writeln_report!("$d = {}$は平方因子${}$を持つため,考えない.", d, f);
return Err(format!("{} has square factor: {}", d, f));
}
writeln_report!(r"このとき$d = {}$である.", d);
let disc = discriminant(d);
writeln_report!();
let res = if d < 0 {
calc_negative(disc)?
} else {
calc_positive(disc)?
};
if !MAKE_REPORT.load(Ordering::SeqCst) {
println!("d = {}: {} ({:?})", d, res | if has_zero { "0$, " } else { "$" },
nonzero.format(r"$, $\pm ")
); | random_line_split |
csr.rs | /*
Copyright 2020 Brandon Lucia <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
extern crate bit_vec;
extern crate csv;
extern crate rand;
use bit_vec::BitVec;
use memmap2::{MmapMut,Mmap};
use rand::Rng;
use rayon::prelude::*;
use std::fs::File;
use std::fs::OpenOptions;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use byte_slice_cast::*;
#[derive(Debug)]
pub struct CSR {
v: usize,
e: usize,
vtxprop: Vec<f64>,
offsets: Vec<usize>,
neighbs: Vec<usize>,
}
impl CSR {
pub fn get_vtxprop(&self) -> &[f64] {
&self.vtxprop
}
pub fn get_mut_vtxprop(&mut self) -> &mut [f64] {
&mut self.vtxprop
}
pub fn get_v(&self) -> usize {
self.v
}
pub fn get_e(&self) -> usize {
self.e
}
pub fn get_offsets(&self) -> &Vec<usize> {
&self.offsets
}
pub fn get_neighbs(&self) -> &[usize] {
&self.neighbs
}
/// Build a random edge list
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
pub fn random_el(numv: usize, maxe: usize) -> Vec<(usize, usize)> {
let mut rng = rand::thread_rng();
let mut el: Vec<(usize, usize)> = Vec::new();
for i in 0..numv {
/*edges per vertex*/
let num_e: usize = rng.gen_range(0, maxe) as usize;
for _ in 0..num_e {
let edge = (i as usize, rng.gen_range(0, numv) as usize);
el.push(edge);
}
}
el
}
/// Build an edge list from a file containing text describing one.
/// The file format is line oriented and human readable:
/// v0,v1
/// v0,v2
/// v0,v3
/// v0,v3
/// v1,v2
/// v1,v2
/// v2,v3
/// v3,v1
///...
///
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
pub fn el_from_file(path: &str) -> (usize, Vec<(usize, usize)>) {
let mut el: Vec<(usize, usize)> = Vec::new();
let mut maxv: usize = 0;
let f = File::open(path);
match f {
Ok(file) => {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(file);
for result in rdr.records() {
match result {
Ok(p) => {
let v0 = p.get(0).unwrap().parse::<usize>().unwrap();
let v1 = p.get(1).unwrap().parse::<usize>().unwrap();
if v0 > maxv {
maxv = v0
}
if v1 > maxv {
maxv = v1
}
el.push((v0, v1));
}
_ => {
eprintln!("Failed to parse file");
}
}
}
}
_ => {
eprintln!("Failed to open file {}", path);
}
}
(maxv + 1, el)
}
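// Illustrative usage (hypothetical file name, not from the original):
// let (numv, el) = CSR::el_from_file("graph.csv");
// let g = CSR::new(numv, el);
/// Build a CSR from a memory-mapped binary edge list: the file is
/// reinterpreted as a flat [usize] slice in which consecutive pairs are
/// (src, dst) edges, and `v` is the number of vertices.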
pub fn new_from_el_mmap(v: usize, f: String) -> CSR {
let path = PathBuf::from(f);
let file = OpenOptions::new()
.read(true)
.open(&path)
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let el = mmap[..]
.as_slice_of::<usize>()
.unwrap();
let mut ncnt = Vec::with_capacity(v);
for _ in 0..v {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.chunks(2).par_bridge().for_each(|e| {
ncnt[ e[0] ].fetch_add(1, Ordering::SeqCst);
});
let mut work_offsets = Vec::with_capacity(v);
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: v,
e: el.chunks(2).len(),
vtxprop: vec![0f64; v],
offsets: vec![0; v],
neighbs: vec![0; el.chunks(2).len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
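/* Reading the example above: vertex 1's neighbors live in neighbs[3..5]
(v1, v9), the half-open range between offsets[1] and offsets[2]. */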
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::with_capacity(el.chunks(2).len());
for _ in 0..el.chunks(2).len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.chunks(2).par_bridge().for_each(|e| {
let cur_ind = work_offsets[e[0]].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(e[1], Ordering::Relaxed);
});
g.neighbs.par_iter_mut().enumerate().for_each(|(i,e)| {
*e = nbs[i].load(Ordering::Relaxed);
}); |
}
/// Take an edge list in and produce a CSR out
/// (u,v)
pub fn new(numv: usize, ref el: Vec<(usize, usize)>) -> CSR {
const NUMCHUNKS: usize = 16;
let chunksz: usize = if numv > NUMCHUNKS {
numv / NUMCHUNKS
} else {
1
};
/*TODO: Parameter*/
let numbins = 16;
let mut ncnt = Vec::new();
for _ in 0..numv {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.par_chunks(chunksz).for_each(|cnk| {
/*Per-thread bin structure*/
let mut bins = Vec::new();
for _ in 0..numbins {
bins.push(Vec::<&(usize, usize)>::new());
}
/*iterate over chunk, push edges to bins*/
cnk.iter().for_each(|e| {
bins[e.0 % numbins].push(e);
});
bins.iter().for_each(|b| {
b.iter().for_each(|e| {
ncnt[(e).0].fetch_add(1, Ordering::SeqCst);
});
});
});
let mut work_offsets = Vec::new();
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: numv,
e: el.len(),
vtxprop: vec![0f64; numv],
offsets: vec![0; numv],
neighbs: vec![0; el.len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::new();
for _ in 0..el.len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.par_chunks(chunksz).for_each(|cnk| {
cnk.iter().for_each(|edge| match *edge {
(v0, v1) => {
let cur_ind = work_offsets[v0].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(v1, Ordering::Relaxed);
}
});
});
g.neighbs
.par_chunks_mut(chunksz)
.enumerate()
.for_each(|(chunkbase, cnk)| {
cnk.iter_mut().enumerate().for_each(|(i, e)| {
*e = nbs[chunkbase + i].load(Ordering::Relaxed);
});
});
/*return the graph, g*/
g
}
/// Get the range of offsets into the neighbs array that hold the neighbors
/// of vertex v
pub fn vtx_offset_range(&self, v: usize) -> (usize, usize) {
(
self.offsets[v],
match v {
v if v == self.v - 1 => self.e,
_ => self.offsets[v + 1],
},
)
}
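// Illustration tying this to the layout comment above (a sketch, not from
// the original source): with offsets |0,3,5,6,9| vertex 1's neighbors
// occupy neighbs[3..5]; the last vertex's range is capped by e rather
// than by a following offset.
#[cfg(test)]
fn example_offset_range(g: &CSR) {
let (start, end) = g.vtx_offset_range(1);
let neighbors_of_1 = &g.get_neighbs()[start..end];
println!("vertex 1 has {} neighbors", neighbors_of_1.len());
}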
/// read_only_scan is a read-only scan of all edges in the entire CSR
/// that accepts a FnMut(usize, usize) -> () to apply to each edge
pub fn read_only_scan(&self, mut f: impl FnMut(usize, usize) -> ()) {
/*Iterate over the vertices in the offsets array*/
let len = self.offsets.len();
for i in 0..len {
/*A vertex i's offsets in neighbs array are offsets[i] to offsets[i+1]*/
let (i_start, i_end) = self.vtx_offset_range(i);
/*Traverse vertex i's neighbs and call provided f(...) on the edge*/
for ei in i_start..i_end {
let e = self.neighbs[ei];
match e {
v1 => {
f(i, v1);
}
}
}
}
}
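// Hypothetical usage sketch: a read-only scan that counts every edge;
// the total must equal get_e() since each edge is visited exactly once.
#[cfg(test)]
fn example_read_only_scan(g: &CSR) {
let mut edge_count = 0usize;
g.read_only_scan(|_src, _dst| edge_count += 1);
assert_eq!(edge_count, g.get_e());
}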
pub fn write_fastcsr(&self, s: String) {
let path = PathBuf::from(s);
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
file.set_len((self.offsets.len() + self.neighbs.len() + 2) as u64 * 8)
.unwrap();
let mmap = unsafe { MmapMut::map_mut(&file) };
let offsets_bytes = unsafe { self.offsets.align_to::<u8>().1 };
let neighbs_bytes = unsafe { self.neighbs.align_to::<u8>().1 };
mmap.unwrap().copy_from_slice(
&[
&self.offsets.len().to_le_bytes(),
&self.neighbs.len().to_le_bytes(),
offsets_bytes,
neighbs_bytes,
]
.concat(),
);
}
/// bfs_traversal starts from vertex start and does a breadth first search
/// traversal on the vertices, applying f, the closure passed in, to each
/// vertex
pub fn bfs_traversal(&self, start: usize, mut f: impl FnMut(usize) -> ()) {
let mut visited = BitVec::from_elem(self.v, false);
let mut q = Vec::new();
visited.set(start, true);
q.push(start);
while q.len() > 0 {
let v = q.remove(0);
f(v);
let (st, en) = self.vtx_offset_range(v);
for nei in st..en {
/*Get the first element of the edge, which is the distal vertex*/
let ne = self.neighbs[nei] as usize;
match visited[ne] {
false => {
visited.set(ne, true);
q.push(ne as usize);
}
_ => (),
}
}
}
}
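// Hypothetical usage sketch: record the BFS visitation order starting
// from vertex 0; the start vertex is always visited first.
#[cfg(test)]
fn example_bfs(g: &CSR) {
let mut order = Vec::new();
g.bfs_traversal(0, |v| order.push(v));
assert_eq!(order.first(), Some(&0));
}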
pub fn par_scan(
&mut self,
par_level: usize,
f: impl Fn(usize, &[usize]) -> f64 + std::marker::Sync,
) -> () {
/*basically the number of threads to use*/
let chunksz: usize = if self.v > par_level {
self.v / par_level
} else {
1
};
let scan_vtx_row = |(row_i, vtx_row): (usize, &mut [f64])| {
let row_i_base: usize = row_i * chunksz;
vtx_row
.iter_mut()
.enumerate()
.for_each(|(ii, v): (usize, &mut f64)| {
let v0 = row_i_base + ii;
let (start, end) = self.vtx_offset_range(v0);
*v = f(v0, &self.neighbs[start..end]);
});
};
let mut vtxprop = vec![0.0; self.get_v()];
vtxprop
.par_chunks_mut(chunksz)
.enumerate()
.for_each(scan_vtx_row);
self.vtxprop.copy_from_slice(&vtxprop);
}
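// Hypothetical usage sketch: a parallel scan that stores each vertex's
// out-degree in vtxprop; the closure receives a vertex id and its
// neighbor slice and returns the new property value.
#[cfg(test)]
fn example_par_scan(g: &mut CSR) {
g.par_scan(16, |_v, neighbs| neighbs.len() as f64);
assert_eq!(g.get_vtxprop().len(), g.get_v());
}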
} /*impl CSR*/ |
/*return the graph, g*/
g | random_line_split |
csr.rs | /*
Copyright 2020 Brandon Lucia <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
extern crate bit_vec;
extern crate csv;
extern crate rand;
use bit_vec::BitVec;
use memmap2::{MmapMut,Mmap};
use rand::Rng;
use rayon::prelude::*;
use std::fs::File;
use std::fs::OpenOptions;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use byte_slice_cast::*;
#[derive(Debug)]
pub struct | {
v: usize,
e: usize,
vtxprop: Vec<f64>,
offsets: Vec<usize>,
neighbs: Vec<usize>,
}
impl CSR {
pub fn get_vtxprop(&self) -> &[f64] {
&self.vtxprop
}
pub fn get_mut_vtxprop(&mut self) -> &mut [f64] {
&mut self.vtxprop
}
pub fn get_v(&self) -> usize {
self.v
}
pub fn get_e(&self) -> usize {
self.e
}
pub fn get_offsets(&self) -> &Vec<usize> {
&self.offsets
}
pub fn get_neighbs(&self) -> &[usize] {
&self.neighbs
}
/// Build a random edge list
/// This method returns the edge list; el.len() is the number of edges.
pub fn random_el(numv: usize, maxe: usize) -> Vec<(usize, usize)> {
let mut rng = rand::thread_rng();
let mut el: Vec<(usize, usize)> = Vec::new();
for i in 0..numv {
/*edges per vertex*/
let num_e: usize = rng.gen_range(0, maxe) as usize;
for _ in 0..num_e {
let edge = (i as usize, rng.gen_range(0, numv) as usize);
el.push(edge);
}
}
el
}
/// Build an edge list from a file containing text describing one.
/// The file format is line oriented and human readable:
/// v0,v1
/// v0,v2
/// v0,v3
/// v0,v3
/// v1,v2
/// v1,v2
/// v2,v3
/// v3,v1
/// ...
///
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
pub fn el_from_file(path: &str) -> (usize, Vec<(usize, usize)>) {
let mut el: Vec<(usize, usize)> = Vec::new();
let mut maxv: usize = 0;
let f = File::open(path);
match f {
Ok(file) => {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(file);
for result in rdr.records() {
match result {
Ok(p) => {
let v0 = p.get(0).unwrap().parse::<usize>().unwrap();
let v1 = p.get(1).unwrap().parse::<usize>().unwrap();
if v0 > maxv {
maxv = v0
}
if v1 > maxv {
maxv = v1
}
el.push((v0, v1));
}
_ => {
eprintln!("Failed to parse file");
}
}
}
}
_ => {
eprintln!("Failed to open file {}", path);
}
}
(maxv + 1, el)
}
pub fn new_from_el_mmap(v: usize, f: String) -> CSR {
let path = PathBuf::from(f);
let file = OpenOptions::new()
.read(true)
.open(&path)
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let el = mmap[..]
.as_slice_of::<usize>()
.unwrap();
let mut ncnt = Vec::with_capacity(v);
for _ in 0..v {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.chunks(2).par_bridge().for_each(|e| {
ncnt[ e[0] ].fetch_add(1, Ordering::SeqCst);
});
let mut work_offsets = Vec::with_capacity(v);
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: v,
e: el.chunks(2).len(),
vtxprop: vec![0f64; v],
offsets: vec![0; v],
neighbs: vec![0; el.chunks(2).len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::with_capacity(el.chunks(2).len());
for _ in 0..el.chunks(2).len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.chunks(2).par_bridge().for_each(|e| {
let cur_ind = work_offsets[e[0]].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(e[1], Ordering::Relaxed);
});
g.neighbs.par_iter_mut().enumerate().for_each(|(i,e)| {
*e = nbs[i].load(Ordering::Relaxed);
});
/*return the graph, g*/
g
}
/// Take an edge list in and produce a CSR out
/// (u,v)
pub fn new(numv: usize, ref el: Vec<(usize, usize)>) -> CSR {
const NUMCHUNKS: usize = 16;
let chunksz: usize = if numv > NUMCHUNKS {
numv / NUMCHUNKS
} else {
1
};
/*TODO: Parameter*/
let numbins = 16;
let mut ncnt = Vec::new();
for _ in 0..numv {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.par_chunks(chunksz).for_each(|cnk| {
/*Per-thread bin structure*/
let mut bins = Vec::new();
for _ in 0..numbins {
bins.push(Vec::<&(usize, usize)>::new());
}
/*iterate over chunk, push edges to bins*/
cnk.iter().for_each(|e| {
bins[e.0 % numbins].push(e);
});
bins.iter().for_each(|b| {
b.iter().for_each(|e| {
ncnt[(e).0].fetch_add(1, Ordering::SeqCst);
});
});
});
let mut work_offsets = Vec::new();
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: numv,
e: el.len(),
vtxprop: vec![0f64; numv],
offsets: vec![0; numv],
neighbs: vec![0; el.len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::new();
for _ in 0..el.len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.par_chunks(chunksz).for_each(|cnk| {
cnk.iter().for_each(|edge| match *edge {
(v0, v1) => {
let cur_ind = work_offsets[v0].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(v1, Ordering::Relaxed);
}
});
});
g.neighbs
.par_chunks_mut(chunksz)
.enumerate()
.for_each(|(chunkbase, cnk)| {
cnk.iter_mut().enumerate().for_each(|(i, e)| {
*e = nbs[chunkbase + i].load(Ordering::Relaxed);
});
});
/*return the graph, g*/
g
}
/// Get the range of offsets into the neighbs array that hold the neighbors
/// of vertex v
pub fn vtx_offset_range(&self, v: usize) -> (usize, usize) {
(
self.offsets[v],
match v {
v if v == self.v - 1 => self.e,
_ => self.offsets[v + 1],
},
)
}
/// read_only_scan is a read-only scan of all edges in the entire CSR
/// that accepts a FnMut(usize, usize) -> () to apply to each edge
pub fn read_only_scan(&self, mut f: impl FnMut(usize, usize) -> ()) {
/*Iterate over the vertices in the offsets array*/
let len = self.offsets.len();
for i in 0..len {
/*A vertex i's offsets in neighbs array are offsets[i] to offsets[i+1]*/
let (i_start, i_end) = self.vtx_offset_range(i);
/*Traverse vertex i's neighbs and call provided f(...) on the edge*/
for ei in i_start..i_end {
let e = self.neighbs[ei];
match e {
v1 => {
f(i, v1);
}
}
}
}
}
pub fn write_fastcsr(&self, s: String) {
let path = PathBuf::from(s);
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
file.set_len((self.offsets.len() + self.neighbs.len() + 2) as u64 * 8)
.unwrap();
let mmap = unsafe { MmapMut::map_mut(&file) };
let offsets_bytes = unsafe { self.offsets.align_to::<u8>().1 };
let neighbs_bytes = unsafe { self.neighbs.align_to::<u8>().1 };
mmap.unwrap().copy_from_slice(
&[
&self.offsets.len().to_le_bytes(),
&self.neighbs.len().to_le_bytes(),
offsets_bytes,
neighbs_bytes,
]
.concat(),
);
}
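// Hypothetical reader for the layout written above (an assumption drawn
// from the code, not a documented format): two length words followed by
// the offsets and neighbs arrays, all native-width words, so this sketch
// assumes a 64-bit little-endian host.
#[cfg(test)]
fn example_read_fastcsr(path: &str) {
let file = File::open(path).unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let words = mmap[..].as_slice_of::<usize>().unwrap();
let (olen, nlen) = (words[0], words[1]);
let offsets = &words[2..2 + olen];
let neighbs = &words[2 + olen..2 + olen + nlen];
assert_eq!((offsets.len(), neighbs.len()), (olen, nlen));
}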
/// bfs_traversal starts from vertex start and does a breadth first search
/// traversal on the vertices, applying f, the closure passed in, to each
/// vertex
pub fn bfs_traversal(&self, start: usize, mut f: impl FnMut(usize) -> ()) {
let mut visited = BitVec::from_elem(self.v, false);
let mut q = Vec::new();
visited.set(start, true);
q.push(start);
while q.len() > 0 {
let v = q.remove(0);
f(v);
let (st, en) = self.vtx_offset_range(v);
for nei in st..en {
/*Get the first element of the edge, which is the distal vertex*/
let ne = self.neighbs[nei] as usize;
match visited[ne] {
false => {
visited.set(ne, true);
q.push(ne as usize);
}
_ => (),
}
}
}
}
pub fn par_scan(
&mut self,
par_level: usize,
f: impl Fn(usize, &[usize]) -> f64 + std::marker::Sync,
) -> () {
/*basically the number of threads to use*/
let chunksz: usize = if self.v > par_level {
self.v / par_level
} else {
1
};
let scan_vtx_row = |(row_i, vtx_row): (usize, &mut [f64])| {
let row_i_base: usize = row_i * chunksz;
vtx_row
.iter_mut()
.enumerate()
.for_each(|(ii, v): (usize, &mut f64)| {
let v0 = row_i_base + ii;
let (start, end) = self.vtx_offset_range(v0);
*v = f(v0, &self.neighbs[start..end]);
});
};
let mut vtxprop = vec![0.0; self.get_v()];
vtxprop
.par_chunks_mut(chunksz)
.enumerate()
.for_each(scan_vtx_row);
self.vtxprop.copy_from_slice(&vtxprop);
}
} /*impl CSR*/
| CSR | identifier_name |
csr.rs | /*
Copyright 2020 Brandon Lucia <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
extern crate bit_vec;
extern crate csv;
extern crate rand;
use bit_vec::BitVec;
use memmap2::{MmapMut,Mmap};
use rand::Rng;
use rayon::prelude::*;
use std::fs::File;
use std::fs::OpenOptions;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use byte_slice_cast::*;
#[derive(Debug)]
pub struct CSR {
v: usize,
e: usize,
vtxprop: Vec<f64>,
offsets: Vec<usize>,
neighbs: Vec<usize>,
}
impl CSR {
pub fn get_vtxprop(&self) -> &[f64] {
&self.vtxprop
}
pub fn get_mut_vtxprop(&mut self) -> &mut [f64] |
pub fn get_v(&self) -> usize {
self.v
}
pub fn get_e(&self) -> usize {
self.e
}
pub fn get_offsets(&self) -> &Vec<usize> {
&self.offsets
}
pub fn get_neighbs(&self) -> &[usize] {
&self.neighbs
}
/// Build a random edge list
/// This method returns the edge list; el.len() is the number of edges.
pub fn random_el(numv: usize, maxe: usize) -> Vec<(usize, usize)> {
let mut rng = rand::thread_rng();
let mut el: Vec<(usize, usize)> = Vec::new();
for i in 0..numv {
/*edges per vertex*/
let num_e: usize = rng.gen_range(0, maxe) as usize;
for _ in 0..num_e {
let edge = (i as usize, rng.gen_range(0, numv) as usize);
el.push(edge);
}
}
el
}
/// Build an edge list from a file containing text describing one.
/// The file format is line oriented and human readable:
/// v0,v1
/// v0,v2
/// v0,v3
/// v0,v3
/// v1,v2
/// v1,v2
/// v2,v3
/// v3,v1
/// ...
///
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
pub fn el_from_file(path: &str) -> (usize, Vec<(usize, usize)>) {
let mut el: Vec<(usize, usize)> = Vec::new();
let mut maxv: usize = 0;
let f = File::open(path);
match f {
Ok(file) => {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(file);
for result in rdr.records() {
match result {
Ok(p) => {
let v0 = p.get(0).unwrap().parse::<usize>().unwrap();
let v1 = p.get(1).unwrap().parse::<usize>().unwrap();
if v0 > maxv {
maxv = v0
}
if v1 > maxv {
maxv = v1
}
el.push((v0, v1));
}
_ => {
eprintln!("Failed to parse file");
}
}
}
}
_ => {
eprintln!("Failed to open file {}", path);
}
}
(maxv + 1, el)
}
pub fn new_from_el_mmap(v: usize, f: String) -> CSR {
let path = PathBuf::from(f);
let file = OpenOptions::new()
.read(true)
.open(&path)
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let el = mmap[..]
.as_slice_of::<usize>()
.unwrap();
let mut ncnt = Vec::with_capacity(v);
for _ in 0..v {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.chunks(2).par_bridge().for_each(|e| {
ncnt[ e[0] ].fetch_add(1, Ordering::SeqCst);
});
let mut work_offsets = Vec::with_capacity(v);
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: v,
e: el.chunks(2).len(),
vtxprop: vec![0f64; v],
offsets: vec![0; v],
neighbs: vec![0; el.chunks(2).len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::with_capacity(el.chunks(2).len());
for _ in 0..el.chunks(2).len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.chunks(2).par_bridge().for_each(|e| {
let cur_ind = work_offsets[e[0]].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(e[1], Ordering::Relaxed);
});
g.neighbs.par_iter_mut().enumerate().for_each(|(i,e)| {
*e = nbs[i].load(Ordering::Relaxed);
});
/*return the graph, g*/
g
}
/// Take an edge list in and produce a CSR out
/// (u,v)
pub fn new(numv: usize, ref el: Vec<(usize, usize)>) -> CSR {
const NUMCHUNKS: usize = 16;
let chunksz: usize = if numv > NUMCHUNKS {
numv / NUMCHUNKS
} else {
1
};
/*TODO: Parameter*/
let numbins = 16;
let mut ncnt = Vec::new();
for _ in 0..numv {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.par_chunks(chunksz).for_each(|cnk| {
/*Per-thread bin structure*/
let mut bins = Vec::new();
for _ in 0..numbins {
bins.push(Vec::<&(usize, usize)>::new());
}
/*iterate over chunk, push edges to bins*/
cnk.iter().for_each(|e| {
bins[e.0 % numbins].push(e);
});
bins.iter().for_each(|b| {
b.iter().for_each(|e| {
ncnt[(e).0].fetch_add(1, Ordering::SeqCst);
});
});
});
let mut work_offsets = Vec::new();
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: numv,
e: el.len(),
vtxprop: vec![0f64; numv],
offsets: vec![0; numv],
neighbs: vec![0; el.len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::new();
for _ in 0..el.len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.par_chunks(chunksz).for_each(|cnk| {
cnk.iter().for_each(|edge| match *edge {
(v0, v1) => {
let cur_ind = work_offsets[v0].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(v1, Ordering::Relaxed);
}
});
});
g.neighbs
.par_chunks_mut(chunksz)
.enumerate()
.for_each(|(chunkbase, cnk)| {
cnk.iter_mut().enumerate().for_each(|(i, e)| {
*e = nbs[chunkbase + i].load(Ordering::Relaxed);
});
});
/*return the graph, g*/
g
}
/// Get the range of offsets into the neighbs array that hold the neighbors
/// of vertex v
pub fn vtx_offset_range(&self, v: usize) -> (usize, usize) {
(
self.offsets[v],
match v {
v if v == self.v - 1 => self.e,
_ => self.offsets[v + 1],
},
)
}
/// read_only_scan is a read-only scan of all edges in the entire CSR
/// that accepts a FnMut(usize, usize) -> () to apply to each edge
pub fn read_only_scan(&self, mut f: impl FnMut(usize, usize) -> ()) {
/*Iterate over the vertices in the offsets array*/
let len = self.offsets.len();
for i in 0..len {
/*A vertex i's offsets in neighbs array are offsets[i] to offsets[i+1]*/
let (i_start, i_end) = self.vtx_offset_range(i);
/*Traverse vertex i's neighbs and call provided f(...) on the edge*/
for ei in i_start..i_end {
let e = self.neighbs[ei];
match e {
v1 => {
f(i, v1);
}
}
}
}
}
pub fn write_fastcsr(&self, s: String) {
let path = PathBuf::from(s);
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
file.set_len((self.offsets.len() + self.neighbs.len() + 2) as u64 * 8)
.unwrap();
let mmap = unsafe { MmapMut::map_mut(&file) };
let offsets_bytes = unsafe { self.offsets.align_to::<u8>().1 };
let neighbs_bytes = unsafe { self.neighbs.align_to::<u8>().1 };
mmap.unwrap().copy_from_slice(
&[
&self.offsets.len().to_le_bytes(),
&self.neighbs.len().to_le_bytes(),
offsets_bytes,
neighbs_bytes,
]
.concat(),
);
}
/// bfs_traversal starts from vertex start and does a breadth first search
/// traversal on the vertices, applying f, the closure passed in, to each
/// vertex
pub fn bfs_traversal(&self, start: usize, mut f: impl FnMut(usize) -> ()) {
let mut visited = BitVec::from_elem(self.v, false);
let mut q = Vec::new();
visited.set(start, true);
q.push(start);
while q.len() > 0 {
let v = q.remove(0);
f(v);
let (st, en) = self.vtx_offset_range(v);
for nei in st..en {
/*Get the first element of the edge, which is the distal vertex*/
let ne = self.neighbs[nei] as usize;
match visited[ne] {
false => {
visited.set(ne, true);
q.push(ne as usize);
}
_ => (),
}
}
}
}
pub fn par_scan(
&mut self,
par_level: usize,
f: impl Fn(usize, &[usize]) -> f64 + std::marker::Sync,
) -> () {
/*basically the number of threads to use*/
let chunksz: usize = if self.v > par_level {
self.v / par_level
} else {
1
};
let scan_vtx_row = |(row_i, vtx_row): (usize, &mut [f64])| {
let row_i_base: usize = row_i * chunksz;
vtx_row
.iter_mut()
.enumerate()
.for_each(|(ii, v): (usize, &mut f64)| {
let v0 = row_i_base + ii;
let (start, end) = self.vtx_offset_range(v0);
*v = f(v0, &self.neighbs[start..end]);
});
};
let mut vtxprop = vec![0.0; self.get_v()];
vtxprop
.par_chunks_mut(chunksz)
.enumerate()
.for_each(scan_vtx_row);
self.vtxprop.copy_from_slice(&vtxprop);
}
} /*impl CSR*/
| {
&mut self.vtxprop
} | identifier_body |
main.rs | #[macro_use]
extern crate clap;
extern crate ansi_term;
extern crate atty;
extern crate regex;
extern crate ignore;
extern crate num_cpus;
pub mod lscolors;
pub mod fshelper;
mod app;
use std::env;
use std::error::Error;
use std::io::Write;
use std::ops::Deref;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
use std::time;
use atty::Stream;
use regex::{Regex, RegexBuilder};
use ignore::WalkBuilder;
use lscolors::LsColors;
/// Defines how to display search result paths.
#[derive(PartialEq)]
enum PathDisplay {
/// As an absolute path
Absolute,
/// As a relative path
Relative,
}
/// The type of file to search for.
#[derive(Copy, Clone)]
enum FileType {
Any,
RegularFile,
Directory,
SymLink,
}
/// Configuration options for *fd*.
struct FdOptions {
/// Determines whether the regex search is case-sensitive or case-insensitive.
case_sensitive: bool,
/// Whether to search within the full file path or just the base name (filename or directory
/// name).
search_full_path: bool,
/// Whether to ignore hidden files and directories (or not).
ignore_hidden: bool,
/// Whether to respect VCS ignore files (`.gitignore`, `.ignore`,..) or not.
read_ignore: bool,
/// Whether to follow symlinks or not.
follow_links: bool,
/// Whether elements of output should be separated by a null character
null_separator: bool,
/// The maximum search depth, or `None` if no maximum search depth should be set.
///
/// A depth of `1` includes all files under the current directory, a depth of `2` also includes
/// all files under subdirectories of the current directory, etc.
max_depth: Option<usize>,
/// The number of threads to use.
threads: usize,
/// Time to buffer results internally before streaming to the console. This is useful to
/// provide a sorted output, in case the total execution time is shorter than
/// `max_buffer_time`.
max_buffer_time: Option<time::Duration>,
/// Display results as relative or absolute path.
path_display: PathDisplay,
/// `None` if the output should not be colorized. Otherwise, a `LsColors` instance that defines
/// how to style different filetypes.
ls_colors: Option<LsColors>,
/// The type of file to search for. All files other than the specified type will be ignored.
file_type: FileType,
/// The extension to search for. Only entries matching the extension will be included.
///
/// The value (if present) will be a lowercase string without leading dots.
extension: Option<String>,
}
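// A minimal sketch (not part of fd itself) of the max_depth semantics
// documented above, using the same ignore::WalkBuilder the search uses:
// Some(1) yields the search root and its direct children only.
#[cfg(test)]
fn example_max_depth() {
    let walker = WalkBuilder::new(".").max_depth(Some(1)).build();
    for entry in walker.flatten() {
        println!("{}", entry.path().display());
    }
}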
/// The receiver thread can either be buffering results or directly streaming to the console.
enum ReceiverMode {
/// Receiver is still buffering in order to sort the results, if the search finishes fast
/// enough.
Buffering,
/// Receiver is directly printing results to the output.
Streaming,
}
/// Root directory
#[cfg(unix)]
static ROOT_DIR: &'static str = "/";
#[cfg(windows)]
static ROOT_DIR: &'static str = "";
/// Print a search result to the console.
fn print_entry(base: &Path, entry: &PathBuf, config: &FdOptions) {
let path_full = base.join(entry);
let path_str = entry.to_string_lossy();
#[cfg(unix)]
let is_executable = |p: Option<&std::fs::Metadata>| {
p.map(|f| f.permissions().mode() & 0o111 != 0)
.unwrap_or(false)
};
#[cfg(windows)]
let is_executable = |_: Option<&std::fs::Metadata>| false;
let stdout = std::io::stdout();
let mut handle = stdout.lock();
if let Some(ref ls_colors) = config.ls_colors {
let default_style = ansi_term::Style::default();
let mut component_path = base.to_path_buf();
if config.path_display == PathDisplay::Absolute {
print!("{}", ls_colors.directory.paint(ROOT_DIR));
}
// Traverse the path and colorize each component
for component in entry.components() {
let comp_str = component.as_os_str().to_string_lossy();
component_path.push(Path::new(comp_str.deref()));
let metadata = component_path.metadata().ok();
let is_directory = metadata.as_ref().map(|md| md.is_dir()).unwrap_or(false);
let style =
if component_path.symlink_metadata()
.map(|md| md.file_type().is_symlink())
.unwrap_or(false) {
&ls_colors.symlink
} else if is_directory {
&ls_colors.directory
} else if is_executable(metadata.as_ref()) {
&ls_colors.executable
} else {
// Look up file name
let o_style =
component_path.file_name()
.and_then(|n| n.to_str())
.and_then(|n| ls_colors.filenames.get(n));
match o_style {
Some(s) => s,
None =>
// Look up file extension
component_path.extension()
.and_then(|e| e.to_str())
.and_then(|e| ls_colors.extensions.get(e))
.unwrap_or(&default_style)
}
};
write!(handle, "{}", style.paint(comp_str)).ok();
if is_directory && component_path != path_full {
let sep = std::path::MAIN_SEPARATOR.to_string();
write!(handle, "{}", style.paint(sep)).ok();
}
}
let r = if config.null_separator {
write!(handle, "\0")
} else {
writeln!(handle, "")
};
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
} else {
// Uncolorized output
let prefix = if config.path_display == PathDisplay::Absolute { ROOT_DIR } else { "" };
let separator = if config.null_separator { "\0" } else { "\n" }; |
let r = write!(&mut std::io::stdout(), "{}{}{}", prefix, path_str, separator);
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
}
}
/// Recursively scan the given search path and search for files / pathnames matching the pattern.
fn scan(root: &Path, pattern: Arc<Regex>, base: &Path, config: Arc<FdOptions>) {
let (tx, rx) = channel();
let walker = WalkBuilder::new(root)
.hidden(config.ignore_hidden)
.ignore(config.read_ignore)
.git_ignore(config.read_ignore)
.parents(config.read_ignore)
.git_global(config.read_ignore)
.git_exclude(config.read_ignore)
.follow_links(config.follow_links)
.max_depth(config.max_depth)
.threads(config.threads)
.build_parallel();
// Spawn the thread that receives all results through the channel.
let rx_config = Arc::clone(&config);
let rx_base = base.to_owned();
let receiver_thread = thread::spawn(move || {
let start = time::Instant::now();
let mut buffer = vec![];
// Start in buffering mode
let mut mode = ReceiverMode::Buffering;
// Maximum time to wait before we start streaming to the console.
let max_buffer_time = rx_config.max_buffer_time
.unwrap_or_else(|| time::Duration::from_millis(100));
for value in rx {
match mode {
ReceiverMode::Buffering => {
buffer.push(value);
// Have we reached the maximum time?
if time::Instant::now() - start > max_buffer_time {
// Flush the buffer
for v in &buffer {
print_entry(&rx_base, v, &rx_config);
}
buffer.clear();
// Start streaming
mode = ReceiverMode::Streaming;
}
},
ReceiverMode::Streaming => {
print_entry(&rx_base, &value, &rx_config);
}
}
}
// If we have finished fast enough (faster than max_buffer_time), we haven't streamed
// anything to the console, yet. In this case, sort the results and print them:
if !buffer.is_empty() {
buffer.sort();
for value in buffer {
print_entry(&rx_base, &value, &rx_config);
}
}
});
// Spawn the sender threads.
walker.run(|| {
let base = base.to_owned();
let config = Arc::clone(&config);
let pattern = Arc::clone(&pattern);
let tx_thread = tx.clone();
Box::new(move |entry_o| {
let entry = match entry_o {
Ok(e) => e,
Err(_) => return ignore::WalkState::Continue,
};
// Filter out unwanted file types.
match config.file_type {
FileType::Any => (),
FileType::RegularFile => if entry.file_type().map_or(false, |ft| !ft.is_file()) {
return ignore::WalkState::Continue;
},
FileType::Directory => if entry.file_type().map_or(false, |ft| !ft.is_dir()) {
return ignore::WalkState::Continue;
},
FileType::SymLink => if entry.file_type().map_or(false, |ft| !ft.is_symlink()) {
return ignore::WalkState::Continue;
},
}
// Filter out unwanted extensions.
if let Some(ref filter_ext) = config.extension {
let entry_ext = entry.path().extension().map(|e| e.to_string_lossy().to_lowercase());
if entry_ext.map_or(true, |ext| ext != *filter_ext) {
return ignore::WalkState::Continue;
}
}
let path_rel_buf = match fshelper::path_relative_from(entry.path(), &*base) {
Some(p) => p,
None => error("Error: could not get relative path for directory entry.")
};
let path_rel = path_rel_buf.as_path();
let search_str_o =
if config.search_full_path {
Some(path_rel.to_string_lossy())
} else {
path_rel.file_name()
.map(|f| f.to_string_lossy())
};
if let Some(search_str) = search_str_o {
// TODO: take care of the unwrap call
pattern.find(&*search_str)
.map(|_| tx_thread.send(path_rel_buf.to_owned()).unwrap());
}
ignore::WalkState::Continue
})
});
// Drop the initial sender. If we don't do this, the receiver will block even
// if all threads have finished, since there is still one sender around.
drop(tx);
// Wait for the receiver thread to print out all results.
receiver_thread.join().unwrap();
}
/// Print error message to stderr and exit with status `1`.
fn error(message: &str) -> ! {
writeln!(&mut std::io::stderr(), "{}", message).expect("Failed writing to stderr");
process::exit(1);
}
fn main() {
let matches = app::build_app().get_matches();
// Get the search pattern
let empty_pattern = String::new();
let pattern = matches.value_of("pattern").unwrap_or(&empty_pattern);
// Get the current working directory
let current_dir_buf = match env::current_dir() {
Ok(cd) => cd,
Err(_) => error("Error: could not get current directory.")
};
let current_dir = current_dir_buf.as_path();
// Get the root directory for the search
let mut root_dir_is_absolute = false;
let root_dir_buf = if let Some(rd) = matches.value_of("path") {
let path = Path::new(rd);
root_dir_is_absolute = path.is_absolute();
fshelper::absolute_path(path).unwrap_or_else(
|_| error(&format!("Error: could not find directory '{}'.", rd))
)
} else {
current_dir_buf.clone()
};
if !root_dir_buf.is_dir() {
error(&format!("Error: '{}' is not a directory.", root_dir_buf.to_string_lossy()));
}
let root_dir = root_dir_buf.as_path();
// The search will be case-sensitive if the command line flag is set or
// if the pattern has an uppercase character (smart case).
let case_sensitive = matches.is_present("case-sensitive") ||
pattern.chars().any(char::is_uppercase);
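// Smart-case illustration (hypothetical): a pattern such as "Foo"
// contains an uppercase character and therefore searches
// case-sensitively; "foo" stays case-insensitive unless
// --case-sensitive is passed.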
let colored_output = match matches.value_of("color") {
Some("always") => true,
Some("never") => false,
_ => atty::is(Stream::Stdout)
};
let ls_colors =
if colored_output {
Some(
env::var("LS_COLORS")
.ok()
.map(|val| LsColors::from_string(&val))
.unwrap_or_default()
)
} else {
None
};
let config = FdOptions {
case_sensitive: case_sensitive,
search_full_path: matches.is_present("full-path"),
ignore_hidden: !matches.is_present("hidden"),
read_ignore: !matches.is_present("no-ignore"),
follow_links: matches.is_present("follow"),
null_separator: matches.is_present("null_separator"),
max_depth: matches.value_of("depth")
.and_then(|n| usize::from_str_radix(n, 10).ok()),
threads: std::cmp::max(
matches.value_of("threads")
.and_then(|n| usize::from_str_radix(n, 10).ok())
.unwrap_or_else(num_cpus::get),
1
),
max_buffer_time: matches.value_of("max-buffer-time")
.and_then(|n| u64::from_str_radix(n, 10).ok())
.map(time::Duration::from_millis),
path_display: if matches.is_present("absolute-path") || root_dir_is_absolute {
PathDisplay::Absolute
} else {
PathDisplay::Relative
},
ls_colors: ls_colors,
file_type: match matches.value_of("file-type") {
Some("f") | Some("file") => FileType::RegularFile,
Some("d") | Some("directory") => FileType::Directory,
Some("l") | Some("symlink") => FileType::SymLink,
_ => FileType::Any,
},
extension: matches.value_of("extension")
.map(|e| e.trim_left_matches('.').to_lowercase()),
};
let root = Path::new(ROOT_DIR);
let base = match config.path_display {
PathDisplay::Relative => current_dir,
PathDisplay::Absolute => root
};
match RegexBuilder::new(pattern)
.case_insensitive(!config.case_sensitive)
.build() {
Ok(re) => scan(root_dir, Arc::new(re), base, Arc::new(config)),
Err(err) => error(err.description())
}
} | random_line_split |
|
main.rs | #[macro_use]
extern crate clap;
extern crate ansi_term;
extern crate atty;
extern crate regex;
extern crate ignore;
extern crate num_cpus;
pub mod lscolors;
pub mod fshelper;
mod app;
use std::env;
use std::error::Error;
use std::io::Write;
use std::ops::Deref;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
use std::time;
use atty::Stream;
use regex::{Regex, RegexBuilder};
use ignore::WalkBuilder;
use lscolors::LsColors;
/// Defines how to display search result paths.
#[derive(PartialEq)]
enum PathDisplay {
/// As an absolute path
Absolute,
/// As a relative path
Relative,
}
/// The type of file to search for.
#[derive(Copy, Clone)]
enum FileType {
Any,
RegularFile,
Directory,
SymLink,
}
/// Configuration options for *fd*.
struct FdOptions {
/// Determines whether the regex search is case-sensitive or case-insensitive.
case_sensitive: bool,
/// Whether to search within the full file path or just the base name (filename or directory
/// name).
search_full_path: bool,
/// Whether to ignore hidden files and directories (or not).
ignore_hidden: bool,
/// Whether to respect VCS ignore files (`.gitignore`, `.ignore`,..) or not.
read_ignore: bool,
/// Whether to follow symlinks or not.
follow_links: bool,
/// Whether elements of output should be separated by a null character
null_separator: bool,
/// The maximum search depth, or `None` if no maximum search depth should be set.
///
/// A depth of `1` includes all files under the current directory, a depth of `2` also includes
/// all files under subdirectories of the current directory, etc.
max_depth: Option<usize>,
/// The number of threads to use.
threads: usize,
/// Time to buffer results internally before streaming to the console. This is useful to
/// provide a sorted output, in case the total execution time is shorter than
/// `max_buffer_time`.
max_buffer_time: Option<time::Duration>,
/// Display results as relative or absolute path.
path_display: PathDisplay,
/// `None` if the output should not be colorized. Otherwise, a `LsColors` instance that defines
/// how to style different filetypes.
ls_colors: Option<LsColors>,
/// The type of file to search for. All files other than the specified type will be ignored.
file_type: FileType,
/// The extension to search for. Only entries matching the extension will be included.
///
/// The value (if present) will be a lowercase string without leading dots.
extension: Option<String>,
}
/// The receiver thread can either be buffering results or directly streaming to the console.
enum ReceiverMode {
/// Receiver is still buffering in order to sort the results, if the search finishes fast
/// enough.
Buffering,
/// Receiver is directly printing results to the output.
Streaming,
}
/// Root directory
#[cfg(unix)]
static ROOT_DIR: &'static str = "/";
#[cfg(windows)]
static ROOT_DIR: &'static str = "";
/// Print a search result to the console.
fn print_entry(base: &Path, entry: &PathBuf, config: &FdOptions) {
let path_full = base.join(entry);
let path_str = entry.to_string_lossy();
#[cfg(unix)]
let is_executable = |p: Option<&std::fs::Metadata>| {
p.map(|f| f.permissions().mode() & 0o111 != 0)
.unwrap_or(false)
};
#[cfg(windows)]
let is_executable = |_: Option<&std::fs::Metadata>| false;
let stdout = std::io::stdout();
let mut handle = stdout.lock();
if let Some(ref ls_colors) = config.ls_colors {
let default_style = ansi_term::Style::default();
let mut component_path = base.to_path_buf();
if config.path_display == PathDisplay::Absolute {
print!("{}", ls_colors.directory.paint(ROOT_DIR));
}
// Traverse the path and colorize each component
for component in entry.components() {
let comp_str = component.as_os_str().to_string_lossy();
component_path.push(Path::new(comp_str.deref()));
let metadata = component_path.metadata().ok();
let is_directory = metadata.as_ref().map(|md| md.is_dir()).unwrap_or(false);
let style =
if component_path.symlink_metadata()
.map(|md| md.file_type().is_symlink())
.unwrap_or(false) | else if is_directory {
&ls_colors.directory
} else if is_executable(metadata.as_ref()) {
&ls_colors.executable
} else {
// Look up file name
let o_style =
component_path.file_name()
.and_then(|n| n.to_str())
.and_then(|n| ls_colors.filenames.get(n));
match o_style {
Some(s) => s,
None =>
// Look up file extension
component_path.extension()
.and_then(|e| e.to_str())
.and_then(|e| ls_colors.extensions.get(e))
.unwrap_or(&default_style)
}
};
write!(handle, "{}", style.paint(comp_str)).ok();
if is_directory && component_path != path_full {
let sep = std::path::MAIN_SEPARATOR.to_string();
write!(handle, "{}", style.paint(sep)).ok();
}
}
let r = if config.null_separator {
write!(handle, "\0")
} else {
writeln!(handle, "")
};
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
} else {
// Uncolorized output
let prefix = if config.path_display == PathDisplay::Absolute { ROOT_DIR } else { "" };
let separator = if config.null_separator { "\0" } else { "\n" };
let r = write!(&mut std::io::stdout(), "{}{}{}", prefix, path_str, separator);
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
}
}
/// Recursively scan the given search path and search for files / pathnames matching the pattern.
fn scan(root: &Path, pattern: Arc<Regex>, base: &Path, config: Arc<FdOptions>) {
let (tx, rx) = channel();
let walker = WalkBuilder::new(root)
.hidden(config.ignore_hidden)
.ignore(config.read_ignore)
.git_ignore(config.read_ignore)
.parents(config.read_ignore)
.git_global(config.read_ignore)
.git_exclude(config.read_ignore)
.follow_links(config.follow_links)
.max_depth(config.max_depth)
.threads(config.threads)
.build_parallel();
// Spawn the thread that receives all results through the channel.
let rx_config = Arc::clone(&config);
let rx_base = base.to_owned();
let receiver_thread = thread::spawn(move || {
let start = time::Instant::now();
let mut buffer = vec![];
// Start in buffering mode
let mut mode = ReceiverMode::Buffering;
// Maximum time to wait before we start streaming to the console.
let max_buffer_time = rx_config.max_buffer_time
.unwrap_or_else(|| time::Duration::from_millis(100));
for value in rx {
match mode {
ReceiverMode::Buffering => {
buffer.push(value);
// Have we reached the maximum time?
if time::Instant::now() - start > max_buffer_time {
// Flush the buffer
for v in &buffer {
print_entry(&rx_base, v, &rx_config);
}
buffer.clear();
// Start streaming
mode = ReceiverMode::Streaming;
}
},
ReceiverMode::Streaming => {
print_entry(&rx_base, &value, &rx_config);
}
}
}
// If we have finished fast enough (faster than max_buffer_time), we haven't streamed
// anything to the console, yet. In this case, sort the results and print them:
if !buffer.is_empty() {
buffer.sort();
for value in buffer {
print_entry(&rx_base, &value, &rx_config);
}
}
});
// Spawn the sender threads.
walker.run(|| {
let base = base.to_owned();
let config = Arc::clone(&config);
let pattern = Arc::clone(&pattern);
let tx_thread = tx.clone();
Box::new(move |entry_o| {
let entry = match entry_o {
Ok(e) => e,
Err(_) => return ignore::WalkState::Continue,
};
// Filter out unwanted file types.
match config.file_type {
FileType::Any => (),
FileType::RegularFile => if entry.file_type().map_or(false, |ft| !ft.is_file()) {
return ignore::WalkState::Continue;
},
FileType::Directory => if entry.file_type().map_or(false, |ft| !ft.is_dir()) {
return ignore::WalkState::Continue;
},
FileType::SymLink => if entry.file_type().map_or(false, |ft| !ft.is_symlink()) {
return ignore::WalkState::Continue;
},
}
// Filter out unwanted extensions.
if let Some(ref filter_ext) = config.extension {
let entry_ext = entry.path().extension().map(|e| e.to_string_lossy().to_lowercase());
if entry_ext.map_or(true, |ext| ext != *filter_ext) {
return ignore::WalkState::Continue;
}
}
let path_rel_buf = match fshelper::path_relative_from(entry.path(), &*base) {
Some(p) => p,
None => error("Error: could not get relative path for directory entry.")
};
let path_rel = path_rel_buf.as_path();
let search_str_o =
if config.search_full_path {
Some(path_rel.to_string_lossy())
} else {
path_rel.file_name()
.map(|f| f.to_string_lossy())
};
if let Some(search_str) = search_str_o {
// TODO: take care of the unwrap call
pattern.find(&*search_str)
.map(|_| tx_thread.send(path_rel_buf.to_owned()).unwrap());
}
ignore::WalkState::Continue
})
});
// Drop the initial sender. If we don't do this, the receiver will block even
// if all threads have finished, since there is still one sender around.
drop(tx);
// Wait for the receiver thread to print out all results.
receiver_thread.join().unwrap();
}
/// Print error message to stderr and exit with status `1`.
fn error(message: &str) -> ! {
writeln!(&mut std::io::stderr(), "{}", message).expect("Failed writing to stderr");
process::exit(1);
}
fn main() {
let matches = app::build_app().get_matches();
// Get the search pattern
let empty_pattern = String::new();
let pattern = matches.value_of("pattern").unwrap_or(&empty_pattern);
// Get the current working directory
let current_dir_buf = match env::current_dir() {
Ok(cd) => cd,
Err(_) => error("Error: could not get current directory.")
};
let current_dir = current_dir_buf.as_path();
// Get the root directory for the search
let mut root_dir_is_absolute = false;
let root_dir_buf = if let Some(rd) = matches.value_of("path") {
let path = Path::new(rd);
root_dir_is_absolute = path.is_absolute();
fshelper::absolute_path(path).unwrap_or_else(
|_| error(&format!("Error: could not find directory '{}'.", rd))
)
} else {
current_dir_buf.clone()
};
if !root_dir_buf.is_dir() {
error(&format!("Error: '{}' is not a directory.", root_dir_buf.to_string_lossy()));
}
let root_dir = root_dir_buf.as_path();
// The search will be case-sensitive if the command line flag is set or
// if the pattern has an uppercase character (smart case).
let case_sensitive = matches.is_present("case-sensitive") ||
pattern.chars().any(char::is_uppercase);
let colored_output = match matches.value_of("color") {
Some("always") => true,
Some("never") => false,
_ => atty::is(Stream::Stdout)
};
let ls_colors =
if colored_output {
Some(
env::var("LS_COLORS")
.ok()
.map(|val| LsColors::from_string(&val))
.unwrap_or_default()
)
} else {
None
};
let config = FdOptions {
case_sensitive: case_sensitive,
search_full_path: matches.is_present("full-path"),
ignore_hidden: !matches.is_present("hidden"),
read_ignore: !matches.is_present("no-ignore"),
follow_links: matches.is_present("follow"),
null_separator: matches.is_present("null_separator"),
max_depth: matches.value_of("depth")
.and_then(|n| usize::from_str_radix(n, 10).ok()),
threads: std::cmp::max(
matches.value_of("threads")
.and_then(|n| usize::from_str_radix(n, 10).ok())
.unwrap_or_else(num_cpus::get),
1
),
max_buffer_time: matches.value_of("max-buffer-time")
.and_then(|n| u64::from_str_radix(n, 10).ok())
.map(time::Duration::from_millis),
path_display: if matches.is_present("absolute-path") || root_dir_is_absolute {
PathDisplay::Absolute
} else {
PathDisplay::Relative
},
ls_colors: ls_colors,
file_type: match matches.value_of("file-type") {
Some("f") | Some("file") => FileType::RegularFile,
Some("d") | Some("directory") => FileType::Directory,
Some("l") | Some("symlink") => FileType::SymLink,
_ => FileType::Any,
},
extension: matches.value_of("extension")
.map(|e| e.trim_left_matches('.').to_lowercase()),
};
let root = Path::new(ROOT_DIR);
let base = match config.path_display {
PathDisplay::Relative => current_dir,
PathDisplay::Absolute => root
};
match RegexBuilder::new(pattern)
.case_insensitive(!config.case_sensitive)
.build() {
Ok(re) => scan(root_dir, Arc::new(re), base, Arc::new(config)),
Err(err) => error(err.description())
}
}
| {
&ls_colors.symlink
} | conditional_block |
main.rs | #[macro_use]
extern crate clap;
extern crate ansi_term;
extern crate atty;
extern crate regex;
extern crate ignore;
extern crate num_cpus;
pub mod lscolors;
pub mod fshelper;
mod app;
use std::env;
use std::error::Error;
use std::io::Write;
use std::ops::Deref;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
use std::time;
use atty::Stream;
use regex::{Regex, RegexBuilder};
use ignore::WalkBuilder;
use lscolors::LsColors;
/// Defines how to display search result paths.
#[derive(PartialEq)]
enum PathDisplay {
/// As an absolute path
Absolute,
/// As a relative path
Relative,
}
/// The type of file to search for.
#[derive(Copy, Clone)]
enum FileType {
Any,
RegularFile,
Directory,
SymLink,
}
/// Configuration options for *fd*.
struct FdOptions {
/// Determines whether the regex search is case-sensitive or case-insensitive.
case_sensitive: bool,
/// Whether to search within the full file path or just the base name (filename or directory
/// name).
search_full_path: bool,
/// Whether to ignore hidden files and directories (or not).
ignore_hidden: bool,
/// Whether to respect VCS ignore files (`.gitignore`, `.ignore`,..) or not.
read_ignore: bool,
/// Whether to follow symlinks or not.
follow_links: bool,
/// Whether elements of output should be separated by a null character
null_separator: bool,
/// The maximum search depth, or `None` if no maximum search depth should be set.
///
/// A depth of `1` includes all files under the current directory, a depth of `2` also includes
/// all files under subdirectories of the current directory, etc.
max_depth: Option<usize>,
/// The number of threads to use.
threads: usize,
/// Time to buffer results internally before streaming to the console. This is useful to
/// provide a sorted output, in case the total execution time is shorter than
/// `max_buffer_time`.
max_buffer_time: Option<time::Duration>,
/// Display results as relative or absolute path.
path_display: PathDisplay,
/// `None` if the output should not be colorized. Otherwise, a `LsColors` instance that defines
/// how to style different filetypes.
ls_colors: Option<LsColors>,
/// The type of file to search for. All files other than the specified type will be ignored.
file_type: FileType,
/// The extension to search for. Only entries matching the extension will be included.
///
/// The value (if present) will be a lowercase string without leading dots.
extension: Option<String>,
}
/// The receiver thread can either be buffering results or directly streaming to the console.
enum ReceiverMode {
/// Receiver is still buffering in order to sort the results, if the search finishes fast
/// enough.
Buffering,
/// Receiver is directly printing results to the output.
Streaming,
}
/// Root directory
#[cfg(unix)]
static ROOT_DIR: &'static str = "/";
#[cfg(windows)]
static ROOT_DIR: &'static str = "";
/// Print a search result to the console.
fn print_entry(base: &Path, entry: &PathBuf, config: &FdOptions) {
let path_full = base.join(entry);
let path_str = entry.to_string_lossy();
#[cfg(unix)]
let is_executable = |p: Option<&std::fs::Metadata>| {
p.map(|f| f.permissions().mode() & 0o111 != 0)
.unwrap_or(false)
};
#[cfg(windows)]
let is_executable = |_: Option<&std::fs::Metadata>| false;
let stdout = std::io::stdout();
let mut handle = stdout.lock();
if let Some(ref ls_colors) = config.ls_colors {
let default_style = ansi_term::Style::default();
let mut component_path = base.to_path_buf();
if config.path_display == PathDisplay::Absolute {
print!("{}", ls_colors.directory.paint(ROOT_DIR));
}
// Traverse the path and colorize each component
for component in entry.components() {
let comp_str = component.as_os_str().to_string_lossy();
component_path.push(Path::new(comp_str.deref()));
let metadata = component_path.metadata().ok();
let is_directory = metadata.as_ref().map(|md| md.is_dir()).unwrap_or(false);
let style =
if component_path.symlink_metadata()
.map(|md| md.file_type().is_symlink())
.unwrap_or(false) {
&ls_colors.symlink
} else if is_directory {
&ls_colors.directory
} else if is_executable(metadata.as_ref()) {
&ls_colors.executable
} else {
// Look up file name
let o_style =
component_path.file_name()
.and_then(|n| n.to_str())
.and_then(|n| ls_colors.filenames.get(n));
match o_style {
Some(s) => s,
None =>
// Look up file extension
component_path.extension()
.and_then(|e| e.to_str())
.and_then(|e| ls_colors.extensions.get(e))
.unwrap_or(&default_style)
}
};
write!(handle, "{}", style.paint(comp_str)).ok();
if is_directory && component_path != path_full {
let sep = std::path::MAIN_SEPARATOR.to_string();
write!(handle, "{}", style.paint(sep)).ok();
}
}
let r = if config.null_separator {
write!(handle, "\0")
} else {
writeln!(handle, "")
};
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
} else {
// Uncolorized output
let prefix = if config.path_display == PathDisplay::Absolute { ROOT_DIR } else { "" };
let separator = if config.null_separator { "\0" } else { "\n" };
let r = write!(&mut std::io::stdout(), "{}{}{}", prefix, path_str, separator);
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
}
}
/// Recursively scan the given search path and search for files / pathnames matching the pattern.
fn | (root: &Path, pattern: Arc<Regex>, base: &Path, config: Arc<FdOptions>) {
let (tx, rx) = channel();
let walker = WalkBuilder::new(root)
.hidden(config.ignore_hidden)
.ignore(config.read_ignore)
.git_ignore(config.read_ignore)
.parents(config.read_ignore)
.git_global(config.read_ignore)
.git_exclude(config.read_ignore)
.follow_links(config.follow_links)
.max_depth(config.max_depth)
.threads(config.threads)
.build_parallel();
// Spawn the thread that receives all results through the channel.
let rx_config = Arc::clone(&config);
let rx_base = base.to_owned();
let receiver_thread = thread::spawn(move || {
let start = time::Instant::now();
let mut buffer = vec![];
// Start in buffering mode
let mut mode = ReceiverMode::Buffering;
// Maximum time to wait before we start streaming to the console.
let max_buffer_time = rx_config.max_buffer_time
.unwrap_or_else(|| time::Duration::from_millis(100));
for value in rx {
match mode {
ReceiverMode::Buffering => {
buffer.push(value);
// Have we reached the maximum time?
if time::Instant::now() - start > max_buffer_time {
// Flush the buffer
for v in &buffer {
print_entry(&rx_base, v, &rx_config);
}
buffer.clear();
// Start streaming
mode = ReceiverMode::Streaming;
}
},
ReceiverMode::Streaming => {
print_entry(&rx_base, &value, &rx_config);
}
}
}
// If we have finished fast enough (faster than max_buffer_time), we haven't streamed
// anything to the console, yet. In this case, sort the results and print them:
if !buffer.is_empty() {
buffer.sort();
for value in buffer {
print_entry(&rx_base, &value, &rx_config);
}
}
});
// Spawn the sender threads.
walker.run(|| {
let base = base.to_owned();
let config = Arc::clone(&config);
let pattern = Arc::clone(&pattern);
let tx_thread = tx.clone();
Box::new(move |entry_o| {
let entry = match entry_o {
Ok(e) => e,
Err(_) => return ignore::WalkState::Continue,
};
// Filter out unwanted file types.
match config.file_type {
FileType::Any => (),
FileType::RegularFile => if entry.file_type().map_or(false, |ft| !ft.is_file()) {
return ignore::WalkState::Continue;
},
FileType::Directory => if entry.file_type().map_or(false, |ft| !ft.is_dir()) {
return ignore::WalkState::Continue;
},
FileType::SymLink => if entry.file_type().map_or(false, |ft| !ft.is_symlink()) {
return ignore::WalkState::Continue;
},
}
// Filter out unwanted extensions.
if let Some(ref filter_ext) = config.extension {
let entry_ext = entry.path().extension().map(|e| e.to_string_lossy().to_lowercase());
if entry_ext.map_or(true, |ext| ext != *filter_ext) {
return ignore::WalkState::Continue;
}
}
let path_rel_buf = match fshelper::path_relative_from(entry.path(), &*base) {
Some(p) => p,
None => error("Error: could not get relative path for directory entry.")
};
let path_rel = path_rel_buf.as_path();
let search_str_o =
if config.search_full_path {
Some(path_rel.to_string_lossy())
} else {
path_rel.file_name()
.map(|f| f.to_string_lossy())
};
if let Some(search_str) = search_str_o {
// TODO: take care of the unwrap call
pattern.find(&*search_str)
.map(|_| tx_thread.send(path_rel_buf.to_owned()).unwrap());
}
ignore::WalkState::Continue
})
});
// Drop the initial sender. If we don't do this, the receiver will block even
// if all threads have finished, since there is still one sender around.
drop(tx);
// Wait for the receiver thread to print out all results.
receiver_thread.join().unwrap();
}
/// Print error message to stderr and exit with status `1`.
fn error(message: &str) -> ! {
writeln!(&mut std::io::stderr(), "{}", message).expect("Failed writing to stderr");
process::exit(1);
}
fn main() {
let matches = app::build_app().get_matches();
// Get the search pattern
let empty_pattern = String::new();
let pattern = matches.value_of("pattern").unwrap_or(&empty_pattern);
// Get the current working directory
let current_dir_buf = match env::current_dir() {
Ok(cd) => cd,
Err(_) => error("Error: could not get current directory.")
};
let current_dir = current_dir_buf.as_path();
// Get the root directory for the search
let mut root_dir_is_absolute = false;
let root_dir_buf = if let Some(rd) = matches.value_of("path") {
let path = Path::new(rd);
root_dir_is_absolute = path.is_absolute();
fshelper::absolute_path(path).unwrap_or_else(
|_| error(&format!("Error: could not find directory '{}'.", rd))
)
} else {
current_dir_buf.clone()
};
if !root_dir_buf.is_dir() {
error(&format!("Error: '{}' is not a directory.", root_dir_buf.to_string_lossy()));
}
let root_dir = root_dir_buf.as_path();
// The search will be case-sensitive if the command line flag is set or
// if the pattern has an uppercase character (smart case).
let case_sensitive = matches.is_present("case-sensitive") ||
pattern.chars().any(char::is_uppercase);
let colored_output = match matches.value_of("color") {
Some("always") => true,
Some("never") => false,
_ => atty::is(Stream::Stdout)
};
let ls_colors =
if colored_output {
Some(
env::var("LS_COLORS")
.ok()
.map(|val| LsColors::from_string(&val))
.unwrap_or_default()
)
} else {
None
};
let config = FdOptions {
case_sensitive: case_sensitive,
search_full_path: matches.is_present("full-path"),
ignore_hidden: !matches.is_present("hidden"),
read_ignore: !matches.is_present("no-ignore"),
follow_links: matches.is_present("follow"),
null_separator: matches.is_present("null_separator"),
max_depth: matches.value_of("depth")
.and_then(|n| usize::from_str_radix(n, 10).ok()),
threads: std::cmp::max(
matches.value_of("threads")
.and_then(|n| usize::from_str_radix(n, 10).ok())
.unwrap_or_else(num_cpus::get),
1
),
max_buffer_time: matches.value_of("max-buffer-time")
.and_then(|n| u64::from_str_radix(n, 10).ok())
.map(time::Duration::from_millis),
path_display: if matches.is_present("absolute-path") || root_dir_is_absolute {
PathDisplay::Absolute
} else {
PathDisplay::Relative
},
ls_colors: ls_colors,
file_type: match matches.value_of("file-type") {
Some("f") | Some("file") => FileType::RegularFile,
Some("d") | Some("directory") => FileType::Directory,
Some("l") | Some("symlink") => FileType::SymLink,
_ => FileType::Any,
},
extension: matches.value_of("extension")
.map(|e| e.trim_left_matches('.').to_lowercase()),
};
let root = Path::new(ROOT_DIR);
let base = match config.path_display {
PathDisplay::Relative => current_dir,
PathDisplay::Absolute => root
};
match RegexBuilder::new(pattern)
.case_insensitive(!config.case_sensitive)
.build() {
Ok(re) => scan(root_dir, Arc::new(re), base, Arc::new(config)),
Err(err) => error(err.description())
}
}
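A note on the smart-case rule in main() above: the search turns case-sensitive whenever the flag is set or the pattern contains at least one uppercase character. A minimal standalone sketch of that rule (hypothetical helper, not part of the original file):

/// Hypothetical helper mirroring the smart-case decision made in main().
fn is_case_sensitive(case_sensitive_flag: bool, pattern: &str) -> bool {
    // Explicit flag wins; otherwise any uppercase character forces sensitivity.
    case_sensitive_flag || pattern.chars().any(char::is_uppercase)
}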
doc_upsert.rs
//! The `doc upsert` command performs a KV upsert operation.
use super::util::convert_nu_value_to_json_value;
use crate::cli::error::{client_error_to_shell_error, serialize_error};
use crate::cli::util::{
cluster_identifiers_from, get_active_cluster, namespace_from_args, NuValueMap,
};
use crate::client::{ClientError, KeyValueRequest, KvClient, KvResponse};
use crate::state::State;
use crate::RemoteCluster;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use nu_engine::CallExt;
use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{
Category, IntoPipelineData, PipelineData, ShellError, Signature, Span, SyntaxShape, Value,
};
use std::collections::HashSet;
use std::future::Future;
use std::ops::Add;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex, MutexGuard};
use tokio::runtime::Runtime;
use tokio::time::Instant;
#[derive(Clone)]
pub struct DocUpsert {
state: Arc<Mutex<State>>,
}
impl DocUpsert {
pub fn new(state: Arc<Mutex<State>>) -> Self {
Self { state }
}
}
impl Command for DocUpsert {
fn name(&self) -> &str {
"doc upsert"
}
fn signature(&self) -> Signature {
Signature::build("doc upsert")
.optional("id", SyntaxShape::String, "the document id")
.optional("content", SyntaxShape::Any, "the document content")
.named(
"id-column",
SyntaxShape::String,
"the name of the id column if used with an input stream",
None,
)
.named(
"bucket",
SyntaxShape::String,
"the name of the bucket",
None,
)
.named(
"content-column",
SyntaxShape::String,
"the name of the content column if used with an input stream",
None,
)
.named(
"expiry",
SyntaxShape::Number,
"the expiry for the documents in seconds, or absolute",
None,
)
.named("scope", SyntaxShape::String, "the name of the scope", None)
.named(
"collection",
SyntaxShape::String,
"the name of the collection",
None,
)
.named(
"databases",
SyntaxShape::String,
"the databases which should be contacted",
None,
)
.named(
"batch-size",
SyntaxShape::Number,
"the maximum number of items to batch send at a time",
None,
)
.switch("halt-on-error", "halt on any errors", Some('e'))
.category(Category::Custom("couchbase".to_string()))
}
fn usage(&self) -> &str {
"Upsert (insert or override) a document through the data service"
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
run_upsert(self.state.clone(), engine_state, stack, call, input)
}
}
fn build_req(key: String, value: Vec<u8>, expiry: u32) -> KeyValueRequest {
KeyValueRequest::Set { key, value, expiry }
}
fn run_upsert(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let results = run_kv_store_ops(state, engine_state, stack, call, input, build_req)?;
Ok(Value::List {
vals: results,
span: call.head,
}
.into_pipeline_data())
}
pub(crate) fn run_kv_store_ops(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let span = call.head;
let id_column = call
.get_flag(engine_state, stack, "id-column")?
.unwrap_or_else(|| String::from("id"));
let content_column = call
.get_flag(engine_state, stack, "content-column")?
.unwrap_or_else(|| String::from("content"));
let input_args = if let Some(id) = call.opt::<String>(engine_state, stack, 0)? {
if let Some(v) = call.opt::<Value>(engine_state, stack, 1)? {
let content = convert_nu_value_to_json_value(&v, span)?;
vec![(id, content)]
} else {
vec![]
}
} else {
vec![]
};
let filtered = input.into_iter().filter_map(move |i| {
let id_column = id_column.clone();
let content_column = content_column.clone();
if let Value::Record { cols, vals, .. } = i {
let mut id = None;
let mut content = None;
for (k, v) in cols.iter().zip(vals) {
if k.clone() == id_column {
id = v.as_string().ok();
}
if k.clone() == content_column {
content = convert_nu_value_to_json_value(&v, span).ok();
}
}
if let Some(i) = id {
if let Some(c) = content {
return Some((i, c));
}
}
}
None
});
let mut all_items = vec![];
for item in filtered.chain(input_args) {
let value =
serde_json::to_vec(&item.1).map_err(|e| serialize_error(e.to_string(), span))?;
all_items.push((item.0, value));
}
run_kv_mutations(
state,
engine_state,
stack,
call,
span,
all_items,
req_builder,
)
}
pub fn run_kv_mutations(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
span: Span,
all_items: Vec<(String, Vec<u8>)>,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let ctrl_c = engine_state.ctrlc.as_ref().unwrap().clone();
let expiry: i64 = call.get_flag(engine_state, stack, "expiry")?.unwrap_or(0);
let batch_size: Option<i64> = call.get_flag(engine_state, stack, "batch-size")?;
let bucket_flag = call.get_flag(engine_state, stack, "bucket")?;
let scope_flag = call.get_flag(engine_state, stack, "scope")?;
let collection_flag = call.get_flag(engine_state, stack, "collection")?;
let halt_on_error = call.has_flag("halt-on-error");
let cluster_identifiers = cluster_identifiers_from(engine_state, stack, &state, call, true)?;
let guard = state.lock().unwrap();
let mut all_values = vec![];
if let Some(size) = batch_size {
all_values = build_batched_kv_items(size as u32, all_items.clone());
}
let mut results = vec![];
for identifier in cluster_identifiers {
let rt = Runtime::new().unwrap();
let (active_cluster, client, cid) = match get_active_cluster_client_cid(
&rt,
identifier.clone(),
&guard,
bucket_flag.clone(),
scope_flag.clone(),
collection_flag.clone(),
ctrl_c.clone(),
span,
) {
Ok(c) => c,
Err(e) => {
if halt_on_error {
return Err(e);
}
let mut failures = HashSet::new();
failures.insert(e.to_string());
let collected = MutationResult::new(identifier.clone())
.fail_reasons(failures)
.into_value(call.head);
results.push(collected);
continue;
}
};
if all_values.is_empty() {
all_values = build_batched_kv_items(active_cluster.kv_batch_size(), all_items.clone());
}
let mut workers = FuturesUnordered::new();
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
for items in all_values.clone() {
for item in items.clone() {
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let ctrl_c = ctrl_c.clone();
let client = client.clone();
workers.push(async move {
client
.request(
req_builder(item.0, item.1, expiry as u32),
cid,
deadline,
ctrl_c,
)
.await
});
}
// process_kv_workers will handle creating an error for us if halt_on_error is set so we
// can just bubble it.
let worked = process_kv_workers(workers, &rt, halt_on_error, span)?;
success += worked.success;
failed += worked.failed;
fail_reasons.extend(worked.fail_reasons);
workers = FuturesUnordered::new()
}
let collected = MutationResult::new(identifier.clone())
.success(success)
.failed(failed)
.fail_reasons(fail_reasons);
results.push(collected.into_value(span));
}
Ok(results)
}
pub(crate) struct WorkerResponse {
pub(crate) success: i32,
pub(crate) failed: i32,
pub(crate) fail_reasons: HashSet<String>,
}
pub(crate) fn process_kv_workers(
mut workers: FuturesUnordered<impl Future<Output = Result<KvResponse, ClientError>>>,
rt: &Runtime,
halt_on_error: bool,
span: Span,
) -> Result<WorkerResponse, ShellError> {
let (success, failed, fail_reasons) = rt.block_on(async {
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
while let Some(result) = workers.next().await {
match result {
Ok(_) => success += 1,
Err(e) => {
if halt_on_error {
return Err(client_error_to_shell_error(e, span));
}
failed += 1;
fail_reasons.insert(e.to_string());
}
}
}
Ok((success, failed, fail_reasons))
})?;
Ok(WorkerResponse {
success,
failed,
fail_reasons,
})
}
pub(crate) fn build_batched_kv_items<T>(
batch_size: u32,
items: impl IntoIterator<Item = T>,
) -> Vec<Vec<T>> {
let mut all_items = vec![];
let mut these_items = vec![];
let mut i = 0;
for item in items.into_iter() {
these_items.push(item);
if i == batch_size {
all_items.push(these_items);
these_items = vec![];
i = 0;
continue;
}
i += 1;
}
all_items.push(these_items);
all_items
}
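// Hypothetical sanity check (not part of the original file): because the
// counter is compared *after* each push, a full batch holds batch_size + 1
// items, and a final (possibly empty) batch is always appended.
#[cfg(test)]
mod batching_behavior_tests {
    use super::build_batched_kv_items;

    #[test]
    fn flushes_after_batch_size_plus_one_items() {
        let batches = build_batched_kv_items(2u32, vec![1, 2, 3, 4]);
        assert_eq!(batches, vec![vec![1, 2, 3], vec![4]]);
    }
}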
pub(crate) fn get_active_cluster_client_cid<'a>(
rt: &Runtime,
cluster: String,
guard: &'a MutexGuard<State>,
bucket: Option<String>,
scope: Option<String>,
collection: Option<String>,
ctrl_c: Arc<AtomicBool>,
span: Span,
) -> Result<(&'a RemoteCluster, Arc<KvClient>, u32), ShellError> {
let active_cluster = get_active_cluster(cluster, &guard, span)?;
let (bucket, scope, collection) =
namespace_from_args(bucket, scope, collection, active_cluster, span)?;
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let client = rt
.block_on(active_cluster.cluster().key_value_client(
bucket.clone(),
deadline,
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
let cid = rt
.block_on(client.get_cid(
scope,
collection,
Instant::now().add(active_cluster.timeouts().data_timeout()),
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
Ok((active_cluster, Arc::new(client), cid))
}
#[derive(Debug)]
pub struct MutationResult {
success: i32,
failed: i32,
fail_reasons: HashSet<String>,
cluster: String,
}
impl MutationResult {
pub fn new(cluster: String) -> Self {
Self {
success: 0,
failed: 0,
fail_reasons: Default::default(),
cluster,
}
}
pub fn success(mut self, success: i32) -> Self {
self.success = success;
self
}
pub fn failed(mut self, failed: i32) -> Self {
self.failed = failed;
self
}
pub fn fail_reasons(mut self, fail_reasons: HashSet<String>) -> Self {
self.fail_reasons = fail_reasons;
self
}
pub fn into_value(self, span: Span) -> Value {
let mut collected = NuValueMap::default();
collected.add_i64("processed", (self.success + self.failed) as i64, span);
collected.add_i64("success", self.success as i64, span);
collected.add_i64("failed", self.failed as i64, span);
let reasons = self
.fail_reasons
.into_iter()
.collect::<Vec<String>>()
.join(", ");
collected.add_string("failures", reasons, span);
collected.add_string("cluster", self.cluster, span);
collected.into_value(span)
}
}
mock_cr50_agent.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
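// Hypothetical usage sketch (not part of the original file): responses must be
// queued in the exact FIFO order in which the component under test will issue
// its Pinweaver requests; handle_request below panics on any mismatch.
#[cfg(test)]
fn _example_fifo_mock() -> std::collections::VecDeque<MockResponse> {
    MockCr50AgentBuilder::new()
        .add_get_version_response(1)
        .add_reset_tree_response([0u8; 32])
        .add_try_auth_rate_limited_response(30)
        .build()
}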
async fn handle_request(
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response
),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the echo protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at the next (FIFO) response.
let next_response = task_mock_responses.pop_front().expect(&format!(
"Ran out of mock Pinweaver responses. Next request received is: {:?}",
request
));
handle_request(request, next_response, &he_secret).await;
}
}));
});
// Run the ServiceFs on the outgoing directory handle from the mock handles
fs.serve_connection(handles.outgoing_dir)?;
fs.collect::<()>().await;
Ok(())
}
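// Hypothetical wiring note (exact APIs vary by SDK revision): in an
// integration test this `mock` fn is typically registered as a local child of
// a RealmBuilder test realm, with the built VecDeque of responses moved into
// the closure, e.g. `move |handles| Box::pin(mock(responses.clone(), handles))`.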
| {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
} | identifier_body |
mock_cr50_agent.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait),..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
async fn handle_request(
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response
),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()),..success },
)))
.expect("failed to send response");
} else |
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
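// Sketch (standalone, not in the original source): the shared secret uses
// parking_lot::Mutex, so lock() returns a guard directly rather than a
// Result, which is why handle_request can dereference it without unwrapping.
#[allow(dead_code)]
fn example_shared_secret() {
    let secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0u8; 32]));
    *secret.lock() = vec![1u8, 2, 3];
    assert_eq!(secret.lock().len(), 3);
}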
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from this component
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the PinWeaver protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at the next (FIFO) response.
let next_response = task_mock_responses.pop_front().expect(&format!(
"Ran out of mock Pinweaver responses. Next request received is: {:?}",
request
));
handle_request(request, next_response, &he_secret).await;
}
}));
});
// Run the ServiceFs on the outgoing directory handle from the mock handles
fs.serve_connection(handles.outgoing_dir)?;
fs.collect::<()>().await;
Ok(())
}
| {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
} | conditional_block |
mock_cr50_agent.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
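// Illustrative sketch (hypothetical values): a test pairing an InsertLeaf
// response with a later TryAuth success. The mock echoes back the he_secret
// supplied in the InsertLeaf *request*, so the success response here needs no
// secret of its own.
#[allow(dead_code)]
fn example_insert_then_auth() -> VecDeque<MockResponse> {
    let root = [1u8; 32];
    let mac = [2u8; 32];
    MockCr50AgentBuilder::new()
        .add_insert_leaf_response(root, mac, vec![0u8; 16])
        .add_try_auth_success_response(root, vec![0u8; 16], mac)
        .build()
}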
async fn | (
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response
),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
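// Sketch (not in the original file): the queue built above is strictly FIFO;
// mock() pops one response per incoming request and panics when the queue is
// exhausted, restated here in standalone form.
#[allow(dead_code)]
fn example_fifo_drain(mut queue: VecDeque<MockResponse>) {
    while let Some(response) = queue.pop_front() {
        // Each popped response would be matched against the next FIDL request.
        drop(response);
    }
}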
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from this component
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the PinWeaver protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at the next (FIFO) response.
let next_response = task_mock_responses.pop_front().expect(&format!(
"Ran out of mock Pinweaver responses. Next request received is: {:?}",
request
));
handle_request(request, next_response, &he_secret).await;
}
}));
});
// Run the ServiceFs on the outgoing directory handle from the mock handles
fs.serve_connection(handles.outgoing_dir)?;
fs.collect::<()>().await;
Ok(())
}
| handle_request | identifier_name |
mock_cr50_agent.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
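// Illustrative sketch (hypothetical wait time): queueing a rate-limited
// TryAuth response, as a lockout test might before retrying.
#[allow(dead_code)]
fn example_rate_limited() -> VecDeque<MockResponse> {
    MockCr50AgentBuilder::new()
        .add_try_auth_rate_limited_response(30)
        .build()
}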
async fn handle_request(
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response"); | ),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
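// Sketch of the struct-update splice performed in the TryAuth arm above,
// shown standalone (not part of the original file): the stored secret is
// copied into the success table while all other fields pass through.
#[allow(dead_code)]
fn example_splice_secret(success: TryAuthSuccess, secret: &[u8]) -> TryAuthSuccess {
    TryAuthSuccess { he_secret: Some(secret.to_vec()), ..success }
}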
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from this component
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the PinWeaver protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at the next (FIFO) response.
let next_response = task_mock_responses.pop_front().expect(&format!(
"Ran out of mock Pinweaver responses. Next request received is: {:?}",
request
));
handle_request(request, next_response, &he_secret).await;
}
}));
});
// Run the ServiceFs on the outgoing directory handle from the mock handles
fs.serve_connection(handles.outgoing_dir)?;
fs.collect::<()>().await;
Ok(())
} | }
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response | random_line_split |
lib.rs | //! A tiny and incomplete wasm interpreter
//!
//! This module contains a tiny and incomplete wasm interpreter built on top of
//! `walrus`'s module structure. Each `Interpreter` contains some state
//! about the execution of a wasm instance. The "incomplete" part here is
//! related to the fact that this is *only* used to execute the various
//! descriptor functions for wasm-bindgen.
//!
//! As a recap, the wasm-bindgen macro generates "descriptor functions" which
//! act basically as a mapping of rustc's trait resolution into executable code.
//! This allows us to detect, after the macro is invoked, what trait selection did and
//! what types of functions look like. By executing descriptor functions they'll
//! each invoke a known import (with only one argument) some number of times,
//! which gives us a list of `u32` values to then decode.
//!
//! The interpreter here is only geared towards this one exact use case, so it's
//! quite small and likely not especially efficient.
#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet};
use walrus::ir::Instr;
use walrus::{ElementId, FunctionId, LocalId, Module, TableId};
/// A ready-to-go interpreter of a wasm module.
///
/// An interpreter currently represents effectively cached state. It is reused
/// between calls to `interpret` and is precomputed from a `Module`. It houses
/// state like the wasm stack, wasm memory, etc.
#[derive(Default)]
pub struct Interpreter {
// Function index of the `__wbindgen_describe` and
// `__wbindgen_describe_closure` imported functions. We special case this
// to know when the environment's imported function is called.
describe_id: Option<FunctionId>,
describe_closure_id: Option<FunctionId>,
// Id of the function table
functions: Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>,
}
impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that, let's give
// ourselves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn | (&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
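// Illustrative sketch (made-up values, not part of the interpreter): each
// `__wbindgen_describe(n)` call appends one u32, so running a descriptor
// function reduces to accumulating a plain list of pushes.
#[allow(dead_code)]
fn example_descriptor_accumulation() -> Vec<u32> {
    let describe_calls = [4u32, 13, 0]; // hypothetical describe() arguments
    let mut descriptor = Vec::new();
    for value in describe_calls {
        descriptor.push(value);
    }
    descriptor
}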
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snipped out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested go take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
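// Sketch (standalone restatement, not in the original file): LocalGet falls
// back to 0 for locals that were never written, matching wasm's
// zero-initialized locals; this mirrors the `unwrap_or(0)` in eval below.
#[allow(dead_code)]
fn example_local_get(locals: &BTreeMap<LocalId, i32>, id: LocalId) -> i32 {
    locals.get(&id).cloned().unwrap_or(0)
}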
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
}
Instr::LocalTee(e) => {
let val = *stack.last().unwrap();
self.locals.insert(e.local, val);
}
// Blindly assume all globals are the stack pointer
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
stack.push(self.interp.mem[address as usize / 4])
}
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
}
Instr::Return(_) => {
log::debug!("return");
self.done = true;
}
Instr::Drop(_) => {
log::debug!("drop");
stack.pop().unwrap();
}
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
// slightly different signature. Note that we don't eval the
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
let val = stack.pop().unwrap();
stack.pop();
stack.pop();
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
stack.push(0)
// ... otherwise this is a normal call so we recurse.
} else {
let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
let args = (0..ty.params().len())
.map(|_| stack.pop().unwrap())
.collect::<Vec<_>>();
self.interp.call(e.func, self.module, &args);
}
}
// All other instructions shouldn't be used by our various
// descriptor functions. LLVM optimizations may mean that some
// of the above instructions aren't actually needed either, but
// the above instructions have empirically been required when
// executing our own test suite in wasm-bindgen.
//
// Note that LLVM may change over time to generate new
// instructions in debug mode, and we'll have to react to those
// sorts of changes as they arise.
s => panic!("unknown instruction {:?}", s),
}
}
}
| interpret_descriptor | identifier_name |
lib.rs | //! A tiny and incomplete wasm interpreter
//!
//! This module contains a tiny and incomplete wasm interpreter built on top of
//! `walrus`'s module structure. Each `Interpreter` contains some state
//! about the execution of a wasm instance. The "incomplete" part here is
//! related to the fact that this is *only* used to execute the various
//! descriptor functions for wasm-bindgen.
//!
//! As a recap, the wasm-bindgen macro generates "descriptor functions" which
//! act basically as a mapping of rustc's trait resolution into executable code.
//! This allows us to detect, after the macro is invoked, what trait selection did and
//! what types of functions look like. By executing descriptor functions they'll
//! each invoke a known import (with only one argument) some number of times,
//! which gives us a list of `u32` values to then decode.
//!
//! The interpreter here is only geared towards this one exact use case, so it's
//! quite small and likely not especially efficient.
#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet};
use walrus::ir::Instr;
use walrus::{ElementId, FunctionId, LocalId, Module, TableId};
/// A ready-to-go interpreter of a wasm module.
///
/// An interpreter currently represents effectively cached state. It is reused
/// between calls to `interpret` and is precomputed from a `Module`. It houses
/// state like the wasm stack, wasm memory, etc.
#[derive(Default)]
pub struct Interpreter {
// Function index of the `__wbindgen_describe` and
// `__wbindgen_describe_closure` imported functions. We special case this
// to know when the environment's imported function is called.
describe_id: Option<FunctionId>,
describe_closure_id: Option<FunctionId>,
// Id of the function table
functions: Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>,
}
impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that, let's give
// ourselves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn interpret_descriptor(&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snipped out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested go take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
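// Sketch (not in the original file): before walking the entry block, call()
// zips the declared argument ids with the provided values, as restated here.
#[allow(dead_code)]
fn example_bind_args(arg_ids: &[LocalId], args: &[i32]) -> BTreeMap<LocalId, i32> {
    assert_eq!(arg_ids.len(), args.len());
    arg_ids.iter().zip(args).map(|(id, val)| (*id, *val)).collect()
}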
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> | for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
}
Instr::LocalTee(e) => {
let val = *stack.last().unwrap();
self.locals.insert(e.local, val);
}
// Blindly assume all globals are the stack pointer
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
stack.push(self.interp.mem[address as usize / 4])
}
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
}
Instr::Return(_) => {
log::debug!("return");
self.done = true;
}
Instr::Drop(_) => {
log::debug!("drop");
stack.pop().unwrap();
}
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
// slightly different signature. Note that we don't eval the
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
let val = stack.pop().unwrap();
stack.pop();
stack.pop();
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
stack.push(0)
// ... otherwise this is a normal call so we recurse.
} else {
let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
let args = (0..ty.params().len())
.map(|_| stack.pop().unwrap())
.collect::<Vec<_>>();
self.interp.call(e.func, self.module, &args);
}
}
// All other instructions shouldn't be used by our various
// descriptor functions. LLVM optimizations may mean that some
// of the above instructions aren't actually needed either, but
// the above instructions have empirically been required when
// executing our own test suite in wasm-bindgen.
//
// Note that LLVM may change over time to generate new
// instructions in debug mode, and we'll have to react to those
// sorts of changes as they arise.
s => panic!("unknown instruction {:?}", s),
}
}
}
| {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len()); | identifier_body |
lib.rs | //! A tiny and incomplete wasm interpreter
//!
//! This module contains a tiny and incomplete wasm interpreter built on top of
//! `walrus`'s module structure. Each `Interpreter` contains some state
//! about the execution of a wasm instance. The "incomplete" part here is
//! related to the fact that this is *only* used to execute the various
//! descriptor functions for wasm-bindgen.
//!
//! As a recap, the wasm-bindgen macro generates "descriptor functions" which
//! act basically as a mapping of rustc's trait resolution into executable code.
//! This allows us to detect, after the macro is invoked, what trait selection did and
//! what types of functions look like. By executing descriptor functions they'll
//! each invoke a known import (with only one argument) some number of times,
//! which gives us a list of `u32` values to then decode.
//!
//! The interpreter here is only geared towards this one exact use case, so it's
//! quite small and likely not especially efficient.
#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet};
use walrus::ir::Instr;
use walrus::{ElementId, FunctionId, LocalId, Module, TableId};
/// A ready-to-go interpreter of a wasm module.
///
/// An interpreter currently represents effectively cached state. It is reused
/// between calls to `interpret` and is precomputed from a `Module`. It houses
/// state like the wasm stack, wasm memory, etc.
#[derive(Default)]
pub struct Interpreter {
// Function index of the `__wbindgen_describe` and
// `__wbindgen_describe_closure` imported functions. We special case this
// to know when the environment's imported function is called.
describe_id: Option<FunctionId>,
describe_closure_id: Option<FunctionId>,
// Id of the function table
functions: Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>, | impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that, let's give
// ourselves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn interpret_descriptor(&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snipped out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested go take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
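// Sketch (standalone, not in the original file): operands are popped rhs-first
// in eval's Binop arm, so I32Sub computes lhs - rhs; restated here.
#[allow(dead_code)]
fn example_i32_sub(stack: &mut Vec<i32>) {
    let rhs = stack.pop().unwrap();
    let lhs = stack.pop().unwrap();
    stack.push(lhs - rhs);
}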
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
}
Instr::LocalTee(e) => {
let val = *stack.last().unwrap();
self.locals.insert(e.local, val);
}
// Blindly assume all globals are the stack pointer
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
stack.push(self.interp.mem[address as usize / 4])
}
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
}
Instr::Return(_) => {
log::debug!("return");
self.done = true;
}
Instr::Drop(_) => {
log::debug!("drop");
stack.pop().unwrap();
}
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
// slightly different signature. Note that we don't eval the
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
let val = stack.pop().unwrap();
stack.pop();
stack.pop();
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
stack.push(0)
// ... otherwise this is a normal call so we recurse.
} else {
let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
let args = (0..ty.params().len())
.map(|_| stack.pop().unwrap())
.collect::<Vec<_>>();
self.interp.call(e.func, self.module, &args);
}
}
// All other instructions shouldn't be used by our various
// descriptor functions. LLVM optimizations may mean that some
// of the above instructions aren't actually needed either, but
// the above instructions have empirically been required when
// executing our own test suite in wasm-bindgen.
//
// Note that LLVM may change over time to generate new
// instructions in debug mode, and we'll have to react to those
// sorts of changes as they arise.
s => panic!("unknown instruction {:?}", s),
}
}
} | }
| random_line_split |
lib.rs | //! A tiny and incomplete wasm interpreter
//!
//! This module contains a tiny and incomplete wasm interpreter built on top of
//! `walrus`'s module structure. Each `Interpreter` contains some state
//! about the execution of a wasm instance. The "incomplete" part here is
//! related to the fact that this is *only* used to execute the various
//! descriptor functions for wasm-bindgen.
//!
//! As a recap, the wasm-bindgen macro generates "descriptor functions" which
//! act basically as a mapping of rustc's trait resolution into executable code.
//! This allows us to detect, after the macro is invoked, what trait selection did and
//! what types of functions look like. By executing descriptor functions they'll
//! each invoke a known import (with only one argument) some number of times,
//! which gives us a list of `u32` values to then decode.
//!
//! The interpreter here is only geared towards this one exact use case, so it's
//! quite small and likely not especially efficient.
#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet};
use walrus::ir::Instr;
use walrus::{ElementId, FunctionId, LocalId, Module, TableId};
/// A ready-to-go interpreter of a wasm module.
///
/// An interpreter currently represents effectively cached state. It is reused
/// between calls to `interpret` and is precomputed from a `Module`. It houses
/// state like the wasm stack, wasm memory, etc.
#[derive(Default)]
pub struct Interpreter {
// Function index of the `__wbindgen_describe` and
// `__wbindgen_describe_closure` imported functions. We special case this
// to know when the environment's imported function is called.
describe_id: Option<FunctionId>,
describe_closure_id: Option<FunctionId>,
// Id of the function table
functions: Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>,
}
impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that let's give
        // ourselves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
            if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn interpret_descriptor(&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snip'd out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
        // interested in, go take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
}
Instr::LocalTee(e) => {
let val = *stack.last().unwrap();
self.locals.insert(e.local, val);
}
// Blindly assume all globals are the stack pointer
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
stack.push(self.interp.mem[address as usize / 4])
}
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
}
Instr::Return(_) => {
log::debug!("return");
self.done = true;
}
Instr::Drop(_) => {
log::debug!("drop");
stack.pop().unwrap();
}
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
// slightly different signature. Note that we don't eval the
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
let val = stack.pop().unwrap();
stack.pop();
stack.pop();
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
stack.push(0)
                // ... otherwise this is a normal call so we recurse.
                } else {
                    let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
                    let args = (0..ty.params().len())
                        .map(|_| stack.pop().unwrap())
                        .collect::<Vec<_>>();
                    self.interp.call(e.func, self.module, &args);
                }
}
// All other instructions shouldn't be used by our various
// descriptor functions. LLVM optimizations may mean that some
// of the above instructions aren't actually needed either, but
// the above instructions have empirically been required when
// executing our own test suite in wasm-bindgen.
//
// Note that LLVM may change over time to generate new
// instructions in debug mode, and we'll have to react to those
// sorts of changes as they arise.
s => panic!("unknown instruction {:?}", s),
}
}
}
maze.rs
//! I would like to approach the problem in two distinct ways
//!
//! One of them is floodfill - the solution is highly suboptimal in terms of computational
//! complexity, but it parallelizes perfectly - every iteration step recalculates new maze path
//! data based entirely on the previous iteration. The approach has a problem: every iteration
//! step is O(n) itself, where n is the entire maze size. However - the solution scales perfectly
//! if we can have a separate thread for every field, which happens if we are on some kind of
//! strong SIMD architecture - like a GPU. I see that in the offer there was an "FPGA" thing, and
//! as we are talking about financial calculations, I assume this is the reason for "FPGA" being
//! there.
//!
//! The other approach is trying to have just a nice solution for normal processors - just
//! implement a properly aligned A*, a pretty easy and common solution for pathfinding. Nothing
//! special there, but on a SISD arch it should behave pretty nicely (it could probably be
//! improved by using some more sophisticated algo like double-ended A*, but I am lazy - too much
//! work showing too little; if I really found more time I would rather try to do something more
//! interesting - visualization, or some kind of optimization - but I don't believe I would find
//! the motivation for that).
//!
//! I figured out an additional "approach" (besides taking a completely different search algo).
//! The maze could be easily preprocessed into a directed graph, where each cell (so actually each
//! non-wall maze field) has a connection to the closest path crossing, and then any pathfinding
//! alg can be run on that. The benefit is that pathfinding itself is performed on a strongly
//! reduced graph; the downside is obviously the need for preprocessing (not that much - possible
//! to be done in O(x * y), but every field has to be visited, while most reasonable finding
//! algorithms avoid visiting every field). The problem that if the exit is not on a crossing then
//! there is no incoming path to it is actually not difficult to solve - a simple raycast from the
//! exit can be done to find all fields "connected" to the exit (O(x + y)).
//!
//! In terms of visualization (even printing to text) - I don't even try to be efficient.
use std::cmp::Ordering;
use std::io::BufRead;
mod flood;
pub use flood::flood;
mod astar;
pub use astar::astar;
/// Direction from which it's needed to approach the field to achieve it with the given cost. As
/// it is possible to have the same distance from multiple directions, it is a simple bitset. This
/// is needed, as in our problem the cost of the next step depends on whether there is a turn on
/// this step.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Dir(u8);
impl Dir {
pub const NONE: Dir = Dir(0);
pub const LEFT: Dir = Dir(1);
pub const UP: Dir = Dir(2);
pub const RIGHT: Dir = Dir(4);
pub const DOWN: Dir = Dir(8);
pub const ANY: Dir = Dir(1 | 2 | 4 | 8);
pub fn has_all(&self, Dir(other): Dir) -> bool {
self.0 & other == other
}
/// Returns directions in which at least one step is needed
pub fn vec((from_x, from_y): (usize, usize), (to_x, to_y): (usize, usize)) -> Self {
let h = match from_x.cmp(&to_x) {
Ordering::Less => Self::LEFT,
Ordering::Greater => Self::RIGHT,
Ordering::Equal => Self::NONE,
};
let v = match from_y.cmp(&to_y) {
Ordering::Less => Self::UP,
Ordering::Greater => Self::DOWN,
Ordering::Equal => Self::NONE,
};
h | v
}
/// Rotates left
pub fn left(mut self) -> Self {
let down = (self.0 & 1) << 3;
self.0 >>= 1;
self.0 |= down;
self
}
/// Rotates right
pub fn right(mut self) -> Self {
let left = (self.0 & 8) >> 3;
self.0 <<= 1;
self.0 |= left;
self.0 &= 0xf;
self
}
/// Returns minimal number of rotations so at least one encoded direction would match every
    /// given direction at least once
    pub fn min_rotation(self, other: Self) -> usize {
        // I have a feeling it is strongly suboptimal; actually, as both directions are encoded
        // as 4 bits, just a precalculated table would be the best solution
let mut min = 4;
for dir in [Self::LEFT, Self::RIGHT, Self::UP, Self::DOWN].iter() {
let mut d = *dir;
            if !self.has_all(d) {
                continue;
            }
            let mut o = other.0 & !dir.0;
            let mut cnt = 0;
            while o != 0 {
                cnt += 1;
                d = d.left();
                o &= !d.0;
            }
            min = std::cmp::min(min, cnt);
            d = *dir;
            o = other.0 & !dir.0;
            cnt = 0;
            while o != 0 {
                cnt += 1;
                d = d.right();
                o &= !d.0;
}
min = std::cmp::min(min, cnt);
}
min
}
}
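// A small sanity-check sketch (not part of the original file): with the bit
// layout above, `left` rotates counterclockwise and `right` clockwise.
#[cfg(test)]
mod dir_rotation_tests {
    use super::Dir;

    #[test]
    fn rotations_cycle_through_all_directions() {
        assert_eq!(Dir::UP.left(), Dir::LEFT);
        assert_eq!(Dir::UP.right(), Dir::RIGHT);
        // Four rotations in either direction are an identity.
        assert_eq!(Dir::LEFT.left().left().left().left(), Dir::LEFT);
        // Rotating the full bitset is a no-op.
        assert_eq!(Dir::ANY.right(), Dir::ANY);
    }
}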
impl std::ops::BitOr for Dir {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
/// Single field in maze
#[derive(Clone, Copy, Debug)]
enum Field {
Empty,
Wall,
/// Empty field with known distance from the start of the maze
    /// It doesn't need to be the closest path - it is the distance calculated using some path
Calculated(Dir, usize),
}
/// Whole maze representation
pub struct Maze {
/// All fields flattened
maze: Box<[Field]>,
    /// Width of the maze, as it is needed for proper addressing (including the external wall)
w: usize,
}
impl Maze {
/// Maps coord to field index
fn idx(&self, x: usize, y: usize) -> usize {
        // On overflow just give an invalid (too big) index - anything from here would be a wall
        // by default, which is a simplification on purpose
y.saturating_mul(self.w).saturating_add(x)
}
/// Maps field index to coordinates
fn coords(&self, idx: usize) -> (usize, usize) {
(idx % self.w, idx / self.w)
}
/// Returns index of field in given direction (defined to be wrapping)
fn in_dir_idx(&self, idx: usize, dir: Dir) -> usize {
let (x, y) = self.coords(idx);
        // Doing a wrapping sub basically because the maze size is way smaller than my indexing
        // type size (considering a >= 16-bit machine), so after wrapping I would have an invalid
        // field, so Wall by default
let (x, y) = match dir {
Dir::UP => (x, y.wrapping_sub(1)),
Dir::DOWN => (x, y + 1),
Dir::LEFT => (x.wrapping_sub(1), y),
Dir::RIGHT => (x + 1, y),
_ => (x, y),
};
self.idx(x, y)
}
    /// Returns the field in the given direction from the given one (Wall if no such field).
    /// If Dir has more than one direction encoded, the field with the same idx is returned
fn in_dir(&self, idx: usize, dir: Dir) -> Field {
self.maze
.get(self.in_dir_idx(idx, dir))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives field from given coord (Wall if no such field)
fn field(&self, x: usize, y: usize) -> Field {
self.maze
.get(self.idx(x, y))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives mutable field from given coord
fn field_mut(&mut self, x: usize, y: usize) -> Option<&mut Field> {
self.maze.get_mut(self.idx(x, y))
}
    /// Creates a valid maze from input containing the maze description and its x/y dimensions
pub fn from_input(x: usize, y: usize, input: impl BufRead) -> Self {
        // Iterating over bytes is a bad idea, but the only interesting characters are 0 and 1,
        // which happen to be ASCII bytes. I am aware it won't work with any non-ASCII UTF
        // representation of 0 and 1 and "I don't care, what they're going to say..."
let maze = input
.lines()
.take(y)
.flat_map(|line| line.unwrap().into_bytes())
.map(|field| match field {
b'0' => Field::Wall,
b'1' => Field::Empty,
_ => panic!("Invalid input"),
})
.collect();
Maze { maze, w: x }
}
}
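// A minimal usage sketch (not part of the original file): build a small maze
// from an in-memory string; `Cursor` provides the `BufRead` that `from_input`
// expects, and the layout here is an assumption for illustration.
#[cfg(test)]
mod parsing_tests {
    use super::{Field, Maze};
    use std::io::Cursor;

    #[test]
    fn parses_walls_and_empty_fields() {
        let maze = Maze::from_input(4, 3, Cursor::new("1010\n1110\n0011\n"));
        assert!(matches!(maze.field(0, 0), Field::Empty));
        assert!(matches!(maze.field(1, 0), Field::Wall));
        // Out-of-range lookups fall back to `Wall` by design.
        assert!(matches!(maze.field(100, 100), Field::Wall));
    }
}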
#[cfg(feature = "text_visualize")]
impl std::fmt::Display for Maze {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // While printing the maze, the external wall is not printed
for line in self.maze.chunks(self.w) {
let line: String = line
.iter()
.map(|field| match field {
                    Field::Empty => ' ',
Field::Wall => '#',
Field::Calculated(_, distance) => {
(distance % 10).to_string().chars().last().unwrap()
}
})
.chain(std::iter::once('\n'))
.collect();
f.write_str(&line)?;
}
Ok(())
}
}
/// As both "parts" of the exercise are actually two separate applications, here we have the maze
/// "main" (with preparsed arguments).
///
/// The last argument is the function for calculating the shortest path.
/// As an argument it takes the initial maze, with at least one field with a known distance - which
/// is considered to be an "initial cost" of entering the maze with this input - and additionally
/// a field the algorithm is looking for a path to. The returned maze contains the exit field
/// calculated to the closest path, and possibly some other fields calculated to have "at least
/// this good" paths.
///
/// If there is no path to the given exit, the calculator should return a maze with the exit field
/// left uncalculated
pub fn main(
x: usize,
y: usize,
input: impl BufRead,
calculator: impl Fn(Maze, usize, usize) -> Maze,
) {
let mut maze = Maze::from_input(x, y, input);
*maze.field_mut(0, 1).unwrap() = Field::Calculated(Dir::ANY, 0);
#[cfg(feature = "text_visualize")]
println!("Initial maze:\n\n{}\n", maze);
let maze = calculator(maze, x - 1, y - 2);
#[cfg(feature = "text_visualize")]
println!("Calculated maze:\n\n{}\n", maze);
match maze.field(x - 1, y - 2) {
Field::Empty => println!("UNREACHABLE"),
Field::Wall => println!("INVALID"),
Field::Calculated(_, cost) => println!("{}", cost),
}
}
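// A hypothetical entry point (not part of the original file) wiring `main` to
// the re-exported `astar` calculator; stdin locking and the 4x4 dimensions
// are assumptions for illustration.
//
// fn run_from_stdin() {
//     let stdin = std::io::stdin();
//     main(4, 4, stdin.lock(), astar);
// }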
lib.rs
pub use glam::*;
use image::DynamicImage;
pub use std::time;
pub use wgpu::util::DeviceExt;
use wgpu::ShaderModule;
pub use winit::{
dpi::{PhysicalSize, Size},
event::{Event, *},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowAttributes},
};
pub type Index = u16;
#[repr(u32)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum SampleCount {
Single = 1,
Msaa4x = 4,
}
impl From<u32> for SampleCount {
fn from(sample_count: u32) -> Self {
match sample_count {
1 => Self::Single,
4 => Self::Msaa4x,
_ => panic!("a sample count of {} is invalid", sample_count),
}
}
}
pub struct FrameBuffer {
texture: wgpu::Texture,
sample_count: u32,
}
impl FrameBuffer {
pub fn new(device: &wgpu::Device, config: &SurfaceHandlerConfiguration) -> Self {
let sample_count = config.sample_count as u32;
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Mutlisampled Texture"),
sample_count,
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
        if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
            sample_count.into()
        } else {
            SampleCount::Single
        }
    }
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
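// A hedged sketch (not part of the original file) of how the pair returned
// above feeds a render pass: with MSAA enabled the multisampled view is the
// color attachment and the surface view becomes the resolve target.
//
// let (view, resolve_target) = surface_handler.create_view_and_resolve_target(&surface_texture);
// let color_attachment = wgpu::RenderPassColorAttachment {
//     view: &view,
//     resolve_target: resolve_target.as_ref(),
//     ops: CLEAR_WHITE_OPERATIONS,
// };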
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn new(window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
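    // Safety note for the helper below: it reinterprets a typed slice as raw
    // bytes, which is only sound for plain-old-data element types - true for
    // the vertex/index/uniform data this crate feeds it.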
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
fn create_buffer<T>(
&self,
label: &str,
contents: &[T],
usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
    pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
        self.create_buffer("Index Buffer", contents, wgpu::BufferUsages::INDEX)
    }
    pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
        self.create_buffer("Vertex Buffer", contents, wgpu::BufferUsages::VERTEX)
    }
    pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
        self.create_buffer("Uniform Buffer", contents, wgpu::BufferUsages::UNIFORM)
    }
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState {
self.surface_handler.multisample_state()
}
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
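// A minimal usage sketch (not part of the original file): create the `Gpu`
// for a window and upload assets with the helpers above. The asset path is an
// assumption for illustration.
//
// let main_loop = MainLoop::new("demo");
// let gpu = Gpu::new(main_loop.window());
// let texture = gpu.create_texture_from_image(include_image!("../assets/sprite.png"));
// let vertices = gpu.create_vertex_buffer(&[[0.0f32, 0.5], [-0.5, -0.5], [0.5, -0.5]]);
// let indices = gpu.create_index_buffer(&[0, 1, 2]);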
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(&self) -> &Window {
&self.window
}
const DURATION_500MS: time::Duration = time::Duration::from_millis(500);
pub fn run(
self,
        mut event_handler: impl 'static + FnMut(time::Duration, &Window, Event<()>, &mut ControlFlow),
    ) -> ! {
let mut last_update_instant = time::Instant::now();
let mut last_fps_update_instant = time::Instant::now();
let mut update_count = 0u32;
let event_loop = self.event_loop;
let window = self.window;
event_loop.run(move |event, _, control_flow| {
let now = time::Instant::now();
let duration_since_last_update = now.duration_since(last_update_instant);
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
},
Event::MainEventsCleared => {
last_update_instant = now;
let duration_since_last_fps_update =
now.duration_since(last_fps_update_instant);
if duration_since_last_fps_update > Self::DURATION_500MS {
// print!(
// "\r{: >12} fps",
// update_count as f32 / duration_since_last_fps_update.as_secs_f32(),
// );
// use std::io::Write;
// std::io::stdout().flush().unwrap_or(());
last_fps_update_instant = now;
update_count = 0;
}
window.request_redraw();
update_count += 1;
}
_ => {}
}
            event_handler(duration_since_last_update, &window, event, control_flow);
})
}
}
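// A hedged sketch (not part of the original file) of driving the loop:
// resize the surface when the window changes size and redraw each frame.
//
// fn run_app(main_loop: MainLoop, mut gpu: Gpu) -> ! {
//     main_loop.run(move |_dt, _window, event, _control_flow| match event {
//         Event::WindowEvent { event: WindowEvent::Resized(size), .. } => {
//             gpu.resize_surface(size.width, size.height);
//         }
//         Event::RedrawRequested(_) => {
//             // record a render pass via gpu.create_render_pass_resources() and submit it
//         }
//         _ => {}
//     })
// }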
#[macro_export]
macro_rules! size_of {
($T:ty) => {
std::mem::size_of::<$T>()
};
}
pub struct RenderBundle {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
#[macro_export]
macro_rules! include_image {
($file:expr $(,)?) => {
image::load_from_memory(include_bytes!($file)).expect("load image error")
};
}
pub const ALPHA_BLEND_STATE: Option<wgpu::BlendState> = Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
});
pub const CLEAR_WHITE_OPERATIONS: wgpu::Operations<wgpu::Color> = wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
};
lib.rs | pub use glam::*;
use image::DynamicImage;
pub use std::time;
pub use wgpu::util::DeviceExt;
use wgpu::ShaderModule;
pub use winit::{
dpi::{PhysicalSize, Size},
event::{Event, *},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowAttributes},
};
pub type Index = u16;
#[repr(u32)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum SampleCount {
Single = 1,
Msaa4x = 4,
}
impl From<u32> for SampleCount {
fn from(sample_count: u32) -> Self {
match sample_count {
1 => Self::Single,
4 => Self::Msaa4x,
_ => panic!("a sample count of {} is invalid", sample_count),
}
}
}
pub struct FrameBuffer {
texture: wgpu::Texture,
sample_count: u32,
}
impl FrameBuffer {
pub fn new(device: &wgpu::Device, config: &SurfaceHandlerConfiguration) -> Self {
let sample_count = config.sample_count as u32;
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Mutlisampled Texture"),
sample_count,
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count,.. }) = self.frame_buffer {
sample_count.into()
} else {
SampleCount::Single
}
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn | (window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
fn create_buffer<T>(
&self,
label: &str,
contents: &[T],
usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::UNIFORM,
)
}
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState {
self.surface_handler.multisample_state()
}
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(&self) -> &Window {
&self.window
}
const DURATION_500MS: time::Duration = time::Duration::from_millis(500);
pub fn run(
self,
mut event_handler: impl'static + FnMut(time::Duration, &Window, Event<()>, &mut ControlFlow),
) ->! {
let mut last_update_instant = time::Instant::now();
let mut last_fps_update_instant = time::Instant::now();
let mut update_count = 0u32;
let event_loop = self.event_loop;
let window = self.window;
event_loop.run(move |event, _, control_flow| {
let now = time::Instant::now();
let duration_since_last_update = now.duration_since(last_update_instant);
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
},
Event::MainEventsCleared => {
last_update_instant = now;
let duration_since_last_fps_update =
now.duration_since(last_fps_update_instant);
if duration_since_last_fps_update > Self::DURATION_500MS {
// print!(
// "\r{: >12} fps",
// update_count as f32 / duration_since_last_fps_update.as_secs_f32(),
// );
// use std::io::Write;
// std::io::stdout().flush().unwrap_or(());
last_fps_update_instant = now;
update_count = 0;
}
window.request_redraw();
update_count += 1;
}
_ => {}
}
event_handler(dbg!(duration_since_last_update), &window, event, control_flow);
})
}
}
#[macro_export]
macro_rules! size_of {
($T:ty) => {
std::mem::size_of::<$T>()
};
}
pub struct RenderBundle {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
#[macro_export]
macro_rules! include_image {
($file:expr $(,)?) => {
image::load_from_memory(include_bytes!($file)).expect("load image error")
};
}
pub const ALPHA_BLEND_STATE: Option<wgpu::BlendState> = Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
});
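// Note: despite the name, this clears to a dark slate blue (0.1, 0.2, 0.3), not white.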
pub const CLEAR_WHITE_OPERATIONS: wgpu::Operations<wgpu::Color> = wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
};
| new | identifier_name |
lib.rs | pub use glam::*;
use image::DynamicImage;
pub use std::time;
pub use wgpu::util::DeviceExt;
use wgpu::ShaderModule;
pub use winit::{
dpi::{PhysicalSize, Size},
event::{Event, *},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowAttributes},
};
pub type Index = u16;
#[repr(u32)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum SampleCount {
Single = 1,
Msaa4x = 4,
}
impl From<u32> for SampleCount {
fn from(sample_count: u32) -> Self {
match sample_count {
1 => Self::Single,
4 => Self::Msaa4x,
_ => panic!("a sample count of {} is invalid", sample_count),
}
}
}
pub struct FrameBuffer {
texture: wgpu::Texture,
sample_count: u32,
}
impl FrameBuffer {
pub fn new(device: &wgpu::Device, config: &SurfaceHandlerConfiguration) -> Self {
let sample_count = config.sample_count as u32;
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Mutlisampled Texture"),
sample_count,
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else {
SampleCount::Single
}
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn new(window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
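// Safety: reinterpreting `&[T]` as raw bytes is only sound for plain-old-data
// element types (no padding, no pointers); callers pass simple vertex/index/uniform structs.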
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
fn create_buffer<T>(
&self,
label: &str,
contents: &[T],
usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
contents, // `create_buffer` converts to bytes internally
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
contents,
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
contents,
wgpu::BufferUsages::UNIFORM,
)
}
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
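// Note: `as_rgba8` returns `None` (so this panics) unless the image is already
// RGBA8; `DynamicImage::to_rgba8()` would convert other formats instead.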
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
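// `width << 2` == width * 4 bytes: four bytes per RGBA8 pixel, rows tightly packed.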
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState |
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(&self) -> &Window {
&self.window
}
const DURATION_500MS: time::Duration = time::Duration::from_millis(500);
pub fn run(
self,
mut event_handler: impl 'static + FnMut(time::Duration, &Window, Event<()>, &mut ControlFlow),
) -> ! {
let mut last_update_instant = time::Instant::now();
let mut last_fps_update_instant = time::Instant::now();
let mut update_count = 0u32;
let event_loop = self.event_loop;
let window = self.window;
event_loop.run(move |event, _, control_flow| {
let now = time::Instant::now();
let duration_since_last_update = now.duration_since(last_update_instant);
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
},
Event::MainEventsCleared => {
last_update_instant = now;
let duration_since_last_fps_update =
now.duration_since(last_fps_update_instant);
if duration_since_last_fps_update > Self::DURATION_500MS {
// print!(
// "\r{: >12} fps",
// update_count as f32 / duration_since_last_fps_update.as_secs_f32(),
// );
// use std::io::Write;
// std::io::stdout().flush().unwrap_or(());
last_fps_update_instant = now;
update_count = 0;
}
window.request_redraw();
update_count += 1;
}
_ => {}
}
event_handler(duration_since_last_update, &window, event, control_flow);
})
}
}
#[macro_export]
macro_rules! size_of {
($T:ty) => {
std::mem::size_of::<$T>()
};
}
pub struct RenderBundle {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
#[macro_export]
macro_rules! include_image {
($file:expr $(,)?) => {
image::load_from_memory(include_bytes!($file)).expect("load image error")
};
}
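// "Source over" compositing for straight (non-premultiplied) alpha; alpha channels accumulate additively.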
pub const ALPHA_BLEND_STATE: Option<wgpu::BlendState> = Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
});
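// Note: despite the name, this clears to a dark slate blue (0.1, 0.2, 0.3), not white.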
pub const CLEAR_WHITE_OPERATIONS: wgpu::Operations<wgpu::Color> = wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
};
| {
self.surface_handler.multisample_state()
} | identifier_body |
lib.rs | pub use glam::*;
use image::DynamicImage;
pub use std::time;
pub use wgpu::util::DeviceExt;
use wgpu::ShaderModule;
pub use winit::{
dpi::{PhysicalSize, Size},
event::{Event, *},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowAttributes},
};
pub type Index = u16;
#[repr(u32)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum SampleCount {
Single = 1,
Msaa4x = 4,
}
impl From<u32> for SampleCount {
fn from(sample_count: u32) -> Self {
match sample_count {
1 => Self::Single,
4 => Self::Msaa4x,
_ => panic!("a sample count of {} is invalid", sample_count),
}
}
}
pub struct FrameBuffer {
texture: wgpu::Texture,
sample_count: u32,
}
impl FrameBuffer {
pub fn new(device: &wgpu::Device, config: &SurfaceHandlerConfiguration) -> Self {
let sample_count = config.sample_count as u32;
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Mutlisampled Texture"),
sample_count,
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else {
SampleCount::Single
}
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn new(window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
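// Safety: reinterpreting `&[T]` as raw bytes is only sound for plain-old-data
// element types (no padding, no pointers); callers pass simple vertex/index/uniform structs.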
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
| usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
contents, // `create_buffer` converts to bytes internally
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
contents,
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
contents,
wgpu::BufferUsages::UNIFORM,
)
}
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
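// Note: `as_rgba8` returns `None` (so this panics) unless the image is already
// RGBA8; `DynamicImage::to_rgba8()` would convert other formats instead.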
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
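// `width << 2` == width * 4 bytes: four bytes per RGBA8 pixel, rows tightly packed.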
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState {
self.surface_handler.multisample_state()
}
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(&self) -> &Window {
&self.window
}
const DURATION_500MS: time::Duration = time::Duration::from_millis(500);
pub fn run(
self,
mut event_handler: impl 'static + FnMut(time::Duration, &Window, Event<()>, &mut ControlFlow),
) -> ! {
let mut last_update_instant = time::Instant::now();
let mut last_fps_update_instant = time::Instant::now();
let mut update_count = 0u32;
let event_loop = self.event_loop;
let window = self.window;
event_loop.run(move |event, _, control_flow| {
let now = time::Instant::now();
let duration_since_last_update = now.duration_since(last_update_instant);
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
},
Event::MainEventsCleared => {
last_update_instant = now;
let duration_since_last_fps_update =
now.duration_since(last_fps_update_instant);
if duration_since_last_fps_update > Self::DURATION_500MS {
// print!(
// "\r{: >12} fps",
// update_count as f32 / duration_since_last_fps_update.as_secs_f32(),
// );
// use std::io::Write;
// std::io::stdout().flush().unwrap_or(());
last_fps_update_instant = now;
update_count = 0;
}
window.request_redraw();
update_count += 1;
}
_ => {}
}
event_handler(duration_since_last_update, &window, event, control_flow);
})
}
}
#[macro_export]
macro_rules! size_of {
($T:ty) => {
std::mem::size_of::<$T>()
};
}
pub struct RenderBundle {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
#[macro_export]
macro_rules! include_image {
($file:expr $(,)?) => {
image::load_from_memory(include_bytes!($file)).expect("load image error")
};
}
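// "Source over" compositing for straight (non-premultiplied) alpha; alpha channels accumulate additively.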
pub const ALPHA_BLEND_STATE: Option<wgpu::BlendState> = Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
});
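// Note: despite the name, this clears to a dark slate blue (0.1, 0.2, 0.3), not white.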
pub const CLEAR_WHITE_OPERATIONS: wgpu::Operations<wgpu::Color> = wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}; | fn create_buffer<T>(
&self,
label: &str,
contents: &[T], | random_line_split |
main.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
mod disk;
mod isolation;
mod net;
mod runtime;
mod share;
mod ssh;
mod types;
mod utils;
mod vm;
use std::env;
use std::ffi::OsString;
use std::path::PathBuf;
use std::process::Command;
use anyhow::anyhow;
use anyhow::Context;
use clap::Args;
use clap::Parser;
use clap::Subcommand;
use image_test_lib::KvPair;
use image_test_lib::Test;
use json_arg::JsonFile;
use tempfile::tempdir;
use tracing::debug;
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::prelude::*;
use crate::isolation::default_passthrough_envs;
use crate::isolation::is_isolated;
use crate::isolation::isolated;
use crate::isolation::Platform;
use crate::runtime::set_runtime;
use crate::types::MachineOpts;
use crate::types::RuntimeOpts;
use crate::types::VMArgs;
use crate::utils::console_output_path_for_tpx;
use crate::utils::log_command;
use crate::vm::VM;
type Result<T> = std::result::Result<T, anyhow::Error>;
#[derive(Debug, Parser)]
struct Cli {
#[command(subcommand)]
command: Commands,
}
#[derive(Debug, Subcommand)]
enum Commands {
/// Run the VM. Must be executed inside container.
Run(RunCmdArgs),
/// Respawn inside isolated image and execute `Run` command.
Isolate(IsolateCmdArgs),
/// Run VM tests inside container.
Test(IsolateCmdArgs),
}
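// Illustrative invocation (binary name hypothetical; subcommand and flag names
// are derived by clap from the definitions above):
//   vm-runner isolate --image <layer-path> --machine-spec <json> --runtime-spec <json>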
/// Execute the VM
#[derive(Debug, Args)]
struct | {
/// Json-encoded file for VM machine configuration
#[arg(long)]
machine_spec: JsonFile<MachineOpts>,
/// Json-encoded file describing paths of binaries required by VM
#[arg(long)]
runtime_spec: JsonFile<RuntimeOpts>,
#[clap(flatten)]
vm_args: VMArgs,
}
/// Spawn a container and execute the VM inside.
#[derive(Debug, Args)]
struct IsolateCmdArgs {
/// Path to container image.
#[arg(long)]
image: PathBuf,
/// Set these env vars in the container. If VM executes a command, these
/// env vars will also be prepended to the command.
#[arg(long)]
setenv: Vec<KvPair>,
/// Args for run command
#[clap(flatten)]
run_cmd_args: RunCmdArgs,
}
/// Actually start the VM. This needs to run inside an ephemeral container, as
/// many resources rely on the container for cleanup.
fn run(args: &RunCmdArgs) -> Result<()> {
if !is_isolated()? {
return Err(anyhow!("run must be called from inside container"));
}
debug!("RuntimeOpts: {:?}", args.runtime_spec);
debug!("MachineOpts: {:?}", args.machine_spec);
debug!("VMArgs: {:?}", args.vm_args);
set_runtime(args.runtime_spec.clone().into_inner())
.map_err(|_| anyhow!("Failed to set runtime"))?;
Ok(VM::new(args.machine_spec.clone().into_inner(), args.vm_args.clone())?.run()?)
}
/// Enter the isolated container, then respawn this binary inside it with the
/// `run` command and its parameters.
fn respawn(args: &IsolateCmdArgs) -> Result<()> {
let mut envs = default_passthrough_envs();
envs.extend(args.setenv.clone());
let mut vm_args = args.run_cmd_args.vm_args.clone();
vm_args.command_envs = envs.clone();
// Let's always capture console output unless it's console mode
let _console_dir;
if !vm_args.mode.console && vm_args.console_output_file.is_none() {
let dir = tempdir().context("Failed to create temp dir for console output")?;
vm_args.console_output_file = Some(dir.path().join("console.txt"));
_console_dir = dir;
}
let isolated = isolated(&args.image, envs, vm_args.get_container_output_dirs())?;
let exe = env::current_exe().context("while getting argv[0]")?;
let mut command = isolated.command(exe)?;
command
.arg("run")
.arg("--machine-spec")
.arg(args.run_cmd_args.machine_spec.path())
.arg("--runtime-spec")
.arg(args.run_cmd_args.runtime_spec.path())
.args(vm_args.to_args());
log_command(&mut command).status()?;
Ok(())
}
/// Merge all sources of our envs into the final list of env vars we should use
/// everywhere for tests. Dedup is handled by the functions that use the result.
fn get_test_envs(from_cli: &[KvPair]) -> Vec<KvPair> {
// This handles common envs like RUST_LOG
let mut envs = default_passthrough_envs();
envs.extend_from_slice(from_cli);
// forward test runner env vars to the inner test
for (key, val) in std::env::vars() {
if key.starts_with("TEST_PILOT") {
envs.push((key, OsString::from(val)).into());
}
}
envs
}
/// Validated `VMArgs` and other necessary metadata for tests.
struct ValidatedVMArgs {
/// VMArgs that will be passed into the VM with modified fields
inner: VMArgs,
/// True if the test command is listing tests
is_list: bool,
}
/// Further validate `VMArgs` parsed by clap and generate a new `VMArgs` with
/// content specific to test execution.
fn get_test_vm_args(orig_args: &VMArgs, cli_envs: &[KvPair]) -> Result<ValidatedVMArgs> {
if orig_args.timeout_secs.is_none() {
return Err(anyhow!("Test command must specify --timeout-secs."));
}
if !orig_args.output_dirs.is_empty() {
return Err(anyhow!(
"Test command must not specify --output-dirs. \
This will be parsed from env and test command parameters instead."
));
}
let envs = get_test_envs(cli_envs);
#[derive(Debug, Parser)]
struct TestArgsParser {
#[clap(subcommand)]
test: Test,
}
let mut orig_command = vec![OsString::from("bogus_exec")];
orig_command.extend_from_slice(
&orig_args
.mode
.command
.clone()
.ok_or(anyhow!("Test command must not be empty"))?,
);
let test_args = TestArgsParser::try_parse_from(orig_command)
.context("Test command does not match expected format of `<type> <command>`")?;
let is_list = test_args.test.is_list_tests();
let mut vm_args = orig_args.clone();
vm_args.output_dirs = test_args.test.output_dirs().into_iter().collect();
vm_args.mode.command = Some(test_args.test.into_inner_cmd());
vm_args.command_envs = envs;
vm_args.console_output_file = console_output_path_for_tpx()?;
Ok(ValidatedVMArgs {
inner: vm_args,
is_list,
})
}
/// For some tests, an explicit "list test" step is run against the test binary
/// to discover the tests to run. This command is not our intended test to
/// execute, so it's unnecessarily wasteful to execute it inside the VM. We
/// directly run it inside the container without booting VM.
fn list_test_command(args: &IsolateCmdArgs, validated_args: &ValidatedVMArgs) -> Result<Command> {
let mut output_dirs = validated_args.inner.get_container_output_dirs();
// RW bind-mount /dev/fuse for running XAR.
// More details in antlir/antlir2/testing/image_test/src/main.rs.
output_dirs.insert(PathBuf::from("/dev/fuse"));
let isolated = isolated(
&args.image,
validated_args.inner.command_envs.clone(),
output_dirs,
)?;
let mut inner_cmd = validated_args
.inner
.mode
.command
.as_ref()
.expect("command must exist here")
.iter();
let mut command = isolated.command(inner_cmd.next().expect("must have program arg"))?;
command.args(inner_cmd);
Ok(command)
}
/// For actual test command, we spawn the VM and run it.
fn vm_test_command(args: &IsolateCmdArgs, validated_args: &ValidatedVMArgs) -> Result<Command> {
let isolated = isolated(
&args.image,
validated_args.inner.command_envs.clone(),
validated_args.inner.get_container_output_dirs(),
)?;
let exe = env::current_exe().context("while getting argv[0]")?;
let mut command = isolated.command(exe)?;
command
.arg("run")
.arg("--machine-spec")
.arg(args.run_cmd_args.machine_spec.path())
.arg("--runtime-spec")
.arg(args.run_cmd_args.runtime_spec.path())
.args(validated_args.inner.to_args());
Ok(command)
}
/// `test` is similar to `respawn`, except that we assume control of some
/// inputs instead of allowing the caller to pass them in. Some inputs are
/// parsed from the test command.
fn test(args: &IsolateCmdArgs) -> Result<()> {
let validated_args = get_test_vm_args(&args.run_cmd_args.vm_args, &args.setenv)?;
let mut command = if validated_args.is_list {
list_test_command(args, &validated_args)
} else {
vm_test_command(args, &validated_args)
}?;
log_command(&mut command).status()?;
Ok(())
}
fn main() -> Result<()> {
tracing_subscriber::registry()
.with(tracing_subscriber::fmt::Layer::default())
.with(
tracing_subscriber::EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.from_env()
.expect("Invalid logging level set by env"),
)
.init();
Platform::set()?;
debug!("Args: {:?}", env::args());
let cli = Cli::parse();
match &cli.command {
Commands::Isolate(args) => respawn(args),
Commands::Run(args) => run(args),
Commands::Test(args) => test(args),
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::types::VMModeArgs;
#[test]
fn test_get_test_envs() {
env::set_var("RUST_LOG", "hello");
env::set_var("TEST_PILOT_A", "A");
let from_cli = vec![KvPair::from(("foo", "bar"))];
assert_eq!(
get_test_envs(&from_cli),
vec![
KvPair::from(("RUST_LOG", "hello")),
KvPair::from(("foo", "bar")),
KvPair::from(("TEST_PILOT_A", "A")),
],
)
}
#[test]
fn test_get_test_vm_args() {
let valid = VMArgs {
timeout_secs: Some(1),
mode: VMModeArgs {
command: Some(["custom", "whatever"].iter().map(OsString::from).collect()),
..Default::default()
},
..Default::default()
};
let empty_env = Vec::<KvPair>::new();
let mut expected = valid.clone();
expected.mode.command = Some(vec![OsString::from("whatever")]);
let parsed = get_test_vm_args(&valid, &empty_env).expect("Parsing should succeed");
assert_eq!(parsed.inner, expected);
assert!(!parsed.is_list);
let mut timeout = valid.clone();
timeout.timeout_secs = None;
assert!(get_test_vm_args(&timeout, &empty_env).is_err());
let mut output_dirs = valid.clone();
output_dirs.output_dirs = vec![PathBuf::from("/some")];
assert!(get_test_vm_args(&output_dirs, &empty_env).is_err());
let mut command = valid;
command.mode.command = None;
assert!(get_test_vm_args(&command, &empty_env).is_err());
command.mode.command = Some(vec![OsString::from("invalid")]);
assert!(get_test_vm_args(&command, &empty_env).is_err());
}
}
| RunCmdArgs | identifier_name |
main.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
mod disk;
mod isolation;
mod net;
mod runtime;
mod share;
mod ssh;
mod types;
mod utils;
mod vm;
use std::env;
use std::ffi::OsString;
use std::path::PathBuf;
use std::process::Command;
use anyhow::anyhow;
use anyhow::Context;
use clap::Args;
use clap::Parser;
use clap::Subcommand;
use image_test_lib::KvPair;
use image_test_lib::Test;
use json_arg::JsonFile;
use tempfile::tempdir;
use tracing::debug;
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::prelude::*;
use crate::isolation::default_passthrough_envs;
use crate::isolation::is_isolated;
use crate::isolation::isolated;
use crate::isolation::Platform;
use crate::runtime::set_runtime;
use crate::types::MachineOpts;
use crate::types::RuntimeOpts;
use crate::types::VMArgs;
use crate::utils::console_output_path_for_tpx;
use crate::utils::log_command;
use crate::vm::VM;
type Result<T> = std::result::Result<T, anyhow::Error>;
#[derive(Debug, Parser)]
struct Cli {
#[command(subcommand)]
command: Commands,
}
#[derive(Debug, Subcommand)]
enum Commands {
/// Run the VM. Must be executed inside container.
Run(RunCmdArgs),
/// Respawn inside isolated image and execute `Run` command.
Isolate(IsolateCmdArgs),
/// Run VM tests inside container.
Test(IsolateCmdArgs),
}
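// Illustrative invocation (binary name hypothetical; subcommand and flag names
// are derived by clap from the definitions above):
//   vm-runner isolate --image <layer-path> --machine-spec <json> --runtime-spec <json>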
/// Execute the VM
#[derive(Debug, Args)]
struct RunCmdArgs {
/// Json-encoded file for VM machine configuration
#[arg(long)]
machine_spec: JsonFile<MachineOpts>,
/// Json-encoded file describing paths of binaries required by VM
#[arg(long)]
runtime_spec: JsonFile<RuntimeOpts>,
#[clap(flatten)]
vm_args: VMArgs,
}
/// Spawn a container and execute the VM inside.
#[derive(Debug, Args)]
struct IsolateCmdArgs {
/// Path to container image.
#[arg(long)]
image: PathBuf,
/// Set these env vars in the container. If VM executes a command, these
/// env vars will also be prepended to the command.
#[arg(long)]
setenv: Vec<KvPair>,
/// Args for run command
#[clap(flatten)]
run_cmd_args: RunCmdArgs,
}
/// Actually start the VM. This needs to run inside an ephemeral container, as
/// many resources rely on the container for cleanup.
fn run(args: &RunCmdArgs) -> Result<()> {
if !is_isolated()? {
return Err(anyhow!("run must be called from inside container"));
}
debug!("RuntimeOpts: {:?}", args.runtime_spec);
debug!("MachineOpts: {:?}", args.machine_spec);
debug!("VMArgs: {:?}", args.vm_args);
set_runtime(args.runtime_spec.clone().into_inner())
.map_err(|_| anyhow!("Failed to set runtime"))?;
Ok(VM::new(args.machine_spec.clone().into_inner(), args.vm_args.clone())?.run()?)
}
/// Enter the isolated container, then respawn this binary inside it with the
/// `run` command and its parameters.
fn respawn(args: &IsolateCmdArgs) -> Result<()> {
let mut envs = default_passthrough_envs();
envs.extend(args.setenv.clone());
let mut vm_args = args.run_cmd_args.vm_args.clone();
vm_args.command_envs = envs.clone();
// Let's always capture console output unless it's console mode
let _console_dir;
if !vm_args.mode.console && vm_args.console_output_file.is_none() {
let dir = tempdir().context("Failed to create temp dir for console output")?;
vm_args.console_output_file = Some(dir.path().join("console.txt"));
_console_dir = dir;
}
let isolated = isolated(&args.image, envs, vm_args.get_container_output_dirs())?;
let exe = env::current_exe().context("while getting argv[0]")?;
let mut command = isolated.command(exe)?;
command
.arg("run")
.arg("--machine-spec")
.arg(args.run_cmd_args.machine_spec.path())
.arg("--runtime-spec")
.arg(args.run_cmd_args.runtime_spec.path())
.args(vm_args.to_args());
log_command(&mut command).status()?;
Ok(())
}
/// Merge all sources of our envs into the final list of env vars we should use
/// everywhere for tests. Dedup is handled by the functions that use the result.
fn get_test_envs(from_cli: &[KvPair]) -> Vec<KvPair> {
// This handles common envs like RUST_LOG
let mut envs = default_passthrough_envs();
envs.extend_from_slice(from_cli);
// forward test runner env vars to the inner test
for (key, val) in std::env::vars() {
if key.starts_with("TEST_PILOT") {
envs.push((key, OsString::from(val)).into());
}
}
envs
}
/// Validated `VMArgs` and other necessary metadata for tests.
struct ValidatedVMArgs {
/// VMArgs that will be passed into the VM with modified fields
inner: VMArgs,
/// True if the test command is listing tests
is_list: bool,
}
/// Further validate `VMArgs` parsed by clap and generate a new `VMArgs` with
/// content specific to test execution.
fn get_test_vm_args(orig_args: &VMArgs, cli_envs: &[KvPair]) -> Result<ValidatedVMArgs> {
if orig_args.timeout_secs.is_none() |
if !orig_args.output_dirs.is_empty() {
return Err(anyhow!(
"Test command must not specify --output-dirs. \
This will be parsed from env and test command parameters instead."
));
}
let envs = get_test_envs(cli_envs);
#[derive(Debug, Parser)]
struct TestArgsParser {
#[clap(subcommand)]
test: Test,
}
let mut orig_command = vec![OsString::from("bogus_exec")];
orig_command.extend_from_slice(
&orig_args
.mode
.command
.clone()
.ok_or(anyhow!("Test command must not be empty"))?,
);
let test_args = TestArgsParser::try_parse_from(orig_command)
.context("Test command does not match expected format of `<type> <command>`")?;
let is_list = test_args.test.is_list_tests();
let mut vm_args = orig_args.clone();
vm_args.output_dirs = test_args.test.output_dirs().into_iter().collect();
vm_args.mode.command = Some(test_args.test.into_inner_cmd());
vm_args.command_envs = envs;
vm_args.console_output_file = console_output_path_for_tpx()?;
Ok(ValidatedVMArgs {
inner: vm_args,
is_list,
})
}
/// For some tests, an explicit "list test" step is run against the test binary
/// to discover the tests to run. This command is not our intended test to
/// execute, so it's unnecessarily wasteful to execute it inside the VM. We
/// directly run it inside the container without booting VM.
fn list_test_command(args: &IsolateCmdArgs, validated_args: &ValidatedVMArgs) -> Result<Command> {
let mut output_dirs = validated_args.inner.get_container_output_dirs();
// RW bind-mount /dev/fuse for running XAR.
// More details in antlir/antlir2/testing/image_test/src/main.rs.
output_dirs.insert(PathBuf::from("/dev/fuse"));
let isolated = isolated(
&args.image,
validated_args.inner.command_envs.clone(),
output_dirs,
)?;
let mut inner_cmd = validated_args
.inner
.mode
.command
.as_ref()
.expect("command must exist here")
.iter();
let mut command = isolated.command(inner_cmd.next().expect("must have program arg"))?;
command.args(inner_cmd);
Ok(command)
}
/// For actual test command, we spawn the VM and run it.
fn vm_test_command(args: &IsolateCmdArgs, validated_args: &ValidatedVMArgs) -> Result<Command> {
let isolated = isolated(
&args.image,
validated_args.inner.command_envs.clone(),
validated_args.inner.get_container_output_dirs(),
)?;
let exe = env::current_exe().context("while getting argv[0]")?;
let mut command = isolated.command(exe)?;
command
.arg("run")
.arg("--machine-spec")
.arg(args.run_cmd_args.machine_spec.path())
.arg("--runtime-spec")
.arg(args.run_cmd_args.runtime_spec.path())
.args(validated_args.inner.to_args());
Ok(command)
}
/// `test` is similar to `respawn`, except that we assume control of some
/// inputs instead of allowing the caller to pass them in. Some inputs are
/// parsed from the test command.
fn test(args: &IsolateCmdArgs) -> Result<()> {
let validated_args = get_test_vm_args(&args.run_cmd_args.vm_args, &args.setenv)?;
let mut command = if validated_args.is_list {
list_test_command(args, &validated_args)
} else {
vm_test_command(args, &validated_args)
}?;
log_command(&mut command).status()?;
Ok(())
}
fn main() -> Result<()> {
tracing_subscriber::registry()
.with(tracing_subscriber::fmt::Layer::default())
.with(
tracing_subscriber::EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.from_env()
.expect("Invalid logging level set by env"),
)
.init();
Platform::set()?;
debug!("Args: {:?}", env::args());
let cli = Cli::parse();
match &cli.command {
Commands::Isolate(args) => respawn(args),
Commands::Run(args) => run(args),
Commands::Test(args) => test(args),
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::types::VMModeArgs;
#[test]
fn test_get_test_envs() {
env::set_var("RUST_LOG", "hello");
env::set_var("TEST_PILOT_A", "A");
let from_cli = vec![KvPair::from(("foo", "bar"))];
assert_eq!(
get_test_envs(&from_cli),
vec![
KvPair::from(("RUST_LOG", "hello")),
KvPair::from(("foo", "bar")),
KvPair::from(("TEST_PILOT_A", "A")),
],
)
}
#[test]
fn test_get_test_vm_args() {
let valid = VMArgs {
timeout_secs: Some(1),
mode: VMModeArgs {
command: Some(["custom", "whatever"].iter().map(OsString::from).collect()),
..Default::default()
},
..Default::default()
};
let empty_env = Vec::<KvPair>::new();
let mut expected = valid.clone();
expected.mode.command = Some(vec![OsString::from("whatever")]);
let parsed = get_test_vm_args(&valid, &empty_env).expect("Parsing should succeed");
assert_eq!(parsed.inner, expected);
assert!(!parsed.is_list);
let mut timeout = valid.clone();
timeout.timeout_secs = None;
assert!(get_test_vm_args(&timeout, &empty_env).is_err());
let mut output_dirs = valid.clone();
output_dirs.output_dirs = vec![PathBuf::from("/some")];
assert!(get_test_vm_args(&output_dirs, &empty_env).is_err());
let mut command = valid;
command.mode.command = None;
assert!(get_test_vm_args(&command, &empty_env).is_err());
command.mode.command = Some(vec![OsString::from("invalid")]);
assert!(get_test_vm_args(&command, &empty_env).is_err());
}
}
| {
return Err(anyhow!("Test command must specify --timeout-secs."));
} | conditional_block |
main.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
mod disk;
mod isolation;
mod net;
mod runtime;
mod share;
mod ssh;
mod types;
mod utils;
mod vm;
use std::env;
use std::ffi::OsString;
use std::path::PathBuf;
use std::process::Command;
use anyhow::anyhow;
use anyhow::Context;
use clap::Args;
use clap::Parser;
use clap::Subcommand;
use image_test_lib::KvPair;
use image_test_lib::Test;
use json_arg::JsonFile;
use tempfile::tempdir;
use tracing::debug;
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::prelude::*;
use crate::isolation::default_passthrough_envs;
use crate::isolation::is_isolated;
use crate::isolation::isolated;
use crate::isolation::Platform;
use crate::runtime::set_runtime;
use crate::types::MachineOpts;
use crate::types::RuntimeOpts;
use crate::types::VMArgs;
use crate::utils::console_output_path_for_tpx;
use crate::utils::log_command;
use crate::vm::VM;
type Result<T> = std::result::Result<T, anyhow::Error>;
#[derive(Debug, Parser)]
struct Cli {
#[command(subcommand)]
command: Commands,
}
#[derive(Debug, Subcommand)]
enum Commands {
/// Run the VM. Must be executed inside container.
Run(RunCmdArgs),
/// Respawn inside isolated image and execute `Run` command.
Isolate(IsolateCmdArgs),
/// Run VM tests inside container.
Test(IsolateCmdArgs),
}
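// Illustrative invocation (binary name hypothetical; subcommand and flag names
// are derived by clap from the definitions above):
//   vm-runner isolate --image <layer-path> --machine-spec <json> --runtime-spec <json>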
/// Execute the VM
#[derive(Debug, Args)]
struct RunCmdArgs {
/// Json-encoded file for VM machine configuration
#[arg(long)]
machine_spec: JsonFile<MachineOpts>,
/// Json-encoded file describing paths of binaries required by VM
#[arg(long)]
runtime_spec: JsonFile<RuntimeOpts>,
#[clap(flatten)]
vm_args: VMArgs,
}
/// Spawn a container and execute the VM inside.
#[derive(Debug, Args)]
struct IsolateCmdArgs {
/// Path to container image.
#[arg(long)]
image: PathBuf,
/// Set these env vars in the container. If VM executes a command, these
/// env vars will also be prepended to the command.
#[arg(long)]
setenv: Vec<KvPair>,
/// Args for run command
#[clap(flatten)]
run_cmd_args: RunCmdArgs,
}
/// Actually start the VM. This needs to run inside an ephemeral container, as
/// many resources rely on the container for cleanup.
fn run(args: &RunCmdArgs) -> Result<()> {
if !is_isolated()? {
return Err(anyhow!("run must be called from inside container"));
}
debug!("RuntimeOpts: {:?}", args.runtime_spec);
debug!("MachineOpts: {:?}", args.machine_spec);
debug!("VMArgs: {:?}", args.vm_args);
set_runtime(args.runtime_spec.clone().into_inner())
.map_err(|_| anyhow!("Failed to set runtime"))?;
Ok(VM::new(args.machine_spec.clone().into_inner(), args.vm_args.clone())?.run()?)
}
/// Enter the isolated container, then respawn this binary inside it with the
/// `run` command and its parameters.
fn respawn(args: &IsolateCmdArgs) -> Result<()> {
let mut envs = default_passthrough_envs();
envs.extend(args.setenv.clone());
let mut vm_args = args.run_cmd_args.vm_args.clone();
vm_args.command_envs = envs.clone();
// Let's always capture console output unless it's console mode
let _console_dir;
if !vm_args.mode.console && vm_args.console_output_file.is_none() {
let dir = tempdir().context("Failed to create temp dir for console output")?;
vm_args.console_output_file = Some(dir.path().join("console.txt"));
_console_dir = dir;
}
let isolated = isolated(&args.image, envs, vm_args.get_container_output_dirs())?;
let exe = env::current_exe().context("while getting argv[0]")?;
let mut command = isolated.command(exe)?;
command
.arg("run")
.arg("--machine-spec")
.arg(args.run_cmd_args.machine_spec.path())
.arg("--runtime-spec")
.arg(args.run_cmd_args.runtime_spec.path())
.args(vm_args.to_args());
log_command(&mut command).status()?;
Ok(())
}
/// Merge all sources of our envs into the final list of env vars we should use
/// everywhere for tests. Dedup is handled by the functions that use the result.
fn get_test_envs(from_cli: &[KvPair]) -> Vec<KvPair> {
// This handles common envs like RUST_LOG
let mut envs = default_passthrough_envs();
envs.extend_from_slice(from_cli);
// forward test runner env vars to the inner test
for (key, val) in std::env::vars() {
if key.starts_with("TEST_PILOT") {
envs.push((key, OsString::from(val)).into());
}
}
envs
}
/// Validated `VMArgs` and other necessary metadata for tests.
struct ValidatedVMArgs {
/// VMArgs that will be passed into the VM with modified fields
inner: VMArgs,
/// True if the test command is listing tests
is_list: bool,
}
/// Further validate `VMArgs` parsed by clap and generate a new `VMArgs` with
/// content specific to test execution.
fn get_test_vm_args(orig_args: &VMArgs, cli_envs: &[KvPair]) -> Result<ValidatedVMArgs> {
if orig_args.timeout_secs.is_none() {
return Err(anyhow!("Test command must specify --timeout-secs."));
}
if !orig_args.output_dirs.is_empty() {
return Err(anyhow!(
"Test command must not specify --output-dirs. \
This will be parsed from env and test command parameters instead."
));
}
let envs = get_test_envs(cli_envs);
#[derive(Debug, Parser)]
struct TestArgsParser {
#[clap(subcommand)]
test: Test,
}
let mut orig_command = vec![OsString::from("bogus_exec")];
orig_command.extend_from_slice(
&orig_args
.mode
.command
.clone()
.ok_or(anyhow!("Test command must not be empty"))?,
);
let test_args = TestArgsParser::try_parse_from(orig_command)
.context("Test command does not match expected format of `<type> <command>`")?;
let is_list = test_args.test.is_list_tests();
let mut vm_args = orig_args.clone();
vm_args.output_dirs = test_args.test.output_dirs().into_iter().collect();
vm_args.mode.command = Some(test_args.test.into_inner_cmd());
vm_args.command_envs = envs;
vm_args.console_output_file = console_output_path_for_tpx()?;
Ok(ValidatedVMArgs {
inner: vm_args,
is_list,
})
}
/// For some tests, an explicit "list test" step is run against the test binary
/// to discover the tests to run. This command is not our intended test to
/// execute, so it's unnecessarily wasteful to execute it inside the VM. We
/// directly run it inside the container without booting VM.
fn list_test_command(args: &IsolateCmdArgs, validated_args: &ValidatedVMArgs) -> Result<Command> {
let mut output_dirs = validated_args.inner.get_container_output_dirs();
// RW bind-mount /dev/fuse for running XAR.
// More details in antlir/antlir2/testing/image_test/src/main.rs.
output_dirs.insert(PathBuf::from("/dev/fuse"));
let isolated = isolated(
&args.image,
validated_args.inner.command_envs.clone(),
output_dirs,
)?;
let mut inner_cmd = validated_args
.inner
.mode
.command
.as_ref()
.expect("command must exist here")
.iter();
let mut command = isolated.command(inner_cmd.next().expect("must have program arg"))?;
command.args(inner_cmd);
Ok(command)
}
/// For actual test command, we spawn the VM and run it.
fn vm_test_command(args: &IsolateCmdArgs, validated_args: &ValidatedVMArgs) -> Result<Command> {
let isolated = isolated(
&args.image,
validated_args.inner.command_envs.clone(),
validated_args.inner.get_container_output_dirs(),
)?;
let exe = env::current_exe().context("while getting argv[0]")?;
let mut command = isolated.command(exe)?;
command
.arg("run")
.arg("--machine-spec")
.arg(args.run_cmd_args.machine_spec.path())
.arg("--runtime-spec")
.arg(args.run_cmd_args.runtime_spec.path())
.args(validated_args.inner.to_args());
Ok(command)
}
/// `test` is similar to `respawn`, except that we assume control of some
/// inputs instead of allowing the caller to pass them in. Some inputs are
/// parsed from the test command.
fn test(args: &IsolateCmdArgs) -> Result<()> {
let validated_args = get_test_vm_args(&args.run_cmd_args.vm_args, &args.setenv)?;
let mut command = if validated_args.is_list {
list_test_command(args, &validated_args)
} else {
vm_test_command(args, &validated_args)
}?;
log_command(&mut command).status()?;
Ok(())
}
fn main() -> Result<()> {
tracing_subscriber::registry()
.with(tracing_subscriber::fmt::Layer::default()) | )
.init();
Platform::set()?;
debug!("Args: {:?}", env::args());
let cli = Cli::parse();
match &cli.command {
Commands::Isolate(args) => respawn(args),
Commands::Run(args) => run(args),
Commands::Test(args) => test(args),
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::types::VMModeArgs;
#[test]
fn test_get_test_envs() {
env::set_var("RUST_LOG", "hello");
env::set_var("TEST_PILOT_A", "A");
let from_cli = vec![KvPair::from(("foo", "bar"))];
assert_eq!(
get_test_envs(&from_cli),
vec![
KvPair::from(("RUST_LOG", "hello")),
KvPair::from(("foo", "bar")),
KvPair::from(("TEST_PILOT_A", "A")),
],
)
}
#[test]
fn test_get_test_vm_args() {
let valid = VMArgs {
timeout_secs: Some(1),
mode: VMModeArgs {
command: Some(["custom", "whatever"].iter().map(OsString::from).collect()),
..Default::default()
},
..Default::default()
};
let empty_env = Vec::<KvPair>::new();
let mut expected = valid.clone();
expected.mode.command = Some(vec![OsString::from("whatever")]);
let parsed = get_test_vm_args(&valid, &empty_env).expect("Parsing should succeed");
assert_eq!(parsed.inner, expected);
assert!(!parsed.is_list);
let mut timeout = valid.clone();
timeout.timeout_secs = None;
assert!(get_test_vm_args(&timeout, &empty_env).is_err());
let mut output_dirs = valid.clone();
output_dirs.output_dirs = vec![PathBuf::from("/some")];
assert!(get_test_vm_args(&output_dirs, &empty_env).is_err());
let mut command = valid;
command.mode.command = None;
assert!(get_test_vm_args(&command, &empty_env).is_err());
command.mode.command = Some(vec![OsString::from("invalid")]);
assert!(get_test_vm_args(&command, &empty_env).is_err());
}
} | .with(
tracing_subscriber::EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.from_env()
.expect("Invalid logging level set by env"), | random_line_split |
error.rs | //! 9P error representations.
//!
//! In 9P2000 errors are represented as strings.
//! All the error strings in this module are imported from include/net/9p/error.c of the Linux kernel.
//!
//! By contrast, in 9P2000.L, errors are represented as numbers (errno).
//! Using the Linux system errno numbers is the expected behaviour.
extern crate nix;
use error::errno::*;
use std::error as stderror;
use std::io::ErrorKind::*;
use std::{fmt, io};
fn errno_from_ioerror(e: &io::Error) -> nix::errno::Errno {
e.raw_os_error()
.map(nix::errno::from_i32)
.unwrap_or_else(|| match e.kind() {
NotFound => ENOENT,
PermissionDenied => EPERM,
ConnectionRefused => ECONNREFUSED,
ConnectionReset => ECONNRESET,
ConnectionAborted => ECONNABORTED,
NotConnected => ENOTCONN,
AddrInUse => EADDRINUSE,
AddrNotAvailable => EADDRNOTAVAIL,
BrokenPipe => EPIPE,
AlreadyExists => EALREADY,
WouldBlock => EAGAIN,
InvalidInput => EINVAL,
InvalidData => EINVAL,
TimedOut => ETIMEDOUT,
WriteZero => EAGAIN,
Interrupted => EINTR,
_ => EIO, // covers ErrorKind::Other and any unlisted kinds
})
}
/// 9P error type which is convertible to an errno.
///
/// The value of `Error::errno()` will be used for Rlerror.
///
/// # Protocol
/// 9P2000.L
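///
/// # Example
///
/// A minimal sketch; an `ENOENT` converts straight through to the errno:
///
/// ```ignore
/// let err = Error::from(nix::errno::Errno::ENOENT);
/// assert_eq!(err.errno(), nix::errno::Errno::ENOENT);
/// ```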
#[derive(Debug)]
pub enum Error {
/// System error containing an errno.
No(nix::errno::Errno),
/// I/O error.
Io(io::Error),
}
impl Error {
/// Get an errno representation.
pub fn errno(&self) -> nix::errno::Errno {
match *self {
Error::No(ref e) => e.clone(),
Error::Io(ref e) => errno_from_ioerror(e),
}
} | }
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::No(ref e) => write!(f, "System error: {}", e.desc()),
Error::Io(ref e) => write!(f, "I/O error: {}", e),
}
}
}
impl stderror::Error for Error {
fn description(&self) -> &str {
match *self {
Error::No(ref e) => e.desc(),
Error::Io(ref e) => e.description(),
}
}
fn cause(&self) -> Option<&stderror::Error> {
match *self {
Error::No(_) => None,
Error::Io(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::Io(e)
}
}
impl<'a> From<&'a io::Error> for Error {
fn from(e: &'a io::Error) -> Self {
Error::No(errno_from_ioerror(e))
}
}
impl From<nix::errno::Errno> for Error {
fn from(e: nix::errno::Errno) -> Self {
Error::No(e)
}
}
impl From<nix::Error> for Error {
fn from(e: nix::Error) -> Self {
Error::No(e.errno())
}
}
/// The system errno definitions.
///
/// # Protocol
/// 9P2000.L
pub mod errno {
extern crate nix;
pub use self::nix::errno::Errno::*;
}
/// 9P error strings imported from Linux.
///
/// # Protocol
/// 9P2000
pub mod string {
pub const EPERM: &'static str = "Operation not permitted";
pub const EPERM_WSTAT: &'static str = "wstat prohibited";
pub const ENOENT: &'static str = "No such file or directory";
pub const ENOENT_DIR: &'static str = "directory entry not found";
pub const ENOENT_FILE: &'static str = "file not found";
pub const EINTR: &'static str = "Interrupted system call";
pub const EIO: &'static str = "Input/output error";
pub const ENXIO: &'static str = "No such device or address";
pub const E2BIG: &'static str = "Argument list too long";
pub const EBADF: &'static str = "Bad file descriptor";
pub const EAGAIN: &'static str = "Resource temporarily unavailable";
pub const ENOMEM: &'static str = "Cannot allocate memory";
pub const EACCES: &'static str = "Permission denied";
pub const EFAULT: &'static str = "Bad address";
pub const ENOTBLK: &'static str = "Block device required";
pub const EBUSY: &'static str = "Device or resource busy";
pub const EEXIST: &'static str = "File exists";
pub const EXDEV: &'static str = "Invalid cross-device link";
pub const ENODEV: &'static str = "No such device";
pub const ENOTDIR: &'static str = "Not a directory";
pub const EISDIR: &'static str = "Is a directory";
pub const EINVAL: &'static str = "Invalid argument";
pub const ENFILE: &'static str = "Too many open files in system";
pub const EMFILE: &'static str = "Too many open files";
pub const ETXTBSY: &'static str = "Text file busy";
pub const EFBIG: &'static str = "File too large";
pub const ENOSPC: &'static str = "No space left on device";
pub const ESPIPE: &'static str = "Illegal seek";
pub const EROFS: &'static str = "Read-only file system";
pub const EMLINK: &'static str = "Too many links";
pub const EPIPE: &'static str = "Broken pipe";
pub const EDOM: &'static str = "Numerical argument out of domain";
pub const ERANGE: &'static str = "Numerical result out of range";
pub const EDEADLK: &'static str = "Resource deadlock avoided";
pub const ENAMETOOLONG: &'static str = "File name too long";
pub const ENOLCK: &'static str = "No locks available";
pub const ENOSYS: &'static str = "Function not implemented";
pub const ENOTEMPTY: &'static str = "Directory not empty";
pub const ELOOP: &'static str = "Too many levels of symbolic links";
pub const ENOMSG: &'static str = "No message of desired type";
pub const EIDRM: &'static str = "Identifier removed";
pub const ENODATA: &'static str = "No data available";
pub const ENONET: &'static str = "Machine is not on the network";
pub const ENOPKG: &'static str = "Package not installed";
pub const EREMOTE: &'static str = "Object is remote";
pub const ENOLINK: &'static str = "Link has been severed";
pub const ECOMM: &'static str = "Communication error on send";
pub const EPROTO: &'static str = "Protocol error";
pub const EBADMSG: &'static str = "Bad message";
pub const EBADFD: &'static str = "File descriptor in bad state";
pub const ESTRPIPE: &'static str = "Streams pipe error";
pub const EUSERS: &'static str = "Too many users";
pub const ENOTSOCK: &'static str = "Socket operation on non-socket";
pub const EMSGSIZE: &'static str = "Message too long";
pub const ENOPROTOOPT: &'static str = "Protocol not available";
pub const EPROTONOSUPPORT: &'static str = "Protocol not supported";
pub const ESOCKTNOSUPPORT: &'static str = "Socket type not supported";
pub const EOPNOTSUPP: &'static str = "Operation not supported";
pub const EPFNOSUPPORT: &'static str = "Protocol family not supported";
pub const ENETDOWN: &'static str = "Network is down";
pub const ENETUNREACH: &'static str = "Network is unreachable";
pub const ENETRESET: &'static str = "Network dropped connection on reset";
pub const ECONNABORTED: &'static str = "Software caused connection abort";
pub const ECONNRESET: &'static str = "Connection reset by peer";
pub const ENOBUFS: &'static str = "No buffer space available";
pub const EISCONN: &'static str = "Transport endpoint is already connected";
pub const ENOTCONN: &'static str = "Transport endpoint is not connected";
pub const ESHUTDOWN: &'static str = "Cannot send after transport endpoint shutdown";
pub const ETIMEDOUT: &'static str = "Connection timed out";
pub const ECONNREFUSED: &'static str = "Connection refused";
pub const EHOSTDOWN: &'static str = "Host is down";
pub const EHOSTUNREACH: &'static str = "No route to host";
pub const EALREADY: &'static str = "Operation already in progress";
pub const EINPROGRESS: &'static str = "Operation now in progress";
pub const EISNAM: &'static str = "Is a named type file";
pub const EREMOTEIO: &'static str = "Remote I/O error";
pub const EDQUOT: &'static str = "Disk quota exceeded";
pub const EBADF2: &'static str = "fid unknown or out of range";
pub const EACCES2: &'static str = "permission denied";
pub const ENOENT_FILE2: &'static str = "file does not exist";
pub const ECONNREFUSED2: &'static str = "authentication failed";
pub const ESPIPE2: &'static str = "bad offset in directory read";
pub const EBADF3: &'static str = "bad use of fid";
pub const EPERM_CONV: &'static str = "wstat can't convert between files and directories";
pub const ENOTEMPTY2: &'static str = "directory is not empty";
pub const EEXIST2: &'static str = "file exists";
pub const EEXIST3: &'static str = "file already exists";
pub const EEXIST4: &'static str = "file or directory already exists";
pub const EBADF4: &'static str = "fid already in use";
pub const ETXTBSY2: &'static str = "file in use";
pub const EIO2: &'static str = "i/o error";
pub const ETXTBSY3: &'static str = "file already open for I/O";
pub const EINVAL2: &'static str = "illegal mode";
pub const ENAMETOOLONG2: &'static str = "illegal name";
pub const ENOTDIR2: &'static str = "not a directory";
pub const EPERM_GRP: &'static str = "not a member of proposed group";
pub const EACCES3: &'static str = "not owner";
pub const EACCES4: &'static str = "only owner can change group in wstat";
pub const EROFS2: &'static str = "read only file system";
pub const EPERM_SPFILE: &'static str = "no access to special file";
pub const EIO3: &'static str = "i/o count too large";
pub const EINVAL3: &'static str = "unknown group";
pub const EINVAL4: &'static str = "unknown user";
pub const EPROTO2: &'static str = "bogus wstat buffer";
pub const EAGAIN2: &'static str = "exclusive use file already open";
pub const EIO4: &'static str = "corrupted directory entry";
pub const EIO5: &'static str = "corrupted file entry";
pub const EIO6: &'static str = "corrupted block label";
pub const EIO7: &'static str = "corrupted meta data";
pub const EINVAL5: &'static str = "illegal offset";
pub const ENOENT_PATH: &'static str = "illegal path element";
pub const EIO8: &'static str = "root of file system is corrupted";
pub const EIO9: &'static str = "corrupted super block";
pub const EPROTO3: &'static str = "protocol botch";
pub const ENOSPC2: &'static str = "file system is full";
pub const EAGAIN3: &'static str = "file is in use";
pub const ENOENT_ALLOC: &'static str = "directory entry is not allocated";
pub const EROFS3: &'static str = "file is read only";
pub const EIDRM2: &'static str = "file has been removed";
pub const EPERM_TRUNCATE: &'static str = "only support truncation to zero length";
pub const EPERM_RMROOT: &'static str = "cannot remove root";
pub const EFBIG2: &'static str = "file too big";
pub const EIO10: &'static str = "venti i/o error";
} | random_line_split |
|
error.rs | //! 9P error representations.
//!
//! In 9P2000 errors are represented as strings.
//! All the error strings in this module are imported from include/net/9p/error.c of Linux kernel.
//!
//! By contrast, in 9P2000.L, errors are represented as numbers (errno).
//! Using the Linux system errno numbers is the expected behaviour.
extern crate nix;
use error::errno::*;
use std::error as stderror;
use std::io::ErrorKind::*;
use std::{fmt, io};
fn errno_from_ioerror(e: &io::Error) -> nix::errno::Errno {
e.raw_os_error()
.map(nix::errno::from_i32)
.unwrap_or_else(|| match e.kind() {
NotFound => ENOENT,
PermissionDenied => EPERM,
ConnectionRefused => ECONNREFUSED,
ConnectionReset => ECONNRESET,
ConnectionAborted => ECONNABORTED,
NotConnected => ENOTCONN,
AddrInUse => EADDRINUSE,
AddrNotAvailable => EADDRNOTAVAIL,
BrokenPipe => EPIPE,
AlreadyExists => EALREADY,
WouldBlock => EAGAIN,
InvalidInput => EINVAL,
InvalidData => EINVAL,
TimedOut => ETIMEDOUT,
WriteZero => EAGAIN,
Interrupted => EINTR,
Other | _ => EIO,
})
}
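// Illustrative sketch, not part of the original file: when the io::Error has no
// raw OS errno, the kind-based fallback above decides the value. Assumes this
// test module sits beside `errno_from_ioerror` so the private fn is in scope.
#[cfg(test)]
mod errno_mapping_sketch {
    use super::*;
    #[test]
    fn kind_fallback_maps_not_found_to_enoent() {
        // No raw_os_error here, so the match on ErrorKind is exercised.
        let e = io::Error::new(NotFound, "missing file");
        assert_eq!(errno_from_ioerror(&e), ENOENT);
    }
}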
/// 9P error type which is convertible to an errno.
///
/// The value of `Error::errno()` will be used for Rlerror.
///
/// # Protocol
/// 9P2000.L
#[derive(Debug)]
pub enum Error {
/// System error containing an errno.
No(nix::errno::Errno),
/// I/O error.
Io(io::Error),
}
impl Error {
/// Get an errno representations.
pub fn errno(&self) -> nix::errno::Errno {
match *self {
Error::No(ref e) => e.clone(),
Error::Io(ref e) => errno_from_ioerror(e),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::No(ref e) => write!(f, "System error: {}", e.desc()),
Error::Io(ref e) => write!(f, "I/O error: {}", e),
}
}
}
impl stderror::Error for Error {
fn description(&self) -> &str {
match *self {
Error::No(ref e) => e.desc(),
Error::Io(ref e) => e.description(),
}
}
fn cause(&self) -> Option<&stderror::Error> {
match *self {
Error::No(_) => None,
Error::Io(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::Io(e)
}
}
impl<'a> From<&'a io::Error> for Error {
fn from(e: &'a io::Error) -> Self {
Error::No(errno_from_ioerror(e))
}
}
impl From<nix::errno::Errno> for Error {
fn from(e: nix::errno::Errno) -> Self |
}
impl From<nix::Error> for Error {
fn from(e: nix::Error) -> Self {
Error::No(e.errno())
}
}
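// Hedged usage sketch (added for illustration, not in the original): any of the
// `From` impls above can feed `?`, and `Error::errno()` then recovers the
// number an Rlerror would carry.
#[cfg(test)]
mod conversion_sketch {
    use super::*;
    #[test]
    fn io_error_converts_and_yields_an_errno() {
        let io_err = io::Error::new(PermissionDenied, "denied");
        let err: Error = io_err.into(); // via `From<io::Error>`
        assert_eq!(err.errno(), EPERM);
    }
}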
/// The system errno definitions.
///
/// # Protocol
/// 9P2000.L
pub mod errno {
extern crate nix;
pub use self::nix::errno::Errno::*;
}
/// 9P error strings imported from Linux.
///
/// # Protocol
/// 9P2000
pub mod string {
pub const EPERM: &'static str = "Operation not permitted";
pub const EPERM_WSTAT: &'static str = "wstat prohibited";
pub const ENOENT: &'static str = "No such file or directory";
pub const ENOENT_DIR: &'static str = "directory entry not found";
pub const ENOENT_FILE: &'static str = "file not found";
pub const EINTR: &'static str = "Interrupted system call";
pub const EIO: &'static str = "Input/output error";
pub const ENXIO: &'static str = "No such device or address";
pub const E2BIG: &'static str = "Argument list too long";
pub const EBADF: &'static str = "Bad file descriptor";
pub const EAGAIN: &'static str = "Resource temporarily unavailable";
pub const ENOMEM: &'static str = "Cannot allocate memory";
pub const EACCES: &'static str = "Permission denied";
pub const EFAULT: &'static str = "Bad address";
pub const ENOTBLK: &'static str = "Block device required";
pub const EBUSY: &'static str = "Device or resource busy";
pub const EEXIST: &'static str = "File exists";
pub const EXDEV: &'static str = "Invalid cross-device link";
pub const ENODEV: &'static str = "No such device";
pub const ENOTDIR: &'static str = "Not a directory";
pub const EISDIR: &'static str = "Is a directory";
pub const EINVAL: &'static str = "Invalid argument";
pub const ENFILE: &'static str = "Too many open files in system";
pub const EMFILE: &'static str = "Too many open files";
pub const ETXTBSY: &'static str = "Text file busy";
pub const EFBIG: &'static str = "File too large";
pub const ENOSPC: &'static str = "No space left on device";
pub const ESPIPE: &'static str = "Illegal seek";
pub const EROFS: &'static str = "Read-only file system";
pub const EMLINK: &'static str = "Too many links";
pub const EPIPE: &'static str = "Broken pipe";
pub const EDOM: &'static str = "Numerical argument out of domain";
pub const ERANGE: &'static str = "Numerical result out of range";
pub const EDEADLK: &'static str = "Resource deadlock avoided";
pub const ENAMETOOLONG: &'static str = "File name too long";
pub const ENOLCK: &'static str = "No locks available";
pub const ENOSYS: &'static str = "Function not implemented";
pub const ENOTEMPTY: &'static str = "Directory not empty";
pub const ELOOP: &'static str = "Too many levels of symbolic links";
pub const ENOMSG: &'static str = "No message of desired type";
pub const EIDRM: &'static str = "Identifier removed";
pub const ENODATA: &'static str = "No data available";
pub const ENONET: &'static str = "Machine is not on the network";
pub const ENOPKG: &'static str = "Package not installed";
pub const EREMOTE: &'static str = "Object is remote";
pub const ENOLINK: &'static str = "Link has been severed";
pub const ECOMM: &'static str = "Communication error on send";
pub const EPROTO: &'static str = "Protocol error";
pub const EBADMSG: &'static str = "Bad message";
pub const EBADFD: &'static str = "File descriptor in bad state";
pub const ESTRPIPE: &'static str = "Streams pipe error";
pub const EUSERS: &'static str = "Too many users";
pub const ENOTSOCK: &'static str = "Socket operation on non-socket";
pub const EMSGSIZE: &'static str = "Message too long";
pub const ENOPROTOOPT: &'static str = "Protocol not available";
pub const EPROTONOSUPPORT: &'static str = "Protocol not supported";
pub const ESOCKTNOSUPPORT: &'static str = "Socket type not supported";
pub const EOPNOTSUPP: &'static str = "Operation not supported";
pub const EPFNOSUPPORT: &'static str = "Protocol family not supported";
pub const ENETDOWN: &'static str = "Network is down";
pub const ENETUNREACH: &'static str = "Network is unreachable";
pub const ENETRESET: &'static str = "Network dropped connection on reset";
pub const ECONNABORTED: &'static str = "Software caused connection abort";
pub const ECONNRESET: &'static str = "Connection reset by peer";
pub const ENOBUFS: &'static str = "No buffer space available";
pub const EISCONN: &'static str = "Transport endpoint is already connected";
pub const ENOTCONN: &'static str = "Transport endpoint is not connected";
pub const ESHUTDOWN: &'static str = "Cannot send after transport endpoint shutdown";
pub const ETIMEDOUT: &'static str = "Connection timed out";
pub const ECONNREFUSED: &'static str = "Connection refused";
pub const EHOSTDOWN: &'static str = "Host is down";
pub const EHOSTUNREACH: &'static str = "No route to host";
pub const EALREADY: &'static str = "Operation already in progress";
pub const EINPROGRESS: &'static str = "Operation now in progress";
pub const EISNAM: &'static str = "Is a named type file";
pub const EREMOTEIO: &'static str = "Remote I/O error";
pub const EDQUOT: &'static str = "Disk quota exceeded";
pub const EBADF2: &'static str = "fid unknown or out of range";
pub const EACCES2: &'static str = "permission denied";
pub const ENOENT_FILE2: &'static str = "file does not exist";
pub const ECONNREFUSED2: &'static str = "authentication failed";
pub const ESPIPE2: &'static str = "bad offset in directory read";
pub const EBADF3: &'static str = "bad use of fid";
pub const EPERM_CONV: &'static str = "wstat can't convert between files and directories";
pub const ENOTEMPTY2: &'static str = "directory is not empty";
pub const EEXIST2: &'static str = "file exists";
pub const EEXIST3: &'static str = "file already exists";
pub const EEXIST4: &'static str = "file or directory already exists";
pub const EBADF4: &'static str = "fid already in use";
pub const ETXTBSY2: &'static str = "file in use";
pub const EIO2: &'static str = "i/o error";
pub const ETXTBSY3: &'static str = "file already open for I/O";
pub const EINVAL2: &'static str = "illegal mode";
pub const ENAMETOOLONG2: &'static str = "illegal name";
pub const ENOTDIR2: &'static str = "not a directory";
pub const EPERM_GRP: &'static str = "not a member of proposed group";
pub const EACCES3: &'static str = "not owner";
pub const EACCES4: &'static str = "only owner can change group in wstat";
pub const EROFS2: &'static str = "read only file system";
pub const EPERM_SPFILE: &'static str = "no access to special file";
pub const EIO3: &'static str = "i/o count too large";
pub const EINVAL3: &'static str = "unknown group";
pub const EINVAL4: &'static str = "unknown user";
pub const EPROTO2: &'static str = "bogus wstat buffer";
pub const EAGAIN2: &'static str = "exclusive use file already open";
pub const EIO4: &'static str = "corrupted directory entry";
pub const EIO5: &'static str = "corrupted file entry";
pub const EIO6: &'static str = "corrupted block label";
pub const EIO7: &'static str = "corrupted meta data";
pub const EINVAL5: &'static str = "illegal offset";
pub const ENOENT_PATH: &'static str = "illegal path element";
pub const EIO8: &'static str = "root of file system is corrupted";
pub const EIO9: &'static str = "corrupted super block";
pub const EPROTO3: &'static str = "protocol botch";
pub const ENOSPC2: &'static str = "file system is full";
pub const EAGAIN3: &'static str = "file is in use";
pub const ENOENT_ALLOC: &'static str = "directory entry is not allocated";
pub const EROFS3: &'static str = "file is read only";
pub const EIDRM2: &'static str = "file has been removed";
pub const EPERM_TRUNCATE: &'static str = "only support truncation to zero length";
pub const EPERM_RMROOT: &'static str = "cannot remove root";
pub const EFBIG2: &'static str = "file too big";
pub const EIO10: &'static str = "venti i/o error";
}
| {
Error::No(e)
} | identifier_body |
error.rs | //! 9P error representations.
//!
//! In 9P2000 errors are represented as strings.
//! All the error strings in this module are imported from include/net/9p/error.c of Linux kernel.
//!
//! By contrast, in 9P2000.L, errors are represented as numbers (errno).
//! Using the Linux system errno numbers is the expected behaviour.
extern crate nix;
use error::errno::*;
use std::error as stderror;
use std::io::ErrorKind::*;
use std::{fmt, io};
fn | (e: &io::Error) -> nix::errno::Errno {
e.raw_os_error()
.map(nix::errno::from_i32)
.unwrap_or_else(|| match e.kind() {
NotFound => ENOENT,
PermissionDenied => EPERM,
ConnectionRefused => ECONNREFUSED,
ConnectionReset => ECONNRESET,
ConnectionAborted => ECONNABORTED,
NotConnected => ENOTCONN,
AddrInUse => EADDRINUSE,
AddrNotAvailable => EADDRNOTAVAIL,
BrokenPipe => EPIPE,
AlreadyExists => EALREADY,
WouldBlock => EAGAIN,
InvalidInput => EINVAL,
InvalidData => EINVAL,
TimedOut => ETIMEDOUT,
WriteZero => EAGAIN,
Interrupted => EINTR,
Other | _ => EIO,
})
}
/// 9P error type which is convertible to an errno.
///
/// The value of `Error::errno()` will be used for Rlerror.
///
/// # Protocol
/// 9P2000.L
#[derive(Debug)]
pub enum Error {
/// System error containing an errno.
No(nix::errno::Errno),
/// I/O error.
Io(io::Error),
}
impl Error {
/// Get an errno representations.
pub fn errno(&self) -> nix::errno::Errno {
match *self {
Error::No(ref e) => e.clone(),
Error::Io(ref e) => errno_from_ioerror(e),
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::No(ref e) => write!(f, "System error: {}", e.desc()),
Error::Io(ref e) => write!(f, "I/O error: {}", e),
}
}
}
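// Sketch (not in the original): what the Display impl above renders. The exact
// description text comes from `Errno::desc()` in the `nix` crate, so only the
// prefix we control is asserted here.
#[cfg(test)]
mod display_sketch {
    use super::*;
    #[test]
    fn system_errors_render_with_a_description() {
        let msg = format!("{}", Error::No(ENOENT));
        assert!(msg.starts_with("System error: "));
    }
}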
impl stderror::Error for Error {
fn description(&self) -> &str {
match *self {
Error::No(ref e) => e.desc(),
Error::Io(ref e) => e.description(),
}
}
fn cause(&self) -> Option<&stderror::Error> {
match *self {
Error::No(_) => None,
Error::Io(ref e) => Some(e),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::Io(e)
}
}
impl<'a> From<&'a io::Error> for Error {
fn from(e: &'a io::Error) -> Self {
Error::No(errno_from_ioerror(e))
}
}
impl From<nix::errno::Errno> for Error {
fn from(e: nix::errno::Errno) -> Self {
Error::No(e)
}
}
impl From<nix::Error> for Error {
fn from(e: nix::Error) -> Self {
Error::No(e.errno())
}
}
/// The system errno definitions.
///
/// # Protocol
/// 9P2000.L
pub mod errno {
extern crate nix;
pub use self::nix::errno::Errno::*;
}
/// 9P error strings imported from Linux.
///
/// # Protocol
/// 9P2000
pub mod string {
pub const EPERM: &'static str = "Operation not permitted";
pub const EPERM_WSTAT: &'static str = "wstat prohibited";
pub const ENOENT: &'static str = "No such file or directory";
pub const ENOENT_DIR: &'static str = "directory entry not found";
pub const ENOENT_FILE: &'static str = "file not found";
pub const EINTR: &'static str = "Interrupted system call";
pub const EIO: &'static str = "Input/output error";
pub const ENXIO: &'static str = "No such device or address";
pub const E2BIG: &'static str = "Argument list too long";
pub const EBADF: &'static str = "Bad file descriptor";
pub const EAGAIN: &'static str = "Resource temporarily unavailable";
pub const ENOMEM: &'static str = "Cannot allocate memory";
pub const EACCES: &'static str = "Permission denied";
pub const EFAULT: &'static str = "Bad address";
pub const ENOTBLK: &'static str = "Block device required";
pub const EBUSY: &'static str = "Device or resource busy";
pub const EEXIST: &'static str = "File exists";
pub const EXDEV: &'static str = "Invalid cross-device link";
pub const ENODEV: &'static str = "No such device";
pub const ENOTDIR: &'static str = "Not a directory";
pub const EISDIR: &'static str = "Is a directory";
pub const EINVAL: &'static str = "Invalid argument";
pub const ENFILE: &'static str = "Too many open files in system";
pub const EMFILE: &'static str = "Too many open files";
pub const ETXTBSY: &'static str = "Text file busy";
pub const EFBIG: &'static str = "File too large";
pub const ENOSPC: &'static str = "No space left on device";
pub const ESPIPE: &'static str = "Illegal seek";
pub const EROFS: &'static str = "Read-only file system";
pub const EMLINK: &'static str = "Too many links";
pub const EPIPE: &'static str = "Broken pipe";
pub const EDOM: &'static str = "Numerical argument out of domain";
pub const ERANGE: &'static str = "Numerical result out of range";
pub const EDEADLK: &'static str = "Resource deadlock avoided";
pub const ENAMETOOLONG: &'static str = "File name too long";
pub const ENOLCK: &'static str = "No locks available";
pub const ENOSYS: &'static str = "Function not implemented";
pub const ENOTEMPTY: &'static str = "Directory not empty";
pub const ELOOP: &'static str = "Too many levels of symbolic links";
pub const ENOMSG: &'static str = "No message of desired type";
pub const EIDRM: &'static str = "Identifier removed";
pub const ENODATA: &'static str = "No data available";
pub const ENONET: &'static str = "Machine is not on the network";
pub const ENOPKG: &'static str = "Package not installed";
pub const EREMOTE: &'static str = "Object is remote";
pub const ENOLINK: &'static str = "Link has been severed";
pub const ECOMM: &'static str = "Communication error on send";
pub const EPROTO: &'static str = "Protocol error";
pub const EBADMSG: &'static str = "Bad message";
pub const EBADFD: &'static str = "File descriptor in bad state";
pub const ESTRPIPE: &'static str = "Streams pipe error";
pub const EUSERS: &'static str = "Too many users";
pub const ENOTSOCK: &'static str = "Socket operation on non-socket";
pub const EMSGSIZE: &'static str = "Message too long";
pub const ENOPROTOOPT: &'static str = "Protocol not available";
pub const EPROTONOSUPPORT: &'static str = "Protocol not supported";
pub const ESOCKTNOSUPPORT: &'static str = "Socket type not supported";
pub const EOPNOTSUPP: &'static str = "Operation not supported";
pub const EPFNOSUPPORT: &'static str = "Protocol family not supported";
pub const ENETDOWN: &'static str = "Network is down";
pub const ENETUNREACH: &'static str = "Network is unreachable";
pub const ENETRESET: &'static str = "Network dropped connection on reset";
pub const ECONNABORTED: &'static str = "Software caused connection abort";
pub const ECONNRESET: &'static str = "Connection reset by peer";
pub const ENOBUFS: &'static str = "No buffer space available";
pub const EISCONN: &'static str = "Transport endpoint is already connected";
pub const ENOTCONN: &'static str = "Transport endpoint is not connected";
pub const ESHUTDOWN: &'static str = "Cannot send after transport endpoint shutdown";
pub const ETIMEDOUT: &'static str = "Connection timed out";
pub const ECONNREFUSED: &'static str = "Connection refused";
pub const EHOSTDOWN: &'static str = "Host is down";
pub const EHOSTUNREACH: &'static str = "No route to host";
pub const EALREADY: &'static str = "Operation already in progress";
pub const EINPROGRESS: &'static str = "Operation now in progress";
pub const EISNAM: &'static str = "Is a named type file";
pub const EREMOTEIO: &'static str = "Remote I/O error";
pub const EDQUOT: &'static str = "Disk quota exceeded";
pub const EBADF2: &'static str = "fid unknown or out of range";
pub const EACCES2: &'static str = "permission denied";
pub const ENOENT_FILE2: &'static str = "file does not exist";
pub const ECONNREFUSED2: &'static str = "authentication failed";
pub const ESPIPE2: &'static str = "bad offset in directory read";
pub const EBADF3: &'static str = "bad use of fid";
pub const EPERM_CONV: &'static str = "wstat can't convert between files and directories";
pub const ENOTEMPTY2: &'static str = "directory is not empty";
pub const EEXIST2: &'static str = "file exists";
pub const EEXIST3: &'static str = "file already exists";
pub const EEXIST4: &'static str = "file or directory already exists";
pub const EBADF4: &'static str = "fid already in use";
pub const ETXTBSY2: &'static str = "file in use";
pub const EIO2: &'static str = "i/o error";
pub const ETXTBSY3: &'static str = "file already open for I/O";
pub const EINVAL2: &'static str = "illegal mode";
pub const ENAMETOOLONG2: &'static str = "illegal name";
pub const ENOTDIR2: &'static str = "not a directory";
pub const EPERM_GRP: &'static str = "not a member of proposed group";
pub const EACCES3: &'static str = "not owner";
pub const EACCES4: &'static str = "only owner can change group in wstat";
pub const EROFS2: &'static str = "read only file system";
pub const EPERM_SPFILE: &'static str = "no access to special file";
pub const EIO3: &'static str = "i/o count too large";
pub const EINVAL3: &'static str = "unknown group";
pub const EINVAL4: &'static str = "unknown user";
pub const EPROTO2: &'static str = "bogus wstat buffer";
pub const EAGAIN2: &'static str = "exclusive use file already open";
pub const EIO4: &'static str = "corrupted directory entry";
pub const EIO5: &'static str = "corrupted file entry";
pub const EIO6: &'static str = "corrupted block label";
pub const EIO7: &'static str = "corrupted meta data";
pub const EINVAL5: &'static str = "illegal offset";
pub const ENOENT_PATH: &'static str = "illegal path element";
pub const EIO8: &'static str = "root of file system is corrupted";
pub const EIO9: &'static str = "corrupted super block";
pub const EPROTO3: &'static str = "protocol botch";
pub const ENOSPC2: &'static str = "file system is full";
pub const EAGAIN3: &'static str = "file is in use";
pub const ENOENT_ALLOC: &'static str = "directory entry is not allocated";
pub const EROFS3: &'static str = "file is read only";
pub const EIDRM2: &'static str = "file has been removed";
pub const EPERM_TRUNCATE: &'static str = "only support truncation to zero length";
pub const EPERM_RMROOT: &'static str = "cannot remove root";
pub const EFBIG2: &'static str = "file too big";
pub const EIO10: &'static str = "venti i/o error";
}
| errno_from_ioerror | identifier_name |
mod.rs | generation: u32,
index: u32,
}
pub(crate) enum AllocAtWithoutReplacement {
Exists(EntityLocation),
DidNotExist,
ExistsWithWrongGeneration,
}
impl Entity {
#[cfg(test)]
pub(crate) const fn new(index: u32, generation: u32) -> Entity {
Entity { index, generation }
}
/// An entity ID with a placeholder value. This may or may not correspond to an actual entity,
/// and should be overwritten by a new value before being used.
///
/// ## Examples
///
/// Initializing a collection (e.g. `array` or `Vec`) with a known size:
///
/// ```no_run
/// # use bevy_ecs::prelude::*;
/// // Create a new array of size 10 filled with invalid entity ids.
/// let mut entities: [Entity; 10] = [Entity::PLACEHOLDER; 10];
///
/// // ... replace the entities with valid ones.
/// ```
///
/// Deriving [`Reflect`](bevy_reflect::Reflect) for a component that has an `Entity` field:
///
/// ```no_run
/// # use bevy_ecs::{prelude::*, component::*};
/// # use bevy_reflect::Reflect;
/// #[derive(Reflect, Component)]
/// #[reflect(Component)]
/// pub struct MyStruct {
/// pub entity: Entity,
/// }
///
/// impl FromWorld for MyStruct {
/// fn from_world(_world: &mut World) -> Self {
/// Self {
/// entity: Entity::PLACEHOLDER,
/// }
/// }
/// }
/// ```
pub const PLACEHOLDER: Self = Self::from_raw(u32::MAX);
/// Creates a new entity ID with the specified `index` and a generation of 0.
///
/// # Note
///
/// Spawning a specific `entity` value is __rarely the right choice__. Most apps should favor
/// [`Commands::spawn`](crate::system::Commands::spawn). This method should generally
/// only be used for sharing entities across apps, and only when they have a scheme
/// worked out to share an index space (which doesn't happen by default).
///
/// In general, one should not try to synchronize the ECS by attempting to ensure that
/// `Entity` lines up between instances, but instead insert a secondary identifier as
/// a component.
pub const fn from_raw(index: u32) -> Entity {
Entity {
index,
generation: 0,
}
}
/// Convert to a form convenient for passing outside of rust.
///
/// Only useful for identifying entities within the same instance of an application. Do not use
/// for serialization between runs.
///
/// No particular structure is guaranteed for the returned bits.
pub const fn to_bits(self) -> u64 {
(self.generation as u64) << 32 | self.index as u64
}
/// Reconstruct an `Entity` previously destructured with [`Entity::to_bits`].
///
/// Only useful when applied to results from `to_bits` in the same instance of an application.
pub const fn from_bits(bits: u64) -> Self {
Self {
generation: (bits >> 32) as u32,
index: bits as u32,
}
}
/// Return a transiently unique identifier.
///
/// No two simultaneously-live entities share the same index, but dead entities' indices may collide
/// with both live and dead entities. Useful for compactly representing entities within a
/// specific snapshot of the world, such as when serializing.
#[inline]
pub const fn index(self) -> u32 {
self.index
}
/// Returns the generation of this Entity's index. The generation is incremented each time an
/// entity with a given index is despawned. This serves as a "count" of the number of times a
/// given index has been reused. (index, generation) pairs uniquely identify a given Entity.
#[inline]
pub const fn generation(self) -> u32 {
self.generation
}
}
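// Round-trip sketch (illustration only, not in the original file): `to_bits`
// and `from_bits` pack the generation into the high 32 bits and the index into
// the low 32 bits. `Entity::new` is the test-only constructor defined above.
#[cfg(test)]
mod bits_sketch {
    use super::*;
    #[test]
    fn bits_round_trip_preserves_index_and_generation() {
        let e = Entity::new(7, 2); // index 7, generation 2
        let bits = e.to_bits();
        assert_eq!(bits, (2u64 << 32) | 7);
        let back = Entity::from_bits(bits);
        assert_eq!((back.index(), back.generation()), (7, 2));
    }
}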
impl Serialize for Entity {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_u64(self.to_bits())
}
}
impl<'de> Deserialize<'de> for Entity {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let id: u64 = serde::de::Deserialize::deserialize(deserializer)?;
Ok(Entity::from_bits(id))
}
}
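// Hedged sketch (not in the original; assumes `serde_json` is available as a
// dev-dependency): the impls above shuttle an Entity through the single u64
// produced by `to_bits`.
#[cfg(test)]
mod serde_sketch {
    use super::*;
    #[test]
    fn entity_serializes_as_its_bits() {
        let e = Entity::from_raw(3); // generation 0, so bits == 3
        let json = serde_json::to_string(&e).unwrap();
        assert_eq!(json, "3");
        let back: Entity = serde_json::from_str(&json).unwrap();
        assert_eq!(back.to_bits(), e.to_bits());
    }
}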
impl fmt::Debug for Entity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}v{}", self.index, self.generation)
}
}
impl SparseSetIndex for Entity {
#[inline]
fn sparse_set_index(&self) -> usize {
self.index() as usize
}
#[inline]
fn get_sparse_set_index(value: usize) -> Self {
Entity::from_raw(value as u32)
}
}
/// An [`Iterator`] returning a sequence of [`Entity`] values from
/// [`Entities::reserve_entities`](crate::entity::Entities::reserve_entities).
pub struct ReserveEntitiesIterator<'a> {
// Metas, so we can recover the current generation for anything in the freelist.
meta: &'a [EntityMeta],
// Reserved indices formerly in the freelist to hand out.
index_iter: std::slice::Iter<'a, u32>,
// New Entity indices to hand out, outside the range of meta.len().
index_range: std::ops::Range<u32>,
}
impl<'a> Iterator for ReserveEntitiesIterator<'a> {
type Item = Entity;
fn next(&mut self) -> Option<Self::Item> {
self.index_iter
.next()
.map(|&index| Entity {
generation: self.meta[index as usize].generation,
index,
})
.or_else(|| {
self.index_range.next().map(|index| Entity {
generation: 0,
index,
})
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.index_iter.len() + self.index_range.len();
(len, Some(len))
}
}
impl<'a> core::iter::ExactSizeIterator for ReserveEntitiesIterator<'a> {}
impl<'a> core::iter::FusedIterator for ReserveEntitiesIterator<'a> {}
/// A [`World`]'s internal metadata store on all of its entities.
///
/// Contains metadata on:
/// - The generation of every entity.
/// - The alive/dead status of a particular entity. (i.e. "has entity 3 been despawned?")
/// - The location of the entity's components in memory (via [`EntityLocation`])
///
/// [`World`]: crate::world::World
#[derive(Debug)]
pub struct Entities {
meta: Vec<EntityMeta>,
/// The `pending` and `free_cursor` fields describe three sets of Entity IDs
/// that have been freed or are in the process of being allocated:
///
/// - The `freelist` IDs, previously freed by `free()`. These IDs are available to any of
/// [`alloc`], [`reserve_entity`] or [`reserve_entities`]. Allocation will always prefer
/// these over brand new IDs.
///
/// - The `reserved` list of IDs that were once in the freelist, but got reserved by
/// [`reserve_entities`] or [`reserve_entity`]. They are now waiting for [`flush`] to make them
/// fully allocated.
///
/// - The count of new IDs that do not yet exist in `self.meta`, but which we have handed out
/// and reserved. [`flush`] will allocate room for them in `self.meta`.
///
/// The contents of `pending` look like this:
///
/// ```txt
/// ----------------------------
/// | freelist | reserved |
/// ----------------------------
/// ^ ^
/// free_cursor pending.len()
/// ```
///
/// As IDs are allocated, `free_cursor` is atomically decremented, moving
/// items from the freelist into the reserved list by sliding over the boundary.
///
/// Once the freelist runs out, `free_cursor` starts going negative.
/// The more negative it is, the more IDs have been reserved starting exactly at
/// the end of `meta.len()`.
///
/// This formulation allows us to reserve any number of IDs first from the freelist
/// and then from the new IDs, using only a single atomic subtract.
///
/// Once [`flush`] is done, `free_cursor` will equal `pending.len()`.
///
/// [`alloc`]: Entities::alloc
/// [`reserve_entity`]: Entities::reserve_entity
/// [`reserve_entities`]: Entities::reserve_entities
/// [`flush`]: Entities::flush
pending: Vec<u32>,
free_cursor: AtomicIdCursor,
/// Stores the number of free entities for [`len`](Entities::len)
len: u32,
}
impl Entities {
pub(crate) const fn new() -> Self {
Entities {
meta: Vec::new(),
pending: Vec::new(),
free_cursor: AtomicIdCursor::new(0),
len: 0,
}
}
/// Reserve entity IDs concurrently.
///
/// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush).
pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator {
// Use one atomic subtract to grab a range of new IDs. The range might be
// entirely nonnegative, meaning all IDs come from the freelist, or entirely
// negative, meaning they are all new IDs to allocate, or a mix of both.
let range_end = self
.free_cursor
// Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
// and use AtomicIsize instead (see note on `IdCursor`).
.fetch_sub(IdCursor::try_from(count).unwrap(), Ordering::Relaxed);
let range_start = range_end - IdCursor::try_from(count).unwrap();
let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize;
let (new_id_start, new_id_end) = if range_start >= 0 {
// We satisfied all requests from the freelist.
(0, 0)
} else {
// We need to allocate some new Entity IDs outside of the range of self.meta.
//
// `range_start` covers some negative territory, e.g. `-3..6`.
// Since the nonnegative values `0..6` are handled by the freelist, that
// means we need to handle the negative range here.
//
// In this example, we truncate the end to 0, leaving us with `-3..0`.
// Then we negate these values to indicate how far beyond the end of `meta.end()`
// to go, yielding `meta.len()+0 .. meta.len()+3`.
let base = self.meta.len() as IdCursor;
let new_id_end = u32::try_from(base - range_start).expect("too many entities");
// `new_id_end` is in range, so no need to check `start`.
let new_id_start = (base - range_end.min(0)) as u32;
(new_id_start, new_id_end)
};
ReserveEntitiesIterator {
meta: &self.meta[..],
index_iter: self.pending[freelist_range].iter(),
index_range: new_id_start..new_id_end,
}
}
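// Worked sketch (illustration only, not part of the original file): the single
// `fetch_sub` above partitions one request between the freelist and brand-new
// indices. With 2 freelist entries, meta.len() == 5 and count == 4, the cursor
// moves 2 -> -2: pending[0..2] is handed out, plus new indices 5 and 6.
#[cfg(test)]
fn _reserve_partition_sketch() {
    let (free_cursor, meta_len, count): (i64, i64, i64) = (2, 5, 4);
    let range_end = free_cursor;
    let range_start = range_end - count; // -2: freelist exhausted mid-request
    assert_eq!(range_start.max(0)..range_end.max(0), 0..2); // freelist slice
    assert_eq!(meta_len - range_end.min(0), 5); // first brand-new index
    assert_eq!(meta_len - range_start, 7); // one past the last new index
}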
/// Reserve one entity ID concurrently.
///
/// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient.
pub fn reserve_entity(&self) -> Entity {
let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed);
if n > 0 {
// Allocate from the freelist.
let index = self.pending[(n - 1) as usize];
Entity {
generation: self.meta[index as usize].generation,
index,
}
} else {
// Grab a new ID, outside the range of `meta.len()`. `flush()` must
// eventually be called to make it valid.
//
// As `self.free_cursor` goes more and more negative, we return IDs farther
// and farther beyond `meta.len()`.
Entity {
generation: 0,
index: u32::try_from(self.meta.len() as IdCursor - n).expect("too many entities"),
}
}
}
/// Check that we do not have pending work requiring `flush()` to be called.
fn verify_flushed(&mut self) {
debug_assert!(
!self.needs_flush(),
"flush() needs to be called before this operation is legal"
);
}
/// Allocate an entity ID directly.
pub fn alloc(&mut self) -> Entity {
self.verify_flushed();
self.len += 1;
if let Some(index) = self.pending.pop() {
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
Entity {
generation: self.meta[index as usize].generation,
index,
}
} else {
let index = u32::try_from(self.meta.len()).expect("too many entities");
self.meta.push(EntityMeta::EMPTY);
Entity {
generation: 0,
index,
}
}
}
/// Allocate a specific entity ID, overwriting its generation.
///
/// Returns the location of the entity currently using the given ID, if any. Location should be
/// written immediately.
pub fn | (&mut self, entity: Entity) -> Option<EntityLocation> {
self.verify_flushed();
let loc = if entity.index as usize >= self.meta.len() {
self.pending.extend((self.meta.len() as u32)..entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.meta
.resize(entity.index as usize + 1, EntityMeta::EMPTY);
self.len += 1;
None
} else if let Some(index) = self.pending.iter().position(|item| *item == entity.index) {
self.pending.swap_remove(index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len += 1;
None
} else {
Some(mem::replace(
&mut self.meta[entity.index as usize].location,
EntityMeta::EMPTY.location,
))
};
self.meta[entity.index as usize].generation = entity.generation;
loc
}
/// Allocate a specific entity ID, overwriting its generation.
///
/// Returns the location of the entity currently using the given ID, if any.
pub(crate) fn alloc_at_without_replacement(
&mut self,
entity: Entity,
) -> AllocAtWithoutReplacement {
self.verify_flushed();
let result = if entity.index as usize >= self.meta.len() {
self.pending.extend((self.meta.len() as u32)..entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.meta
.resize(entity.index as usize + 1, EntityMeta::EMPTY);
self.len += 1;
AllocAtWithoutReplacement::DidNotExist
} else if let Some(index) = self.pending.iter().position(|item| *item == entity.index) {
self.pending.swap_remove(index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len += 1;
AllocAtWithoutReplacement::DidNotExist
} else {
let current_meta = &self.meta[entity.index as usize];
if current_meta.location.archetype_id == ArchetypeId::INVALID {
AllocAtWithoutReplacement::DidNotExist
} else if current_meta.generation == entity.generation {
AllocAtWithoutReplacement::Exists(current_meta.location)
} else {
return AllocAtWithoutReplacement::ExistsWithWrongGeneration;
}
};
self.meta[entity.index as usize].generation = entity.generation;
result
}
/// Destroy an entity, allowing it to be reused.
///
/// Must not be called while reserved entities are awaiting `flush()`.
pub fn free(&mut self, entity: Entity) -> Option<EntityLocation> {
self.verify_flushed();
let meta = &mut self.meta[entity.index as usize];
if meta.generation != entity.generation {
return None;
}
meta.generation += 1;
let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location);
self.pending.push(entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len -= 1;
Some(loc)
}
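// Sketch (illustration only, not in the original): freeing bumps the
// generation, so a stale handle stops resolving even though its index is
// immediately reusable by the next `alloc`.
#[cfg(test)]
fn _free_then_realloc_sketch() {
    let mut entities = Entities::new();
    let a = entities.alloc();
    entities.free(a);
    assert!(!entities.contains(a)); // old (index, generation) pair is dead
    let b = entities.alloc(); // pops the freed index back off `pending`
    assert_eq!(b.index(), a.index());
    assert_eq!(b.generation(), a.generation() + 1);
}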
/// Ensure at least `n` allocations can succeed without reallocating.
pub fn reserve(&mut self, additional: u32) {
self.verify_flushed();
let freelist_size = *self.free_cursor.get_mut();
// Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
// and use AtomicIsize instead (see note on `IdCursor`).
let shortfall = IdCursor::try_from(additional).unwrap() - freelist_size;
if shortfall > 0 {
self.meta.reserve(shortfall as usize);
}
}
/// Returns true if the [`Entities`] contains [`entity`](Entity).
// This will return false for entities which have been freed, even if
// not reallocated, since the generation is incremented in `free`.
pub fn contains(&self, entity: Entity) -> bool {
self.resolve_from_id(entity.index())
.map_or(false, |e| e.generation() == entity.generation)
}
/// Clears all [`Entity`] from the World.
pub fn clear(&mut self) {
self.meta.clear();
self.pending.clear();
*self.free_cursor.get_mut() = 0;
self.len = 0;
}
/// Returns the location of an [`Entity`].
/// Note: for pending entities, returns `Some(EntityLocation::INVALID)`.
#[inline]
pub fn get(&self, entity: Entity) -> Option<EntityLocation> {
if let Some(meta) = self.meta.get(entity.index as usize) {
if meta.generation != entity.generation
|| meta.location.archetype_id == ArchetypeId::INVALID
{
return None;
}
Some(meta.location)
} else {
None
}
}
/// Updates the location of an [`Entity`]. This must be called when moving the components of
/// the entity around in storage.
///
/// # Safety
/// - `index` must be a valid entity index.
/// - `location` must be valid for the entity at `index` or immediately made valid afterwards
/// before handing control to unknown code.
#[inline]
pub(crate) unsafe fn set(&mut self, index: u32, location: EntityLocation) {
// SAFETY: Caller guarantees that `index` a valid entity index
self.meta.get_unchecked_mut(index as usize).location = location;
}
/// Increments the `generation` of a freed [`Entity`]. The next entity ID allocated with this
/// `index` will count `generation` starting from the prior `generation` + the specified
/// value + 1.
///
/// Does nothing if no entity with this `index` has been allocated yet.
pub(crate) fn reserve_generations(&mut self, index: u32, generations: u32) -> bool {
if (index as usize) >= self.meta.len() {
return false;
}
let meta = &mut self.meta[index as usize];
if meta.location.archetype_id == ArchetypeId::INVALID {
meta.generation += generations;
true
} else {
false
}
}
/// Get the [`Entity`] with a given id, if it exists in this [`Entities`] collection
/// Returns `None` if this [`Entity`] is outside of the range of currently reserved Entities
///
/// Note: This method may return [`Entities`](Entity) which are currently free
/// Note that [`contains`](Entities::contains) will correctly return false for freed
/// entities, since it checks the generation
pub fn resolve_from_id(&self, index: u32) -> Option<Entity> {
let idu = index as usize;
if let Some(&EntityMeta { generation, .. }) = self.meta.get(idu) {
Some(Entity { generation, index })
} else {
// `id` is outside of the meta list - check whether it is reserved but not yet flushed.
let free_cursor = self.free_cursor.load(Ordering::Relaxed);
// If this entity was manually created, then free_cursor might be positive
// Returning None handles that case correctly
let num_pending = usize::try_from(-free_cursor).ok()?;
(idu < self.meta.len() + num_pending).then_some(Entity {
generation: 0,
index,
})
}
}
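// Sketch (not in the original): a concurrently reserved ID resolves by raw
// index even before `flush`, via the negative-cursor branch above.
#[cfg(test)]
fn _resolve_pending_sketch() {
    let entities = Entities::new();
    let e = entities.reserve_entity(); // free_cursor drops to -1
    let resolved = entities.resolve_from_id(e.index()).unwrap();
    assert_eq!(resolved.to_bits(), e.to_bits());
}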
fn needs_flush(&mut self) -> bool {
*self.free_cursor.get_mut() != self.pending.len() as IdCursor
}
| alloc_at | identifier_name |
mod.rs | <'de>,
{
let id: u64 = serde::de::Deserialize::deserialize(deserializer)?;
Ok(Entity::from_bits(id))
}
}
impl fmt::Debug for Entity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}v{}", self.index, self.generation)
}
}
impl SparseSetIndex for Entity {
#[inline]
fn sparse_set_index(&self) -> usize {
self.index() as usize
}
#[inline]
fn get_sparse_set_index(value: usize) -> Self {
Entity::from_raw(value as u32)
}
}
/// An [`Iterator`] returning a sequence of [`Entity`] values from
/// [`Entities::reserve_entities`](crate::entity::Entities::reserve_entities).
pub struct ReserveEntitiesIterator<'a> {
// Metas, so we can recover the current generation for anything in the freelist.
meta: &'a [EntityMeta],
// Reserved indices formerly in the freelist to hand out.
index_iter: std::slice::Iter<'a, u32>,
// New Entity indices to hand out, outside the range of meta.len().
index_range: std::ops::Range<u32>,
}
impl<'a> Iterator for ReserveEntitiesIterator<'a> {
type Item = Entity;
fn next(&mut self) -> Option<Self::Item> {
self.index_iter
.next()
.map(|&index| Entity {
generation: self.meta[index as usize].generation,
index,
})
.or_else(|| {
self.index_range.next().map(|index| Entity {
generation: 0,
index,
})
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.index_iter.len() + self.index_range.len();
(len, Some(len))
}
}
impl<'a> core::iter::ExactSizeIterator for ReserveEntitiesIterator<'a> {}
impl<'a> core::iter::FusedIterator for ReserveEntitiesIterator<'a> {}
/// A [`World`]'s internal metadata store on all of its entities.
///
/// Contains metadata on:
/// - The generation of every entity.
/// - The alive/dead status of a particular entity. (i.e. "has entity 3 been despawned?")
/// - The location of the entity's components in memory (via [`EntityLocation`])
///
/// [`World`]: crate::world::World
#[derive(Debug)]
pub struct Entities {
meta: Vec<EntityMeta>,
/// The `pending` and `free_cursor` fields describe three sets of Entity IDs
/// that have been freed or are in the process of being allocated:
///
/// - The `freelist` IDs, previously freed by `free()`. These IDs are available to any of
/// [`alloc`], [`reserve_entity`] or [`reserve_entities`]. Allocation will always prefer
/// these over brand new IDs.
///
/// - The `reserved` list of IDs that were once in the freelist, but got reserved by
/// [`reserve_entities`] or [`reserve_entity`]. They are now waiting for [`flush`] to make them
/// fully allocated.
///
/// - The count of new IDs that do not yet exist in `self.meta`, but which we have handed out
/// and reserved. [`flush`] will allocate room for them in `self.meta`.
///
/// The contents of `pending` look like this:
///
/// ```txt
/// ----------------------------
/// | freelist | reserved |
/// ----------------------------
/// ^ ^
/// free_cursor pending.len()
/// ```
///
/// As IDs are allocated, `free_cursor` is atomically decremented, moving
/// items from the freelist into the reserved list by sliding over the boundary.
///
/// Once the freelist runs out, `free_cursor` starts going negative.
/// The more negative it is, the more IDs have been reserved starting exactly at
/// the end of `meta.len()`.
///
/// This formulation allows us to reserve any number of IDs first from the freelist
/// and then from the new IDs, using only a single atomic subtract.
///
/// Once [`flush`] is done, `free_cursor` will equal `pending.len()`.
///
/// [`alloc`]: Entities::alloc
/// [`reserve_entity`]: Entities::reserve_entity
/// [`reserve_entities`]: Entities::reserve_entities
/// [`flush`]: Entities::flush
pending: Vec<u32>,
free_cursor: AtomicIdCursor,
/// Stores the number of free entities for [`len`](Entities::len)
len: u32,
}
impl Entities {
pub(crate) const fn new() -> Self {
Entities {
meta: Vec::new(),
pending: Vec::new(),
free_cursor: AtomicIdCursor::new(0),
len: 0,
}
}
/// Reserve entity IDs concurrently.
///
/// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush).
pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator {
// Use one atomic subtract to grab a range of new IDs. The range might be
// entirely nonnegative, meaning all IDs come from the freelist, or entirely
// negative, meaning they are all new IDs to allocate, or a mix of both.
let range_end = self
.free_cursor
// Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
// and use AtomicIsize instead (see note on `IdCursor`).
.fetch_sub(IdCursor::try_from(count).unwrap(), Ordering::Relaxed);
let range_start = range_end - IdCursor::try_from(count).unwrap();
let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize;
let (new_id_start, new_id_end) = if range_start >= 0 {
// We satisfied all requests from the freelist.
(0, 0)
} else {
// We need to allocate some new Entity IDs outside of the range of self.meta.
//
// `range_start` covers some negative territory, e.g. `-3..6`.
// Since the nonnegative values `0..6` are handled by the freelist, that
// means we need to handle the negative range here.
//
// In this example, we truncate the end to 0, leaving us with `-3..0`.
// Then we negate these values to indicate how far beyond the end of `meta.end()`
// to go, yielding `meta.len()+0 .. meta.len()+3`.
let base = self.meta.len() as IdCursor;
let new_id_end = u32::try_from(base - range_start).expect("too many entities");
// `new_id_end` is in range, so no need to check `start`.
let new_id_start = (base - range_end.min(0)) as u32;
(new_id_start, new_id_end)
};
ReserveEntitiesIterator {
meta: &self.meta[..],
index_iter: self.pending[freelist_range].iter(),
index_range: new_id_start..new_id_end,
}
}
/// Reserve one entity ID concurrently.
///
/// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient.
pub fn reserve_entity(&self) -> Entity {
let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed);
if n > 0 {
// Allocate from the freelist.
let index = self.pending[(n - 1) as usize];
Entity {
generation: self.meta[index as usize].generation,
index,
}
} else {
// Grab a new ID, outside the range of `meta.len()`. `flush()` must
// eventually be called to make it valid.
//
// As `self.free_cursor` goes more and more negative, we return IDs farther
// and farther beyond `meta.len()`.
Entity {
generation: 0,
index: u32::try_from(self.meta.len() as IdCursor - n).expect("too many entities"),
}
}
}
/// Check that we do not have pending work requiring `flush()` to be called.
fn verify_flushed(&mut self) {
debug_assert!(
!self.needs_flush(),
"flush() needs to be called before this operation is legal"
);
}
/// Allocate an entity ID directly.
pub fn alloc(&mut self) -> Entity {
self.verify_flushed();
self.len += 1;
if let Some(index) = self.pending.pop() {
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
Entity {
generation: self.meta[index as usize].generation,
index,
}
} else {
let index = u32::try_from(self.meta.len()).expect("too many entities");
self.meta.push(EntityMeta::EMPTY);
Entity {
generation: 0,
index,
}
}
}
/// Allocate a specific entity ID, overwriting its generation.
///
/// Returns the location of the entity currently using the given ID, if any. Location should be
/// written immediately.
pub fn alloc_at(&mut self, entity: Entity) -> Option<EntityLocation> {
self.verify_flushed();
let loc = if entity.index as usize >= self.meta.len() {
self.pending.extend((self.meta.len() as u32)..entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.meta
.resize(entity.index as usize + 1, EntityMeta::EMPTY);
self.len += 1;
None
} else if let Some(index) = self.pending.iter().position(|item| *item == entity.index) {
self.pending.swap_remove(index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len += 1;
None
} else {
Some(mem::replace(
&mut self.meta[entity.index as usize].location,
EntityMeta::EMPTY.location,
))
};
self.meta[entity.index as usize].generation = entity.generation;
loc
}
/// Allocate a specific entity ID, overwriting its generation.
///
/// Returns the location of the entity currently using the given ID, if any.
pub(crate) fn alloc_at_without_replacement(
&mut self,
entity: Entity,
) -> AllocAtWithoutReplacement {
self.verify_flushed();
let result = if entity.index as usize >= self.meta.len() {
self.pending.extend((self.meta.len() as u32)..entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.meta
.resize(entity.index as usize + 1, EntityMeta::EMPTY);
self.len += 1;
AllocAtWithoutReplacement::DidNotExist
} else if let Some(index) = self.pending.iter().position(|item| *item == entity.index) {
self.pending.swap_remove(index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len += 1;
AllocAtWithoutReplacement::DidNotExist
} else {
let current_meta = &self.meta[entity.index as usize];
if current_meta.location.archetype_id == ArchetypeId::INVALID {
AllocAtWithoutReplacement::DidNotExist
} else if current_meta.generation == entity.generation {
AllocAtWithoutReplacement::Exists(current_meta.location)
} else {
return AllocAtWithoutReplacement::ExistsWithWrongGeneration;
}
};
self.meta[entity.index as usize].generation = entity.generation;
result
}
/// Destroy an entity, allowing it to be reused.
///
/// Must not be called while reserved entities are awaiting `flush()`.
pub fn free(&mut self, entity: Entity) -> Option<EntityLocation> {
self.verify_flushed();
let meta = &mut self.meta[entity.index as usize];
if meta.generation != entity.generation {
return None;
}
meta.generation += 1;
let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location);
self.pending.push(entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len -= 1;
Some(loc)
}
/// Ensure at least `n` allocations can succeed without reallocating.
pub fn reserve(&mut self, additional: u32) {
self.verify_flushed();
let freelist_size = *self.free_cursor.get_mut();
// Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
// and use AtomicIsize instead (see note on `IdCursor`).
let shortfall = IdCursor::try_from(additional).unwrap() - freelist_size;
if shortfall > 0 {
self.meta.reserve(shortfall as usize);
}
}
/// Returns true if the [`Entities`] contains [`entity`](Entity).
// This will return false for entities which have been freed, even if
// not reallocated, since the generation is incremented in `free`.
pub fn contains(&self, entity: Entity) -> bool {
self.resolve_from_id(entity.index())
.map_or(false, |e| e.generation() == entity.generation)
}
/// Clears all [`Entity`] from the World.
pub fn clear(&mut self) {
self.meta.clear();
self.pending.clear();
*self.free_cursor.get_mut() = 0;
self.len = 0;
}
/// Returns the location of an [`Entity`].
/// Note: for pending entities, returns `Some(EntityLocation::INVALID)`.
#[inline]
pub fn get(&self, entity: Entity) -> Option<EntityLocation> {
if let Some(meta) = self.meta.get(entity.index as usize) {
if meta.generation != entity.generation
|| meta.location.archetype_id == ArchetypeId::INVALID
{
return None;
}
Some(meta.location)
} else {
None
}
}
/// Updates the location of an [`Entity`]. This must be called when moving the components of
/// the entity around in storage.
///
/// # Safety
/// - `index` must be a valid entity index.
/// - `location` must be valid for the entity at `index` or immediately made valid afterwards
/// before handing control to unknown code.
#[inline]
pub(crate) unsafe fn set(&mut self, index: u32, location: EntityLocation) {
// SAFETY: Caller guarantees that `index` a valid entity index
self.meta.get_unchecked_mut(index as usize).location = location;
}
/// Increments the `generation` of a freed [`Entity`]. The next entity ID allocated with this
/// `index` will count `generation` starting from the prior `generation` + the specified
/// value + 1.
///
/// Does nothing if no entity with this `index` has been allocated yet.
pub(crate) fn reserve_generations(&mut self, index: u32, generations: u32) -> bool {
if (index as usize) >= self.meta.len() {
return false;
}
let meta = &mut self.meta[index as usize];
if meta.location.archetype_id == ArchetypeId::INVALID {
meta.generation += generations;
true
} else {
false
}
}
/// Get the [`Entity`] with a given id, if it exists in this [`Entities`] collection
/// Returns `None` if this [`Entity`] is outside of the range of currently reserved Entities
///
/// Note: This method may return [`Entities`](Entity) which are currently free
/// Note that [`contains`](Entities::contains) will correctly return false for freed
/// entities, since it checks the generation
pub fn resolve_from_id(&self, index: u32) -> Option<Entity> {
let idu = index as usize;
if let Some(&EntityMeta { generation, .. }) = self.meta.get(idu) {
Some(Entity { generation, index })
} else {
// `id` is outside of the meta list - check whether it is reserved but not yet flushed.
let free_cursor = self.free_cursor.load(Ordering::Relaxed);
// If this entity was manually created, then free_cursor might be positive
// Returning None handles that case correctly
let num_pending = usize::try_from(-free_cursor).ok()?;
(idu < self.meta.len() + num_pending).then_some(Entity {
generation: 0,
index,
})
}
}
fn needs_flush(&mut self) -> bool {
*self.free_cursor.get_mut() != self.pending.len() as IdCursor
}
/// Allocates space for entities previously reserved with [`reserve_entity`](Entities::reserve_entity) or
/// [`reserve_entities`](Entities::reserve_entities), then initializes each one using the supplied function.
///
/// # Safety
/// Flush _must_ set the entity location to the correct [`ArchetypeId`] for the given [`Entity`]
/// each time init is called. This _can_ be [`ArchetypeId::INVALID`], provided the [`Entity`]
/// has not been assigned to an [`Archetype`][crate::archetype::Archetype].
///
/// Note: freshly-allocated entities (ones which don't come from the pending list) are guaranteed
/// to be initialized with the invalid archetype.
pub unsafe fn flush(&mut self, mut init: impl FnMut(Entity, &mut EntityLocation)) {
let free_cursor = self.free_cursor.get_mut();
let current_free_cursor = *free_cursor;
let new_free_cursor = if current_free_cursor >= 0 {
current_free_cursor as usize
} else {
let old_meta_len = self.meta.len();
let new_meta_len = old_meta_len + -current_free_cursor as usize;
self.meta.resize(new_meta_len, EntityMeta::EMPTY);
self.len += -current_free_cursor as u32;
for (index, meta) in self.meta.iter_mut().enumerate().skip(old_meta_len) {
init(
Entity {
index: index as u32,
generation: meta.generation,
},
&mut meta.location,
);
}
*free_cursor = 0;
0
};
self.len += (self.pending.len() - new_free_cursor) as u32;
for index in self.pending.drain(new_free_cursor..) {
let meta = &mut self.meta[index as usize];
init(
Entity {
index,
generation: meta.generation,
},
&mut meta.location,
);
}
}
/// Flushes all reserved entities to an "invalid" state. Attempting to retrieve them will return `None`
/// unless they are later populated with a valid archetype.
pub fn flush_as_invalid(&mut self) {
// SAFETY: as per `flush` safety docs, the archetype id can be set to [`ArchetypeId::INVALID`] if
// the [`Entity`] has not been assigned to an [`Archetype`][crate::archetype::Archetype], which is the case here
unsafe {
self.flush(|_entity, location| {
location.archetype_id = ArchetypeId::INVALID;
});
}
}
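// Sketch (illustration only, not in the original): flushing turns reserved IDs
// into live entities; `flush_as_invalid` is the safe wrapper that leaves their
// locations unassigned.
#[cfg(test)]
fn _flush_sketch() {
    let mut entities = Entities::new();
    let e = entities.reserve_entity();
    assert!(entities.needs_flush());
    entities.flush_as_invalid();
    assert!(entities.contains(e)); // now tracked in `meta`
    assert!(entities.get(e).is_none()); // but no valid location yet
}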
/// # Safety
///
/// This function is safe if and only if the world this Entities is on has no entities.
pub unsafe fn flush_and_reserve_invalid_assuming_no_entities(&mut self, count: usize) {
let free_cursor = self.free_cursor.get_mut();
*free_cursor = 0;
self.meta.reserve(count);
// the EntityMeta struct only contains integers, and it is valid to have all bytes set to u8::MAX
self.meta.as_mut_ptr().write_bytes(u8::MAX, count);
self.meta.set_len(count);
self.len = count as u32;
}
/// The count of all entities in the [`World`] that have ever been allocated
/// including the entities that are currently freed.
///
/// This does not include entities that have been reserved but have never been
/// allocated yet.
|
mod.rs

pub struct Entity {
generation: u32,
index: u32,
}
pub(crate) enum AllocAtWithoutReplacement {
Exists(EntityLocation),
DidNotExist,
ExistsWithWrongGeneration,
}
impl Entity {
#[cfg(test)]
pub(crate) const fn new(index: u32, generation: u32) -> Entity {
Entity { index, generation }
}
/// An entity ID with a placeholder value. This may or may not correspond to an actual entity,
/// and should be overwritten by a new value before being used.
///
/// ## Examples
///
/// Initializing a collection (e.g. `array` or `Vec`) with a known size:
///
/// ```no_run
/// # use bevy_ecs::prelude::*;
/// // Create a new array of size 10 filled with invalid entity ids.
/// let mut entities: [Entity; 10] = [Entity::PLACEHOLDER; 10];
///
/// //... replace the entities with valid ones.
/// ```
///
/// Deriving [`Reflect`](bevy_reflect::Reflect) for a component that has an `Entity` field:
///
/// ```no_run
/// # use bevy_ecs::{prelude::*, component::*};
/// # use bevy_reflect::Reflect;
/// #[derive(Reflect, Component)]
/// #[reflect(Component)]
/// pub struct MyStruct {
/// pub entity: Entity,
/// }
///
/// impl FromWorld for MyStruct {
/// fn from_world(_world: &mut World) -> Self {
/// Self {
/// entity: Entity::PLACEHOLDER,
/// }
/// }
/// }
/// ```
pub const PLACEHOLDER: Self = Self::from_raw(u32::MAX);
/// Creates a new entity ID with the specified `index` and a generation of 0.
///
/// # Note
///
/// Spawning a specific `entity` value is __rarely the right choice__. Most apps should favor
/// [`Commands::spawn`](crate::system::Commands::spawn). This method should generally
/// only be used for sharing entities across apps, and only when they have a scheme
/// worked out to share an index space (which doesn't happen by default).
///
/// In general, one should not try to synchronize the ECS by attempting to ensure that
/// `Entity` lines up between instances, but instead insert a secondary identifier as
/// a component.
pub const fn from_raw(index: u32) -> Entity {
Entity {
index,
generation: 0,
}
}
/// Convert to a form convenient for passing outside of rust.
///
/// Only useful for identifying entities within the same instance of an application. Do not use
/// for serialization between runs.
///
/// No particular structure is guaranteed for the returned bits.
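///
/// A round-trip through [`Entity::from_bits`] reproduces the same ID (sketch; the exact
/// bit layout is an implementation detail):
///
/// ```no_run
/// # use bevy_ecs::prelude::*;
/// let entity = Entity::from_raw(42);
/// assert_eq!(Entity::from_bits(entity.to_bits()), entity);
/// ```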
pub const fn to_bits(self) -> u64 {
(self.generation as u64) << 32 | self.index as u64
}
/// Reconstruct an `Entity` previously destructured with [`Entity::to_bits`].
///
/// Only useful when applied to results from `to_bits` in the same instance of an application.
pub const fn from_bits(bits: u64) -> Self {
Self {
generation: (bits >> 32) as u32,
index: bits as u32,
}
}
/// Return a transiently unique identifier.
///
/// No two simultaneously-live entities share the same index, but dead entities' indices may collide
/// with both live and dead entities. Useful for compactly representing entities within a
/// specific snapshot of the world, such as when serializing.
#[inline]
pub const fn index(self) -> u32 {
self.index
}
/// Returns the generation of this Entity's index. The generation is incremented each time an
/// entity with a given index is despawned. This serves as a "count" of the number of times a
/// given index has been reused; (index, generation) pairs uniquely identify a given Entity.
#[inline]
pub const fn generation(self) -> u32 {
self.generation
}
}
impl Serialize for Entity {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_u64(self.to_bits())
}
}
impl<'de> Deserialize<'de> for Entity {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let id: u64 = serde::de::Deserialize::deserialize(deserializer)?;
Ok(Entity::from_bits(id))
}
}
impl fmt::Debug for Entity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}v{}", self.index, self.generation)
}
}
impl SparseSetIndex for Entity {
#[inline]
fn sparse_set_index(&self) -> usize {
self.index() as usize
}
#[inline]
fn get_sparse_set_index(value: usize) -> Self {
Entity::from_raw(value as u32)
}
}
/// An [`Iterator`] returning a sequence of [`Entity`] values from
/// [`Entities::reserve_entities`](crate::entity::Entities::reserve_entities).
pub struct ReserveEntitiesIterator<'a> {
// Metas, so we can recover the current generation for anything in the freelist.
meta: &'a [EntityMeta],
// Reserved indices formerly in the freelist to hand out.
index_iter: std::slice::Iter<'a, u32>,
// New Entity indices to hand out, outside the range of meta.len().
index_range: std::ops::Range<u32>,
}
impl<'a> Iterator for ReserveEntitiesIterator<'a> {
type Item = Entity;
fn next(&mut self) -> Option<Self::Item> {
self.index_iter
.next()
.map(|&index| Entity {
generation: self.meta[index as usize].generation,
index,
})
.or_else(|| {
self.index_range.next().map(|index| Entity {
generation: 0,
index,
})
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.index_iter.len() + self.index_range.len();
(len, Some(len))
}
}
impl<'a> core::iter::ExactSizeIterator for ReserveEntitiesIterator<'a> {}
impl<'a> core::iter::FusedIterator for ReserveEntitiesIterator<'a> {}
/// A [`World`]'s internal metadata store on all of its entities.
///
/// Contains metadata on:
/// - The generation of every entity.
/// - The alive/dead status of a particular entity. (i.e. "has entity 3 been despawned?")
/// - The location of the entity's components in memory (via [`EntityLocation`])
///
/// [`World`]: crate::world::World
#[derive(Debug)]
pub struct Entities {
meta: Vec<EntityMeta>,
/// The `pending` and `free_cursor` fields describe three sets of Entity IDs
/// that have been freed or are in the process of being allocated:
///
/// - The `freelist` IDs, previously freed by `free()`. These IDs are available to any of
/// [`alloc`], [`reserve_entity`] or [`reserve_entities`]. Allocation will always prefer
/// these over brand new IDs.
///
/// - The `reserved` list of IDs that were once in the freelist, but got reserved by
/// [`reserve_entities`] or [`reserve_entity`]. They are now waiting for [`flush`] to make them
/// fully allocated.
///
/// - The count of new IDs that do not yet exist in `self.meta`, but which we have handed out
/// and reserved. [`flush`] will allocate room for them in `self.meta`.
///
/// The contents of `pending` look like this:
///
/// ```txt
/// ----------------------------
/// | freelist | reserved |
/// ----------------------------
/// ^ ^
/// free_cursor pending.len()
/// ```
///
/// As IDs are allocated, `free_cursor` is atomically decremented, moving
/// items from the freelist into the reserved list by sliding over the boundary.
///
/// Once the freelist runs out, `free_cursor` starts going negative.
/// The more negative it is, the more IDs have been reserved starting exactly at
/// the end of `meta.len()`.
///
/// This formulation allows us to reserve any number of IDs first from the freelist
/// and then from the new IDs, using only a single atomic subtract.
///
/// Once [`flush`] is done, `free_cursor` will equal `pending.len()`.
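///
/// As a concrete illustration (values assumed): if entities `3` and `7` were freed,
/// `pending` is `[3, 7]` and `free_cursor` is `2`. Reserving three IDs subtracts 3 from
/// the cursor, leaving `-1`: indices `3` and `7` come from the freelist, and one
/// brand-new index (`meta.len()`) is handed out, to be materialized later by [`flush`].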
///
/// [`alloc`]: Entities::alloc
/// [`reserve_entity`]: Entities::reserve_entity
/// [`reserve_entities`]: Entities::reserve_entities
/// [`flush`]: Entities::flush
pending: Vec<u32>,
free_cursor: AtomicIdCursor,
/// Stores the number of live entities for [`len`](Entities::len)
len: u32,
}
impl Entities {
pub(crate) const fn new() -> Self {
Entities {
meta: Vec::new(),
pending: Vec::new(),
free_cursor: AtomicIdCursor::new(0),
len: 0,
}
}
/// Reserve entity IDs concurrently.
///
/// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush).
pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator {
// Use one atomic subtract to grab a range of new IDs. The range might be
// entirely nonnegative, meaning all IDs come from the freelist, or entirely
// negative, meaning they are all new IDs to allocate, or a mix of both.
let range_end = self
.free_cursor
// Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
// and use AtomicIsize instead (see note on `IdCursor`).
.fetch_sub(IdCursor::try_from(count).unwrap(), Ordering::Relaxed);
let range_start = range_end - IdCursor::try_from(count).unwrap();
let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize;
let (new_id_start, new_id_end) = if range_start >= 0 {
// We satisfied all requests from the freelist.
(0, 0)
} else {
// We need to allocate some new Entity IDs outside of the range of self.meta.
//
// `range_start` covers some negative territory, e.g. `-3..6`.
// Since the nonnegative values `0..6` are handled by the freelist, that
// means we need to handle the negative range here.
//
// In this example, we truncate the end to 0, leaving us with `-3..0`.
// Then we negate these values to indicate how far beyond the end of `meta.end()`
// to go, yielding `meta.len()+0.. meta.len()+3`.
let base = self.meta.len() as IdCursor;
let new_id_end = u32::try_from(base - range_start).expect("too many entities");
// `new_id_end` is in range, so no need to check `start`.
let new_id_start = (base - range_end.min(0)) as u32;
(new_id_start, new_id_end)
};
ReserveEntitiesIterator {
meta: &self.meta[..],
index_iter: self.pending[freelist_range].iter(),
index_range: new_id_start..new_id_end,
}
}
/// Reserve one entity ID concurrently.
///
/// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient.
pub fn reserve_entity(&self) -> Entity {
let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed);
if n > 0 {
// Allocate from the freelist.
let index = self.pending[(n - 1) as usize];
Entity {
generation: self.meta[index as usize].generation,
index,
}
} else {
// Grab a new ID, outside the range of `meta.len()`. `flush()` must
// eventually be called to make it valid.
//
// As `self.free_cursor` goes more and more negative, we return IDs farther
// and farther beyond `meta.len()`.
Entity {
generation: 0,
index: u32::try_from(self.meta.len() as IdCursor - n).expect("too many entities"),
}
}
}
/// Check that we do not have pending work requiring `flush()` to be called.
fn verify_flushed(&mut self) {
debug_assert!(
!self.needs_flush(),
"flush() needs to be called before this operation is legal"
);
}
/// Allocate an entity ID directly.
pub fn alloc(&mut self) -> Entity {
self.verify_flushed();
self.len += 1;
if let Some(index) = self.pending.pop() {
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
Entity {
generation: self.meta[index as usize].generation,
index,
}
} else {
let index = u32::try_from(self.meta.len()).expect("too many entities");
self.meta.push(EntityMeta::EMPTY);
Entity {
generation: 0,
index,
}
}
}
/// Allocate a specific entity ID, overwriting its generation.
///
/// Returns the location of the entity currently using the given ID, if any. Location should be
/// written immediately.
pub fn alloc_at(&mut self, entity: Entity) -> Option<EntityLocation> {
self.verify_flushed();
let loc = if entity.index as usize >= self.meta.len() {
self.pending.extend((self.meta.len() as u32)..entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.meta
.resize(entity.index as usize + 1, EntityMeta::EMPTY);
self.len += 1;
None
} else if let Some(index) = self.pending.iter().position(|item| *item == entity.index) {
self.pending.swap_remove(index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len += 1;
None
} else {
Some(mem::replace(
&mut self.meta[entity.index as usize].location,
EntityMeta::EMPTY.location,
))
};
self.meta[entity.index as usize].generation = entity.generation;
loc
}
/// Allocate a specific entity ID, overwriting its generation.
///
/// Returns the location of the entity currently using the given ID, if any.
pub(crate) fn alloc_at_without_replacement(
&mut self,
entity: Entity,
) -> AllocAtWithoutReplacement {
self.verify_flushed();
let result = if entity.index as usize >= self.meta.len() {
self.pending.extend((self.meta.len() as u32)..entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.meta
.resize(entity.index as usize + 1, EntityMeta::EMPTY);
self.len += 1;
AllocAtWithoutReplacement::DidNotExist
} else if let Some(index) = self.pending.iter().position(|item| *item == entity.index) {
self.pending.swap_remove(index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len += 1;
AllocAtWithoutReplacement::DidNotExist
} else {
let current_meta = &self.meta[entity.index as usize];
if current_meta.location.archetype_id == ArchetypeId::INVALID {
AllocAtWithoutReplacement::DidNotExist
} else if current_meta.generation == entity.generation {
AllocAtWithoutReplacement::Exists(current_meta.location)
} else {
return AllocAtWithoutReplacement::ExistsWithWrongGeneration;
}
};
self.meta[entity.index as usize].generation = entity.generation;
result
}
/// Destroy an entity, allowing it to be reused.
///
/// Must not be called while reserved entities are awaiting `flush()`.
pub fn free(&mut self, entity: Entity) -> Option<EntityLocation> {
self.verify_flushed();
let meta = &mut self.meta[entity.index as usize];
if meta.generation != entity.generation {
return None;
}
meta.generation += 1;
let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location);
self.pending.push(entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len -= 1;
Some(loc)
}
/// Ensure at least `n` allocations can succeed without reallocating.
pub fn reserve(&mut self, additional: u32) {
self.verify_flushed();
let freelist_size = *self.free_cursor.get_mut();
// Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
// and use AtomicIsize instead (see note on `IdCursor`).
let shortfall = IdCursor::try_from(additional).unwrap() - freelist_size;
if shortfall > 0 {
self.meta.reserve(shortfall as usize);
}
}
/// Returns true if the [`Entities`] contains [`entity`](Entity).
// This will return false for entities which have been freed, even if
// not reallocated, since the generation is incremented in `free`
pub fn contains(&self, entity: Entity) -> bool {
self.resolve_from_id(entity.index())
.map_or(false, |e| e.generation() == entity.generation)
}
/// Clears all [`Entity`] from the World.
pub fn clear(&mut self) {
self.meta.clear();
self.pending.clear();
*self.free_cursor.get_mut() = 0;
self.len = 0;
}
/// Returns the location of an [`Entity`].
/// Note: returns `None` for pending entities, since their location is still [`ArchetypeId::INVALID`].
#[inline]
pub fn get(&self, entity: Entity) -> Option<EntityLocation> {
if let Some(meta) = self.meta.get(entity.index as usize) {
if meta.generation != entity.generation
|| meta.location.archetype_id == ArchetypeId::INVALID
{
return None;
}
Some(meta.location)
} else {
None
}
}
/// Updates the location of an [`Entity`]. This must be called when moving the components of
/// the entity around in storage.
///
/// # Safety
/// - `index` must be a valid entity index.
/// - `location` must be valid for the entity at `index` or immediately made valid afterwards
/// before handing control to unknown code.
#[inline]
pub(crate) unsafe fn set(&mut self, index: u32, location: EntityLocation) {
// SAFETY: Caller guarantees that `index` is a valid entity index
self.meta.get_unchecked_mut(index as usize).location = location;
}
/// Increments the `generation` of a freed [`Entity`]. The next entity ID allocated with this
/// `index` will count `generation` starting from the prior `generation` + the specified
/// value + 1.
///
/// Does nothing if no entity with this `index` has been allocated yet.
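///
/// E.g. (values assumed): if a freed `index` currently sits at generation `3`, then
/// `reserve_generations(index, 5)` bumps it to `8`, and the next allocation of that
/// index observes generation `8`.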
pub(crate) fn reserve_generations(&mut self, index: u32, generations: u32) -> bool {
if (index as usize) >= self.meta.len() {
return false;
}
let meta = &mut self.meta[index as usize];
if meta.location.archetype_id == ArchetypeId::INVALID {
meta.generation += generations;
true
} else {
false
}
}
/// Get the [`Entity`] with a given id, if it exists in this [`Entities`] collection
/// Returns `None` if this [`Entity`] is outside of the range of currently reserved Entities
///
/// Note: This method may return [`Entities`](Entity) which are currently free
/// Note that [`contains`](Entities::contains) will correctly return false for freed
/// entities, since it checks the generation
pub fn resolve_from_id(&self, index: u32) -> Option<Entity> {
let idu = index as usize;
if let Some(&EntityMeta { generation, .. }) = self.meta.get(idu) {
Some(Entity { generation, index })
} else {
// `index` is outside of the meta list - check whether it is reserved but not yet flushed.
let free_cursor = self.free_cursor.load(Ordering::Relaxed);
// If this entity was manually created, then free_cursor might be positive
// Returning None handles that case correctly
let num_pending = usize::try_from(-free_cursor).ok()?;
(idu < self.meta.len() + num_pending).then_some(Entity {
generation: 0,
index,
})
}
}
fn needs_flush(&mut self) -> bool {
*self.free_cursor.get_mut() != self.pending.len() as IdCursor
}
mod.rs

pub struct Entity {
generation: u32,
index: u32,
}
pub(crate) enum AllocAtWithoutReplacement {
Exists(EntityLocation),
DidNotExist,
ExistsWithWrongGeneration,
}
impl Entity {
#[cfg(test)]
pub(crate) const fn new(index: u32, generation: u32) -> Entity {
Entity { index, generation }
}
/// An entity ID with a placeholder value. This may or may not correspond to an actual entity,
/// and should be overwritten by a new value before being used.
///
/// ## Examples
///
/// Initializing a collection (e.g. `array` or `Vec`) with a known size:
///
/// ```no_run
/// # use bevy_ecs::prelude::*;
/// // Create a new array of size 10 filled with invalid entity ids.
/// let mut entities: [Entity; 10] = [Entity::PLACEHOLDER; 10];
///
/// //... replace the entities with valid ones.
/// ```
///
/// Deriving [`Reflect`](bevy_reflect::Reflect) for a component that has an `Entity` field:
///
/// ```no_run
/// # use bevy_ecs::{prelude::*, component::*};
/// # use bevy_reflect::Reflect;
/// #[derive(Reflect, Component)]
/// #[reflect(Component)]
/// pub struct MyStruct {
/// pub entity: Entity,
/// }
///
/// impl FromWorld for MyStruct {
/// fn from_world(_world: &mut World) -> Self {
/// Self {
/// entity: Entity::PLACEHOLDER,
/// }
/// }
/// }
/// ```
pub const PLACEHOLDER: Self = Self::from_raw(u32::MAX);
/// Creates a new entity ID with the specified `index` and a generation of 0.
///
/// # Note
///
/// Spawning a specific `entity` value is __rarely the right choice__. Most apps should favor
/// [`Commands::spawn`](crate::system::Commands::spawn). This method should generally
/// only be used for sharing entities across apps, and only when they have a scheme
/// worked out to share an index space (which doesn't happen by default).
///
/// In general, one should not try to synchronize the ECS by attempting to ensure that
/// `Entity` lines up between instances, but instead insert a secondary identifier as
/// a component.
pub const fn from_raw(index: u32) -> Entity {
Entity {
index,
generation: 0,
}
}
/// Convert to a form convenient for passing outside of rust.
///
/// Only useful for identifying entities within the same instance of an application. Do not use
/// for serialization between runs.
///
/// No particular structure is guaranteed for the returned bits.
pub const fn to_bits(self) -> u64 {
(self.generation as u64) << 32 | self.index as u64
}
/// Reconstruct an `Entity` previously destructured with [`Entity::to_bits`].
///
/// Only useful when applied to results from `to_bits` in the same instance of an application.
pub const fn from_bits(bits: u64) -> Self {
Self {
generation: (bits >> 32) as u32,
index: bits as u32,
}
}
/// Return a transiently unique identifier.
///
/// No two simultaneously-live entities share the same index, but dead entities' indices may collide
/// with both live and dead entities. Useful for compactly representing entities within a
/// specific snapshot of the world, such as when serializing.
#[inline]
pub const fn index(self) -> u32 {
self.index
}
/// Returns the generation of this Entity's index. The generation is incremented each time an
/// entity with a given index is despawned. This serves as a "count" of the number of times a
/// given index has been reused; (index, generation) pairs uniquely identify a given Entity.
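///
/// Illustrative sketch (assumes a `World`; a freshly spawned entity starts at generation 0):
///
/// ```no_run
/// # use bevy_ecs::prelude::*;
/// let mut world = World::new();
/// let entity = world.spawn_empty().id();
/// assert_eq!(entity.generation(), 0);
/// ```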
#[inline]
pub const fn generation(self) -> u32 {
self.generation
}
}
impl Serialize for Entity {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_u64(self.to_bits())
}
}
impl<'de> Deserialize<'de> for Entity {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let id: u64 = serde::de::Deserialize::deserialize(deserializer)?;
Ok(Entity::from_bits(id))
}
}
impl fmt::Debug for Entity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}v{}", self.index, self.generation)
}
}
impl SparseSetIndex for Entity {
#[inline]
fn sparse_set_index(&self) -> usize {
self.index() as usize
}
#[inline]
fn get_sparse_set_index(value: usize) -> Self {
Entity::from_raw(value as u32)
}
}
/// An [`Iterator`] returning a sequence of [`Entity`] values from
/// [`Entities::reserve_entities`](crate::entity::Entities::reserve_entities).
pub struct ReserveEntitiesIterator<'a> {
// Metas, so we can recover the current generation for anything in the freelist.
meta: &'a [EntityMeta],
// Reserved indices formerly in the freelist to hand out.
index_iter: std::slice::Iter<'a, u32>,
// New Entity indices to hand out, outside the range of meta.len().
index_range: std::ops::Range<u32>,
}
impl<'a> Iterator for ReserveEntitiesIterator<'a> {
type Item = Entity;
fn next(&mut self) -> Option<Self::Item> {
self.index_iter
.next()
.map(|&index| Entity {
generation: self.meta[index as usize].generation,
index,
})
.or_else(|| {
self.index_range.next().map(|index| Entity {
generation: 0,
index,
})
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.index_iter.len() + self.index_range.len();
(len, Some(len))
}
}
impl<'a> core::iter::ExactSizeIterator for ReserveEntitiesIterator<'a> {}
impl<'a> core::iter::FusedIterator for ReserveEntitiesIterator<'a> {}
/// A [`World`]'s internal metadata store on all of its entities.
///
/// Contains metadata on:
/// - The generation of every entity.
/// - The alive/dead status of a particular entity. (i.e. "has entity 3 been despawned?")
/// - The location of the entity's components in memory (via [`EntityLocation`])
///
/// [`World`]: crate::world::World
#[derive(Debug)]
pub struct Entities {
meta: Vec<EntityMeta>,
/// The `pending` and `free_cursor` fields describe three sets of Entity IDs
/// that have been freed or are in the process of being allocated:
///
/// - The `freelist` IDs, previously freed by `free()`. These IDs are available to any of
/// [`alloc`], [`reserve_entity`] or [`reserve_entities`]. Allocation will always prefer
/// these over brand new IDs.
///
/// - The `reserved` list of IDs that were once in the freelist, but got reserved by
/// [`reserve_entities`] or [`reserve_entity`]. They are now waiting for [`flush`] to make them
/// fully allocated.
///
/// - The count of new IDs that do not yet exist in `self.meta`, but which we have handed out
/// and reserved. [`flush`] will allocate room for them in `self.meta`.
///
/// The contents of `pending` look like this:
///
/// ```txt
/// ----------------------------
/// | freelist | reserved |
/// ----------------------------
/// ^ ^
/// free_cursor pending.len()
/// ```
///
/// As IDs are allocated, `free_cursor` is atomically decremented, moving
/// items from the freelist into the reserved list by sliding over the boundary.
///
/// Once the freelist runs out, `free_cursor` starts going negative.
/// The more negative it is, the more IDs have been reserved starting exactly at
/// the end of `meta.len()`.
///
/// This formulation allows us to reserve any number of IDs first from the freelist
/// and then from the new IDs, using only a single atomic subtract.
///
/// Once [`flush`] is done, `free_cursor` will equal `pending.len()`.
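///
/// Illustration (state assumed): with `pending = [5]` and `free_cursor = 1`, one call to
/// [`reserve_entity`] decrements the cursor to `0` and hands out index `5`; a second call
/// drives it to `-1` and hands out the brand-new index `meta.len()`.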
///
/// [`alloc`]: Entities::alloc
/// [`reserve_entity`]: Entities::reserve_entity
/// [`reserve_entities`]: Entities::reserve_entities
/// [`flush`]: Entities::flush
pending: Vec<u32>,
free_cursor: AtomicIdCursor,
/// Stores the number of live entities for [`len`](Entities::len)
len: u32,
}
impl Entities {
pub(crate) const fn new() -> Self {
Entities {
meta: Vec::new(),
pending: Vec::new(),
free_cursor: AtomicIdCursor::new(0),
len: 0,
}
}
/// Reserve entity IDs concurrently.
///
/// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush).
pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator {
// Use one atomic subtract to grab a range of new IDs. The range might be
// entirely nonnegative, meaning all IDs come from the freelist, or entirely
// negative, meaning they are all new IDs to allocate, or a mix of both.
let range_end = self
.free_cursor
// Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
// and use AtomicIsize instead (see note on `IdCursor`).
.fetch_sub(IdCursor::try_from(count).unwrap(), Ordering::Relaxed);
let range_start = range_end - IdCursor::try_from(count).unwrap();
let freelist_range = range_start.max(0) as usize..range_end.max(0) as usize;
let (new_id_start, new_id_end) = if range_start >= 0 {
// We satisfied all requests from the freelist.
(0, 0)
} else {
// We need to allocate some new Entity IDs outside of the range of self.meta.
//
// `range_start` covers some negative territory, e.g. `-3..6`.
// Since the nonnegative values `0..6` are handled by the freelist, that
// means we need to handle the negative range here.
//
// In this example, we truncate the end to 0, leaving us with `-3..0`.
// Then we negate these values to indicate how far beyond the end of `meta.end()`
// to go, yielding `meta.len()+0.. meta.len()+3`.
let base = self.meta.len() as IdCursor;
let new_id_end = u32::try_from(base - range_start).expect("too many entities");
// `new_id_end` is in range, so no need to check `start`.
let new_id_start = (base - range_end.min(0)) as u32;
(new_id_start, new_id_end)
};
ReserveEntitiesIterator {
meta: &self.meta[..],
index_iter: self.pending[freelist_range].iter(),
index_range: new_id_start..new_id_end,
}
}
/// Reserve one entity ID concurrently.
///
/// Equivalent to `self.reserve_entities(1).next().unwrap()`, but more efficient.
pub fn reserve_entity(&self) -> Entity {
let n = self.free_cursor.fetch_sub(1, Ordering::Relaxed);
if n > 0 {
// Allocate from the freelist.
let index = self.pending[(n - 1) as usize];
Entity {
generation: self.meta[index as usize].generation,
index,
}
} else {
// Grab a new ID, outside the range of `meta.len()`. `flush()` must
// eventually be called to make it valid.
//
// As `self.free_cursor` goes more and more negative, we return IDs farther
// and farther beyond `meta.len()`.
Entity {
generation: 0,
index: u32::try_from(self.meta.len() as IdCursor - n).expect("too many entities"),
}
}
}
/// Check that we do not have pending work requiring `flush()` to be called.
fn verify_flushed(&mut self) {
debug_assert!(
!self.needs_flush(),
"flush() needs to be called before this operation is legal"
);
}
/// Allocate an entity ID directly.
pub fn alloc(&mut self) -> Entity {
self.verify_flushed();
self.len += 1;
if let Some(index) = self.pending.pop() {
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
Entity {
generation: self.meta[index as usize].generation,
index,
}
} else {
let index = u32::try_from(self.meta.len()).expect("too many entities");
self.meta.push(EntityMeta::EMPTY);
Entity {
generation: 0,
index,
}
}
}
/// Allocate a specific entity ID, overwriting its generation.
///
/// Returns the location of the entity currently using the given ID, if any. Location should be
/// written immediately.
pub fn alloc_at(&mut self, entity: Entity) -> Option<EntityLocation> {
self.verify_flushed();
let loc = if entity.index as usize >= self.meta.len() {
self.pending.extend((self.meta.len() as u32)..entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.meta
.resize(entity.index as usize + 1, EntityMeta::EMPTY);
self.len += 1;
None
} else if let Some(index) = self.pending.iter().position(|item| *item == entity.index) {
self.pending.swap_remove(index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len += 1;
None
} else {
Some(mem::replace(
&mut self.meta[entity.index as usize].location,
EntityMeta::EMPTY.location,
))
};
self.meta[entity.index as usize].generation = entity.generation;
loc
}
/// Allocate a specific entity ID, overwriting its generation.
///
/// Returns the location of the entity currently using the given ID, if any.
pub(crate) fn alloc_at_without_replacement(
&mut self,
entity: Entity,
) -> AllocAtWithoutReplacement {
self.verify_flushed();
let result = if entity.index as usize >= self.meta.len() {
self.pending.extend((self.meta.len() as u32)..entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.meta
.resize(entity.index as usize + 1, EntityMeta::EMPTY);
self.len += 1;
AllocAtWithoutReplacement::DidNotExist
} else if let Some(index) = self.pending.iter().position(|item| *item == entity.index) {
self.pending.swap_remove(index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len += 1;
AllocAtWithoutReplacement::DidNotExist
} else {
let current_meta = &self.meta[entity.index as usize];
if current_meta.location.archetype_id == ArchetypeId::INVALID {
AllocAtWithoutReplacement::DidNotExist
} else if current_meta.generation == entity.generation {
AllocAtWithoutReplacement::Exists(current_meta.location)
} else {
return AllocAtWithoutReplacement::ExistsWithWrongGeneration;
}
};
self.meta[entity.index as usize].generation = entity.generation;
result
}
/// Destroy an entity, allowing it to be reused.
///
/// Must not be called while reserved entities are awaiting `flush()`.
pub fn free(&mut self, entity: Entity) -> Option<EntityLocation> {
self.verify_flushed();
let meta = &mut self.meta[entity.index as usize];
if meta.generation != entity.generation {
return None;
}
meta.generation += 1;
let loc = mem::replace(&mut meta.location, EntityMeta::EMPTY.location);
self.pending.push(entity.index);
let new_free_cursor = self.pending.len() as IdCursor;
*self.free_cursor.get_mut() = new_free_cursor;
self.len -= 1;
Some(loc)
}
/// Ensure at least `n` allocations can succeed without reallocating.
pub fn reserve(&mut self, additional: u32) {
self.verify_flushed();
let freelist_size = *self.free_cursor.get_mut();
// Unwrap: these conversions can only fail on platforms that don't support 64-bit atomics
// and use AtomicIsize instead (see note on `IdCursor`).
let shortfall = IdCursor::try_from(additional).unwrap() - freelist_size;
if shortfall > 0 {
self.meta.reserve(shortfall as usize);
}
}
/// Returns true if the [`Entities`] contains [`entity`](Entity).
// This will return false for entities which have been freed, even if
// not reallocated, since the generation is incremented in `free`
pub fn contains(&self, entity: Entity) -> bool {
self.resolve_from_id(entity.index())
.map_or(false, |e| e.generation() == entity.generation)
}
/// Clears all [`Entity`] from the World.
pub fn clear(&mut self) {
self.meta.clear();
self.pending.clear();
*self.free_cursor.get_mut() = 0;
self.len = 0;
}
/// Returns the location of an [`Entity`].
/// Note: returns `None` for pending entities, since their location is still [`ArchetypeId::INVALID`].
#[inline]
pub fn get(&self, entity: Entity) -> Option<EntityLocation> {
if let Some(meta) = self.meta.get(entity.index as usize) {
if meta.generation != entity.generation
|| meta.location.archetype_id == ArchetypeId::INVALID
{
return None;
}
Some(meta.location)
} else {
None
}
}
/// Updates the location of an [`Entity`]. This must be called when moving the components of
/// the entity around in storage.
///
/// # Safety
/// - `index` must be a valid entity index.
/// - `location` must be valid for the entity at `index` or immediately made valid afterwards
/// before handing control to unknown code.
#[inline]
pub(crate) unsafe fn set(&mut self, index: u32, location: EntityLocation) {
// SAFETY: Caller guarantees that `index` is a valid entity index
self.meta.get_unchecked_mut(index as usize).location = location;
}
/// Increments the `generation` of a freed [`Entity`]. The next entity ID allocated with this
/// `index` will count `generation` starting from the prior `generation` + the specified
/// value + 1.
///
/// Does nothing if no entity with this `index` has been allocated yet.
pub(crate) fn reserve_generations(&mut self, index: u32, generations: u32) -> bool {
if (index as usize) >= self.meta.len() {
return false;
}
let meta = &mut self.meta[index as usize];
if meta.location.archetype_id == ArchetypeId::INVALID {
meta.generation += generations;
true
} else {
false
}
}
/// Get the [`Entity`] with a given id, if it exists in this [`Entities`] collection
/// Returns `None` if this [`Entity`] is outside of the range of currently reserved Entities
///
/// Note: This method may return [`Entities`](Entity) which are currently free
/// Note that [`contains`](Entities::contains) will correctly return false for freed
/// entities, since it checks the generation
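///
/// For instance (state assumed): if index `3` was freed and its generation bumped to `1`,
/// `resolve_from_id(3)` still returns `Some` of an `Entity` with generation `1`, while
/// [`contains`](Entities::contains) on a stale `(3, generation 0)` handle returns `false`.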
pub fn resolve_from_id(&self, index: u32) -> Option<Entity> {
let idu = index as usize;
if let Some(&EntityMeta { generation, .. }) = self.meta.get(idu) {
Some(Entity { generation, index })
} else {
// `index` is outside of the meta list - check whether it is reserved but not yet flushed.
let free_cursor = self.free_cursor.load(Ordering::Relaxed);
// If this entity was manually created, then free_cursor might be positive
// Returning None handles that case correctly
let num_pending = usize::try_from(-free_cursor).ok()?;
(idu < self.meta.len() + num_pending).then_some(Entity {
generation: 0,
index,
})
}
}
fn needs_flush(&mut self) -> bool {
*self.free_cursor.get_mut() != self.pending.len() as IdCursor
}
windows.rs

//! Windows-specific types for signal handling.
//!
//! This module is only defined on Windows and contains the primary `Event` type
//! for receiving notifications of events. These events are listened for via the
//! `SetConsoleCtrlHandler` function which receives events of the type
//! `CTRL_C_EVENT` and `CTRL_BREAK_EVENT`
#![cfg(windows)]
extern crate mio;
extern crate winapi;
use std::cell::RefCell;
use std::io;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Once, ONCE_INIT};
use futures::future;
use futures::stream::Fuse;
use futures::sync::mpsc;
use futures::sync::oneshot;
use futures::{Async, Future, IntoFuture, Poll, Stream};
use tokio_reactor::{Handle, PollEvented};
use mio::Ready;
use self::winapi::shared::minwindef::*;
use self::winapi::um::wincon::*;
use IoFuture;
extern "system" {
fn SetConsoleCtrlHandler(HandlerRoutine: usize, Add: BOOL) -> BOOL;
}
static INIT: Once = ONCE_INIT;
static mut GLOBAL_STATE: *mut GlobalState = 0 as *mut _;
/// Stream of events discovered via `SetConsoleCtrlHandler`.
///
/// This structure can be used to listen for events of the type `CTRL_C_EVENT`
/// and `CTRL_BREAK_EVENT`. The `Stream` trait is implemented for this struct
/// and will resolve for each notification received by the process. Note that
/// there are a few limitations with this as well:
///
/// * A notification to this process notifies *all* `Event` streams for that
/// event type.
/// * Notifications to an `Event` stream **are coalesced** if they aren't
/// processed quickly enough. This means that if two notifications are
/// received back-to-back, then the stream may only receive one item about the
/// two notifications.
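///
/// A minimal usage sketch (assuming this module is consumed as part of the
/// `tokio-signal` crate with a `tokio` 0.1 runtime; names and setup assumed,
/// error handling elided):
///
/// ```rust,ignore
/// use futures::{Future, Stream};
///
/// let ctrl_c = Event::ctrl_c()
///     .flatten_stream()
///     .for_each(|()| {
///         println!("got CTRL-C");
///         Ok(())
///     })
///     .map_err(|e| panic!("ctrl-c error: {}", e));
/// tokio::run(ctrl_c);
/// ```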
pub struct Event {
reg: PollEvented<MyRegistration>,
_finished: oneshot::Sender<()>,
}
struct GlobalState {
ready: mio::SetReadiness,
tx: mpsc::UnboundedSender<Message>,
ctrl_c: GlobalEventState,
ctrl_break: GlobalEventState,
}
struct GlobalEventState {
ready: AtomicBool,
}
enum Message {
NewEvent(DWORD, oneshot::Sender<io::Result<Event>>),
}
struct DriverTask {
handle: Handle,
reg: PollEvented<MyRegistration>,
rx: Fuse<mpsc::UnboundedReceiver<Message>>,
ctrl_c: EventState,
ctrl_break: EventState,
}
struct EventState {
tasks: Vec<(RefCell<oneshot::Receiver<()>>, mio::SetReadiness)>,
}
impl Event {
/// Creates a new stream listening for the `CTRL_C_EVENT` events.
///
/// This function will register a handler via `SetConsoleCtrlHandler` and
/// deliver notifications to the returned stream.
pub fn ctrl_c() -> IoFuture<Event> {
Event::ctrl_c_handle(&Handle::current())
}
/// Creates a new stream listening for the `CTRL_C_EVENT` events.
///
/// This function will register a handler via `SetConsoleCtrlHandler` and
/// deliver notifications to the returned stream.
pub fn ctrl_c_handle(handle: &Handle) -> IoFuture<Event> {
Event::new(CTRL_C_EVENT, handle)
}
/// Creates a new stream listening for the `CTRL_BREAK_EVENT` events.
///
/// This function will register a handler via `SetConsoleCtrlHandler` and
/// deliver notifications to the returned stream.
pub fn ctrl_break() -> IoFuture<Event> {
Event::ctrl_break_handle(&Handle::current())
}
/// Creates a new stream listening for the `CTRL_BREAK_EVENT` events.
///
/// This function will register a handler via `SetConsoleCtrlHandler` and
/// deliver notifications to the returned stream.
pub fn ctrl_break_handle(handle: &Handle) -> IoFuture<Event> {
Event::new(CTRL_BREAK_EVENT, handle)
}
fn new(signum: DWORD, handle: &Handle) -> IoFuture<Event> {
let mut init = None;
INIT.call_once(|| {
init = Some(global_init(handle));
});
let new_signal = future::lazy(move || {
let (tx, rx) = oneshot::channel();
let msg = Message::NewEvent(signum, tx);
let res = unsafe { (*GLOBAL_STATE).tx.clone().unbounded_send(msg) };
res.expect(
"failed to request a new signal stream, did the \
first event loop go away?",
);
rx.then(|r| r.unwrap())
});
match init {
Some(init) => Box::new(init.into_future().and_then(|()| new_signal)),
None => Box::new(new_signal),
}
}
}
impl Stream for Event {
type Item = ();
type Error = io::Error;
fn poll(&mut self) -> Poll<Option<()>, io::Error> {
if !self.reg.poll_read_ready(Ready::readable())?.is_ready() {
return Ok(Async::NotReady);
}
self.reg.clear_read_ready(Ready::readable())?;
self.reg
.get_ref()
.inner
.borrow()
.as_ref()
.unwrap()
.1
.set_readiness(mio::Ready::empty())
.expect("failed to set readiness");
Ok(Async::Ready(Some(())))
}
}
fn global_init(handle: &Handle) -> io::Result<()> {
let (tx, rx) = mpsc::unbounded();
let reg = MyRegistration {
inner: RefCell::new(None),
};
let reg = try!(PollEvented::new_with_handle(reg, handle));
let ready = reg.get_ref().inner.borrow().as_ref().unwrap().1.clone();
unsafe {
let state = Box::new(GlobalState {
ready: ready,
ctrl_c: GlobalEventState {
ready: AtomicBool::new(false),
},
ctrl_break: GlobalEventState {
ready: AtomicBool::new(false),
},
tx: tx,
});
GLOBAL_STATE = Box::into_raw(state);
let rc = SetConsoleCtrlHandler(handler as usize, TRUE);
if rc == 0 {
Box::from_raw(GLOBAL_STATE);
GLOBAL_STATE = 0 as *mut _;
return Err(io::Error::last_os_error());
}
::tokio_executor::spawn(Box::new(DriverTask {
handle: handle.clone(),
rx: rx.fuse(),
reg: reg,
ctrl_c: EventState { tasks: Vec::new() },
ctrl_break: EventState { tasks: Vec::new() },
}));
Ok(())
}
}
impl Future for DriverTask {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
self.check_event_drops();
self.check_messages();
self.check_events().unwrap();
// TODO: when to finish this task?
Ok(Async::NotReady)
}
}
impl DriverTask {
fn check_event_drops(&mut self) {
self.ctrl_c
.tasks
.retain(|task| !task.0.borrow_mut().poll().is_err());
self.ctrl_break
.tasks
.retain(|task| !task.0.borrow_mut().poll().is_err());
}
fn check_messages(&mut self) {
loop {
// Acquire the next message
let message = match self.rx.poll().unwrap() {
Async::Ready(Some(e)) => e,
Async::Ready(None) | Async::NotReady => break,
};
let (sig, complete) = match message {
Message::NewEvent(sig, complete) => (sig, complete),
};
let event = if sig == CTRL_C_EVENT {
&mut self.ctrl_c
} else {
&mut self.ctrl_break
};
// Acquire the (registration, set_readiness) pair by... assuming
// we're on the event loop (true because of the spawn above).
let reg = MyRegistration {
inner: RefCell::new(None),
};
let reg = match PollEvented::new_with_handle(reg, &self.handle) {
Ok(reg) => reg,
Err(e) => {
drop(complete.send(Err(e)));
continue;
}
};
// Create the `Event` to pass back and then also keep a handle to
// the `SetReadiness` for ourselves internally.
let (tx, rx) = oneshot::channel();
let ready = reg.get_ref().inner.borrow_mut().as_mut().unwrap().1.clone();
drop(complete.send(Ok(Event {
reg: reg,
_finished: tx,
})));
event.tasks.push((RefCell::new(rx), ready));
}
}
fn check_events(&mut self) -> io::Result<()> {
if self.reg.poll_read_ready(Ready::readable())?.is_not_ready() {
return Ok(());
}
self.reg.clear_read_ready(Ready::readable())?;
self.reg
.get_ref()
.inner
.borrow()
.as_ref()
.unwrap()
.1
.set_readiness(mio::Ready::empty())
.unwrap();
if unsafe { (*GLOBAL_STATE).ctrl_c.ready.swap(false, Ordering::SeqCst) } {
for task in self.ctrl_c.tasks.iter() {
task.1.set_readiness(mio::Ready::readable()).unwrap();
}
}
if unsafe {
(*GLOBAL_STATE)
.ctrl_break
.ready
.swap(false, Ordering::SeqCst)
} {
for task in self.ctrl_break.tasks.iter() {
task.1.set_readiness(mio::Ready::readable()).unwrap();
}
}
Ok(())
}
}
unsafe extern "system" fn handler(ty: DWORD) -> BOOL {
let event = match ty {
CTRL_C_EVENT => &(*GLOBAL_STATE).ctrl_c,
CTRL_BREAK_EVENT => &(*GLOBAL_STATE).ctrl_break,
_ => return FALSE,
};
if event.ready.swap(true, Ordering::SeqCst) {
FALSE
} else {
drop((*GLOBAL_STATE).ready.set_readiness(mio::Ready::readable()));
// TODO: this will report that we handled a CTRL_BREAK_EVENT when in
// fact we may not have any streams actually created for that
// event.
TRUE
}
}
struct MyRegistration {
inner: RefCell<Option<(mio::Registration, mio::SetReadiness)>>,
}
impl mio::Evented for MyRegistration {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
events: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
let reg = mio::Registration::new2();
reg.0.register(poll, token, events, opts)?;
*self.inner.borrow_mut() = Some(reg);
Ok(())
}
fn reregister(
&self,
_poll: &mio::Poll,
_token: mio::Token,
_events: mio::Ready,
_opts: mio::PollOpt,
) -> io::Result<()> {
Ok(())
}
fn deregister(&self, _poll: &mio::Poll) -> io::Result<()> {
Ok(())
}
}
windows.rs

//! Windows-specific types for signal handling.
//!
//! This module is only defined on Windows and contains the primary `Event` type
//! for receiving notifications of events. These events are listened for via the
//! `SetConsoleCtrlHandler` function which receives events of the type
//! `CTRL_C_EVENT` and `CTRL_BREAK_EVENT`
#![cfg(windows)]
extern crate mio;
extern crate winapi;
use std::cell::RefCell;
use std::io;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Once, ONCE_INIT};
use futures::future;
use futures::stream::Fuse;
use futures::sync::mpsc;
use futures::sync::oneshot;
use futures::{Async, Future, IntoFuture, Poll, Stream};
use tokio_reactor::{Handle, PollEvented};
use mio::Ready;
use self::winapi::shared::minwindef::*;
use self::winapi::um::wincon::*;
use IoFuture;
extern "system" {
fn SetConsoleCtrlHandler(HandlerRoutine: usize, Add: BOOL) -> BOOL;
}
static INIT: Once = ONCE_INIT;
static mut GLOBAL_STATE: *mut GlobalState = 0 as *mut _;
/// Stream of events discovered via `SetConsoleCtrlHandler`.
///
/// This structure can be used to listen for events of the type `CTRL_C_EVENT`
/// and `CTRL_BREAK_EVENT`. The `Stream` trait is implemented for this struct
/// and will resolve for each notification received by the process. Note that
/// there are a few limitations with this as well:
///
/// * A notification to this process notifies *all* `Event` streams for that
/// event type.
/// * Notifications to an `Event` stream **are coalesced** if they aren't
/// processed quickly enough. This means that if two notifications are
/// received back-to-back, then the stream may only receive one item about the
/// two notifications.
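///
/// Because notifications coalesce, two rapid CTRL-BREAK presses may surface as a single
/// item. A sketch (crate usage assumed as in `tokio-signal`; runtime setup elided):
///
/// ```rust,ignore
/// use futures::{Future, Stream};
///
/// let events = Event::ctrl_break()
///     .flatten_stream()
///     .for_each(|()| {
///         println!("got CTRL-BREAK (possibly covering several presses)");
///         Ok(())
///     });
/// ```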
pub struct Event {
reg: PollEvented<MyRegistration>,
_finished: oneshot::Sender<()>,
}
struct GlobalState {
ready: mio::SetReadiness,
tx: mpsc::UnboundedSender<Message>,
ctrl_c: GlobalEventState,
ctrl_break: GlobalEventState,
}
struct GlobalEventState {
ready: AtomicBool,
}
enum Message {
NewEvent(DWORD, oneshot::Sender<io::Result<Event>>),
}
struct DriverTask {
handle: Handle,
reg: PollEvented<MyRegistration>,
rx: Fuse<mpsc::UnboundedReceiver<Message>>,
ctrl_c: EventState,
ctrl_break: EventState,
}
struct EventState {
tasks: Vec<(RefCell<oneshot::Receiver<()>>, mio::SetReadiness)>,
}
impl Event {
/// Creates a new stream listening for the `CTRL_C_EVENT` events.
///
/// This function will register a handler via `SetConsoleCtrlHandler` and
/// deliver notifications to the returned stream.
pub fn ctrl_c() -> IoFuture<Event> {
Event::ctrl_c_handle(&Handle::current())
}
/// Creates a new stream listening for the `CTRL_C_EVENT` events.
///
/// This function will register a handler via `SetConsoleCtrlHandler` and
/// deliver notifications to the returned stream.
pub fn ctrl_c_handle(handle: &Handle) -> IoFuture<Event> {
Event::new(CTRL_C_EVENT, handle)
}
/// Creates a new stream listening for the `CTRL_BREAK_EVENT` events.
///
/// This function will register a handler via `SetConsoleCtrlHandler` and
/// deliver notifications to the returned stream.
pub fn ctrl_break() -> IoFuture<Event> {
Event::ctrl_break_handle(&Handle::current())
}
/// Creates a new stream listening for the `CTRL_BREAK_EVENT` events.
///
/// This function will register a handler via `SetConsoleCtrlHandler` and
/// deliver notifications to the returned stream.
pub fn ctrl_break_handle(handle: &Handle) -> IoFuture<Event> {
Event::new(CTRL_BREAK_EVENT, handle)
}
fn new(signum: DWORD, handle: &Handle) -> IoFuture<Event> {
let mut init = None;
INIT.call_once(|| {
init = Some(global_init(handle));
});
let new_signal = future::lazy(move || {
let (tx, rx) = oneshot::channel();
let msg = Message::NewEvent(signum, tx);
let res = unsafe { (*GLOBAL_STATE).tx.clone().unbounded_send(msg) };
res.expect(
"failed to request a new signal stream, did the \
first event loop go away?",
);
rx.then(|r| r.unwrap())
});
match init {
Some(init) => Box::new(init.into_future().and_then(|()| new_signal)),
None => Box::new(new_signal),
}
}
}
impl Stream for Event {
type Item = ();
type Error = io::Error;
fn poll(&mut self) -> Poll<Option<()>, io::Error> {
if !self.reg.poll_read_ready(Ready::readable())?.is_ready() {
return Ok(Async::NotReady);
}
self.reg.clear_read_ready(Ready::readable())?;
self.reg
.get_ref()
.inner
.borrow()
.as_ref()
.unwrap()
.1
.set_readiness(mio::Ready::empty())
.expect("failed to set readiness");
Ok(Async::Ready(Some(())))
}
}
fn global_init(handle: &Handle) -> io::Result<()> {
let (tx, rx) = mpsc::unbounded();
let reg = MyRegistration {
inner: RefCell::new(None),
};
let reg = try!(PollEvented::new_with_handle(reg, handle));
let ready = reg.get_ref().inner.borrow().as_ref().unwrap().1.clone();
unsafe {
let state = Box::new(GlobalState {
ready: ready,
ctrl_c: GlobalEventState {
ready: AtomicBool::new(false),
},
ctrl_break: GlobalEventState {
ready: AtomicBool::new(false),
},
tx: tx,
});
GLOBAL_STATE = Box::into_raw(state);
let rc = SetConsoleCtrlHandler(handler as usize, TRUE);
if rc == 0 {
Box::from_raw(GLOBAL_STATE);
GLOBAL_STATE = 0 as *mut _;
return Err(io::Error::last_os_error());
}
::tokio_executor::spawn(Box::new(DriverTask {
handle: handle.clone(),
rx: rx.fuse(),
reg: reg,
ctrl_c: EventState { tasks: Vec::new() },
ctrl_break: EventState { tasks: Vec::new() },
}));
Ok(())
}
}
impl Future for DriverTask {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
self.check_event_drops();
self.check_messages();
self.check_events().unwrap();
// TODO: when to finish this task?
Ok(Async::NotReady)
}
}
impl DriverTask {
fn check_event_drops(&mut self) {
self.ctrl_c
.tasks
.retain(|task| !task.0.borrow_mut().poll().is_err());
self.ctrl_break
.tasks
.retain(|task| !task.0.borrow_mut().poll().is_err());
}
fn check_messages(&mut self) {
loop {
// Acquire the next message
let message = match self.rx.poll().unwrap() {
Async::Ready(Some(e)) => e,
Async::Ready(None) | Async::NotReady => break,
};
let (sig, complete) = match message {
Message::NewEvent(sig, complete) => (sig, complete),
};
let event = if sig == CTRL_C_EVENT {
&mut self.ctrl_c
} else {
&mut self.ctrl_break
};
// Acquire the (registration, set_readiness) pair by... assuming
// we're on the event loop (true because of the spawn above).
let reg = MyRegistration {
inner: RefCell::new(None),
};
let reg = match PollEvented::new_with_handle(reg, &self.handle) {
Ok(reg) => reg,
Err(e) => {
drop(complete.send(Err(e)));
continue;
}
};
// Create the `Event` to pass back and then also keep a handle to
// the `SetReadiness` for ourselves internally.
let (tx, rx) = oneshot::channel();
let ready = reg.get_ref().inner.borrow_mut().as_mut().unwrap().1.clone();
drop(complete.send(Ok(Event {
reg: reg,
_finished: tx,
})));
event.tasks.push((RefCell::new(rx), ready));
}
}
fn check_events(&mut self) -> io::Result<()> {
if self.reg.poll_read_ready(Ready::readable())?.is_not_ready() {
return Ok(());
}
self.reg.clear_read_ready(Ready::readable())?;
self.reg
.get_ref()
.inner
.borrow()
.as_ref()
.unwrap()
.1
.set_readiness(mio::Ready::empty())
.unwrap();
if unsafe { (*GLOBAL_STATE).ctrl_c.ready.swap(false, Ordering::SeqCst) } {
for task in self.ctrl_c.tasks.iter() {
task.1.set_readiness(mio::Ready::readable()).unwrap();
}
}
if unsafe {
(*GLOBAL_STATE)
.ctrl_break
.ready
.swap(false, Ordering::SeqCst)
} {
for task in self.ctrl_break.tasks.iter() {
task.1.set_readiness(mio::Ready::readable()).unwrap();
}
}
Ok(())
}
}
unsafe extern "system" fn handler(ty: DWORD) -> BOOL {
let event = match ty {
CTRL_C_EVENT => &(*GLOBAL_STATE).ctrl_c,
CTRL_BREAK_EVENT => &(*GLOBAL_STATE).ctrl_break,
_ => return FALSE,
};
if event.ready.swap(true, Ordering::SeqCst) {
FALSE
} else {
drop((*GLOBAL_STATE).ready.set_readiness(mio::Ready::readable()));
// TODO: this will report that we handled a CTRL_BREAK_EVENT when in
// fact we may not have any streams actually created for that
// event.
TRUE
}
}
struct MyRegistration {
inner: RefCell<Option<(mio::Registration, mio::SetReadiness)>>,
}
impl mio::Evented for MyRegistration {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
events: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
let reg = mio::Registration::new2();
reg.0.register(poll, token, events, opts)?;
*self.inner.borrow_mut() = Some(reg);
Ok(())
}
fn reregister(
&self,
_poll: &mio::Poll,
_token: mio::Token,
_events: mio::Ready,
_opts: mio::PollOpt,
) -> io::Result<()> {
Ok(())
}
fn deregister(&self, _poll: &mio::Poll) -> io::Result<()> {
Ok(())
}
}
extern_crate.rs

//! Provides handling of `extern_crate` attributes.
//!
//! # What the generated code looks like
//!
//! ```rust,ignore
//! #[pre::extern_crate(std)]
//! mod pre_std {
//! mod ptr {
//! #[pre(valid_ptr(src, r))]
//! unsafe fn read<T>(src: *const T) -> T;
//!
//! impl<T> NonNull<T> {
//! #[pre(!ptr.is_null())]
//! const unsafe fn new_unchecked(ptr: *mut T) -> NonNull<T>;
//! }
//! }
//! }
//! ```
//!
//! turns into
//!
//! ```rust,ignore
//! #[doc = "..."]
//! mod pre_std {
//! #[allow(unused_imports)]
//! use pre::pre;
//! #[allow(unused_imports)]
//! #[doc(no_inline)]
//! pub(crate) use std::*;
//!
//! #[doc = "..."]
//! pub(crate) mod ptr {
//! #[allow(unused_imports)]
//! use pre::pre;
//! #[allow(unused_imports)]
//! #[doc(no_inline)]
//! pub(crate) use std::ptr::*;
//!
//! #[doc = "..."]
//! #[pre(!ptr.is_null())]
//! #[pre(no_doc)]
//! #[pre(no_debug_assert)]
//! #[inline(always)]
//! #[allow(non_snake_case)]
//! pub(crate) fn NonNull__impl__new_unchecked__() {}
//!
//! #[pre(valid_ptr(src, r))]
//! #[inline(always)]
//! pub(crate) unsafe fn read<T>(src: *const T) -> T {
//! std::ptr::read(src)
//! }
//! }
//! }
//! ```
use proc_macro2::{Span, TokenStream};
use quote::{quote, quote_spanned, TokenStreamExt};
use std::fmt;
use syn::{
braced,
parse::{Parse, ParseStream},
spanned::Spanned,
token::Brace,
Attribute, FnArg, ForeignItemFn, Ident, ItemUse, Path, PathArguments, PathSegment, Token,
Visibility,
};
use crate::{
documentation::{generate_extern_crate_fn_docs, generate_module_docs},
helpers::{visit_matching_attrs_parsed_mut, AttributeAction, CRATE_NAME},
pre_attr::PreAttr,
};
pub(crate) use impl_block::{impl_block_stub_name, ImplBlock};
mod impl_block;
/// The parsed version of the `extern_crate` attribute content.
pub(crate) struct ExternCrateAttr {
/// The path of the crate/module to which function calls will be forwarded.
path: Path,
}
impl fmt::Display for ExternCrateAttr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#[extern_crate(")?;
if self.path.leading_colon.is_some() {
write!(f, "::")?;
}
for segment in &self.path.segments {
write!(f, "{}", segment.ident)?;
}
write!(f, ")]")
}
}
impl Parse for ExternCrateAttr {
fn parse(input: ParseStream) -> syn::Result<Self> {
Ok(ExternCrateAttr {
path: input.call(Path::parse_mod_style)?,
})
}
}
/// A parsed `extern_crate` annotated module.
pub(crate) struct Module {
/// The attributes on the module.
attrs: Vec<Attribute>,
/// The visibility on the module.
visibility: Visibility,
/// The `mod` token.
mod_token: Token![mod],
/// The name of the module.
ident: Ident,
/// The braces surrounding the content.
braces: Brace,
/// The impl blocks contained in the module.
impl_blocks: Vec<ImplBlock>,
/// The imports contained in the module.
imports: Vec<ItemUse>,
/// The functions contained in the module.
functions: Vec<ForeignItemFn>,
/// The submodules contained in the module.
modules: Vec<Module>,
}
impl fmt::Display for Module {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.original_token_stream())
}
}
impl Spanned for Module {
fn span(&self) -> Span {
self.visibility
.span()
.join(self.braces.span)
.unwrap_or(self.braces.span)
}
}
impl Parse for Module {
fn parse(input: ParseStream) -> syn::Result<Self> {
let attrs = input.call(Attribute::parse_outer)?;
let visibility = input.parse()?;
let mod_token = input.parse()?;
let ident = input.parse()?;
let content;
let braces = braced!(content in input);
let mut impl_blocks = Vec::new();
let mut imports = Vec::new();
let mut functions = Vec::new();
let mut modules = Vec::new();
while !content.is_empty() {
if content.peek(Token![impl]) {
impl_blocks.push(content.parse()?);
} else if <ItemUse as Parse>::parse(&content.fork()).is_ok() {
imports.push(content.parse()?);
} else if <ForeignItemFn as Parse>::parse(&content.fork()).is_ok() {
functions.push(content.parse()?);
} else {
modules.push(content.parse().map_err(|err| {
syn::Error::new(
err.span(),
"expected a module, a function signature, an impl block or a use statement",
)
})?);
}
}
Ok(Module {
attrs,
visibility,
mod_token,
ident,
braces,
impl_blocks,
imports,
functions,
modules,
})
}
}
impl Module {
/// Renders this `extern_crate` annotated module to its final result.
pub(crate) fn render(&self, attr: ExternCrateAttr) -> TokenStream {
let mut tokens = TokenStream::new();
self.render_inner(attr.path, &mut tokens, None, &self.ident);
tokens
}
/// A helper function to generate the final token stream.
///
/// This allows passing the top level visibility and the updated path into recursive calls.
fn render_inner(
&self,
mut path: Path,
tokens: &mut TokenStream,
visibility: Option<&TokenStream>,
top_level_module: &Ident,
) {
if visibility.is_some() {
// Update the path only in recursive calls.
path.segments.push(PathSegment {
ident: self.ident.clone(),
arguments: PathArguments::None,
});
}
let mut attrs = self.attrs.clone();
let mut render_docs = true;
visit_matching_attrs_parsed_mut(&mut attrs, "pre", |attr| match attr.content() {
PreAttr::NoDoc(_) => {
render_docs = false;
AttributeAction::Remove
}
_ => AttributeAction::Keep,
});
if render_docs {
let docs = generate_module_docs(self, &path);
tokens.append_all(quote! { #docs });
}
tokens.append_all(attrs);
let visibility = if let Some(visibility) = visibility {
// We're in a recursive call.
// Use the visibility passed to us.
tokens.append_all(quote! { #visibility });
visibility.clone()
} else {
// We're in the outermost call.
// Use the original visibility and decide which visibility to use in recursive calls.
let local_vis = &self.visibility;
tokens.append_all(quote! { #local_vis });
if let Visibility::Public(pub_keyword) = local_vis {
quote! { #pub_keyword }
} else {
let span = match local_vis {
Visibility::Inherited => self.mod_token.span(),
_ => local_vis.span(),
};
quote_spanned! { span=> pub(crate) }
}
};
let mod_token = self.mod_token;
tokens.append_all(quote! { #mod_token });
tokens.append(self.ident.clone());
let mut brace_content = TokenStream::new();
let crate_name = Ident::new(&CRATE_NAME, Span::call_site());
brace_content.append_all(quote! {
#[allow(unused_imports)]
#[doc(no_inline)]
#visibility use #path::*;
#[allow(unused_imports)]
use #crate_name::pre;
});
for impl_block in &self.impl_blocks {
impl_block.render(&mut brace_content, &path, &visibility, top_level_module);
}
for import in &self.imports {
brace_content.append_all(quote! { #import });
}
for function in &self.functions {
render_function(function, &mut brace_content, &path, &visibility);
}
for module in &self.modules {
module.render_inner(
path.clone(),
&mut brace_content,
Some(&visibility),
top_level_module,
);
}
tokens.append_all(quote_spanned! { self.braces.span=> { #brace_content } });
}
/// Generates a token stream that is semantically equivalent to the original token stream.
///
/// This should only be used for debug purposes.
fn original_token_stream(&self) -> TokenStream {
let mut stream = TokenStream::new();
stream.append_all(&self.attrs);
let vis = &self.visibility;
stream.append_all(quote! { #vis });
stream.append_all(quote! { mod });
stream.append(self.ident.clone());
let mut content = TokenStream::new();
content.append_all(
self.impl_blocks
.iter()
.map(|impl_block| impl_block.original_token_stream()),
);
content.append_all(&self.imports);
content.append_all(&self.functions);
content.append_all(self.modules.iter().map(|m| m.original_token_stream()));
stream.append_all(quote! { { #content } });
stream
}
}
/// Generates the code for a function inside a `extern_crate` module.
fn render_function(
function: &ForeignItemFn,
tokens: &mut TokenStream,
path: &Path,
visibility: &TokenStream,
) {
tokens.append_all(&function.attrs);
let doc_header = generate_extern_crate_fn_docs(path, &function.sig, function.span());
tokens.append_all(quote! { #doc_header });
tokens.append_all(quote_spanned! { function.span()=> #[inline(always)] });
tokens.append_all(visibility.clone().into_iter().map(|mut token| {
token.set_span(function.span());
token
}));
let signature = &function.sig;
tokens.append_all(quote! { #signature });
let mut path = path.clone();
path.segments.push(PathSegment {
ident: function.sig.ident.clone(), | // Update the spans of the `::` tokens to lie in the function
for punct in path
.segments
.pairs_mut()
.map(|p| p.into_tuple().1)
.flatten()
{
punct.spans = [function.span(); 2];
}
let mut args_list = TokenStream::new();
args_list.append_separated(
function.sig.inputs.iter().map(|arg| match arg {
FnArg::Receiver(_) => unreachable!("receiver is not valid in a function argument list"),
FnArg::Typed(pat) => &pat.pat,
}),
quote_spanned! { function.span()=>, },
);
tokens.append_all(quote_spanned! { function.span()=> { #path(#args_list) } });
} | arguments: PathArguments::None,
});
| random_line_split |
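// Editor's sketch (not part of the dataset row above): the shape of the code that
// `render_function` emits. For a declared signature `unsafe fn read<T>(src: *const T) -> T`
// under `#[pre::extern_crate(std)]`, the generated forwarding function looks roughly like
// this (taken from the module docs below); the generated doc attribute is abbreviated.
#[pre(valid_ptr(src, r))]
#[inline(always)]
pub(crate) unsafe fn read<T>(src: *const T) -> T {
    std::ptr::read(src)
}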
extern_crate.rs | //! Provides handling of `extern_crate` attributes.
//!
//! # What the generated code looks like
//!
//! ```rust,ignore
//! #[pre::extern_crate(std)]
//! mod pre_std {
//! mod ptr {
//! #[pre(valid_ptr(src, r))]
//! unsafe fn read<T>(src: *const T) -> T;
//!
//! impl<T> NonNull<T> {
//! #[pre(!ptr.is_null())]
//! const unsafe fn new_unchecked(ptr: *mut T) -> NonNull<T>;
//! }
//! }
//! }
//! ```
//!
//! turns into
//!
//! ```rust,ignore
//! #[doc = "..."]
//! mod pre_std {
//! #[allow(unused_imports)]
//! use pre::pre;
//! #[allow(unused_imports)]
//! #[doc(no_inline)]
//! pub(crate) use std::*;
//!
//! #[doc = "..."]
//! pub(crate) mod ptr {
//! #[allow(unused_imports)]
//! use pre::pre;
//! #[allow(unused_imports)]
//! #[doc(no_inline)]
//! pub(crate) use std::ptr::*;
//!
//! #[doc = "..."]
//! #[pre(!ptr.is_null())]
//! #[pre(no_doc)]
//! #[pre(no_debug_assert)]
//! #[inline(always)]
//! #[allow(non_snake_case)]
//! pub(crate) fn NonNull__impl__new_unchecked__() {}
//!
//! #[pre(valid_ptr(src, r))]
//! #[inline(always)]
//! pub(crate) unsafe fn read<T>(src: *const T) -> T {
//! std::ptr::read(src)
//! }
//! }
//! }
//! ```
use proc_macro2::{Span, TokenStream};
use quote::{quote, quote_spanned, TokenStreamExt};
use std::fmt;
use syn::{
braced,
parse::{Parse, ParseStream},
spanned::Spanned,
token::Brace,
Attribute, FnArg, ForeignItemFn, Ident, ItemUse, Path, PathArguments, PathSegment, Token,
Visibility,
};
use crate::{
documentation::{generate_extern_crate_fn_docs, generate_module_docs},
helpers::{visit_matching_attrs_parsed_mut, AttributeAction, CRATE_NAME},
pre_attr::PreAttr,
};
pub(crate) use impl_block::{impl_block_stub_name, ImplBlock};
mod impl_block;
/// The parsed version of the `extern_crate` attribute content.
pub(crate) struct ExternCrateAttr {
/// The path of the crate/module to which function calls will be forwarded.
path: Path,
}
impl fmt::Display for ExternCrateAttr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#[extern_crate(")?;
if self.path.leading_colon.is_some() {
write!(f, "::")?;
}
for segment in &self.path.segments {
write!(f, "{}", segment.ident)?;
}
write!(f, ")]")
}
}
impl Parse for ExternCrateAttr {
fn parse(input: ParseStream) -> syn::Result<Self> {
Ok(ExternCrateAttr {
path: input.call(Path::parse_mod_style)?,
})
}
}
/// A parsed `extern_crate` annotated module.
pub(crate) struct Module {
/// The attributes on the module.
attrs: Vec<Attribute>,
/// The visibility on the module.
visibility: Visibility,
/// The `mod` token.
mod_token: Token![mod],
/// The name of the module.
ident: Ident,
/// The braces surrounding the content.
braces: Brace,
/// The impl blocks contained in the module.
impl_blocks: Vec<ImplBlock>,
/// The imports contained in the module.
imports: Vec<ItemUse>,
/// The functions contained in the module.
functions: Vec<ForeignItemFn>,
/// The submodules contained in the module.
modules: Vec<Module>,
}
impl fmt::Display for Module {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.original_token_stream())
}
}
impl Spanned for Module {
fn span(&self) -> Span {
self.visibility
.span()
.join(self.braces.span)
.unwrap_or(self.braces.span)
}
}
impl Parse for Module {
fn parse(input: ParseStream) -> syn::Result<Self> {
let attrs = input.call(Attribute::parse_outer)?;
let visibility = input.parse()?;
let mod_token = input.parse()?;
let ident = input.parse()?;
let content;
let braces = braced!(content in input);
let mut impl_blocks = Vec::new();
let mut imports = Vec::new();
let mut functions = Vec::new();
let mut modules = Vec::new();
while !content.is_empty() {
if content.peek(Token![impl]) {
impl_blocks.push(content.parse()?);
} else if <ItemUse as Parse>::parse(&content.fork()).is_ok() {
imports.push(content.parse()?);
} else if <ForeignItemFn as Parse>::parse(&content.fork()).is_ok() {
functions.push(content.parse()?);
} else {
modules.push(content.parse().map_err(|err| {
syn::Error::new(
err.span(),
"expected a module, a function signature, an impl block or a use statement",
)
})?);
}
}
Ok(Module {
attrs,
visibility,
mod_token,
ident,
braces,
impl_blocks,
imports,
functions,
modules,
})
}
}
impl Module {
/// Renders this `extern_crate` annotated module to its final result.
pub(crate) fn render(&self, attr: ExternCrateAttr) -> TokenStream {
let mut tokens = TokenStream::new();
self.render_inner(attr.path, &mut tokens, None, &self.ident);
tokens
}
/// A helper function to generate the final token stream.
///
/// This allows passing the top level visibility and the updated path into recursive calls.
fn render_inner(
&self,
mut path: Path,
tokens: &mut TokenStream,
visibility: Option<&TokenStream>,
top_level_module: &Ident,
) {
if visibility.is_some() {
// Update the path only in recursive calls.
path.segments.push(PathSegment {
ident: self.ident.clone(),
arguments: PathArguments::None,
});
}
let mut attrs = self.attrs.clone();
let mut render_docs = true;
visit_matching_attrs_parsed_mut(&mut attrs, "pre", |attr| match attr.content() {
PreAttr::NoDoc(_) => {
render_docs = false;
AttributeAction::Remove
}
_ => AttributeAction::Keep,
});
if render_docs {
let docs = generate_module_docs(self, &path);
tokens.append_all(quote! { #docs });
}
tokens.append_all(attrs);
let visibility = if let Some(visibility) = visibility {
// We're in a recursive call.
// Use the visibility passed to us.
tokens.append_all(quote! { #visibility });
visibility.clone()
} else {
// We're in the outermost call.
// Use the original visibility and decide which visibility to use in recursive calls.
let local_vis = &self.visibility;
tokens.append_all(quote! { #local_vis });
if let Visibility::Public(pub_keyword) = local_vis {
quote! { #pub_keyword }
} else {
let span = match local_vis {
Visibility::Inherited => self.mod_token.span(),
_ => local_vis.span(),
};
quote_spanned! { span=> pub(crate) }
}
};
let mod_token = self.mod_token;
tokens.append_all(quote! { #mod_token });
tokens.append(self.ident.clone());
let mut brace_content = TokenStream::new();
let crate_name = Ident::new(&CRATE_NAME, Span::call_site());
brace_content.append_all(quote! {
#[allow(unused_imports)]
#[doc(no_inline)]
#visibility use #path::*;
#[allow(unused_imports)]
use #crate_name::pre;
});
for impl_block in &self.impl_blocks {
impl_block.render(&mut brace_content, &path, &visibility, top_level_module);
}
for import in &self.imports {
brace_content.append_all(quote! { #import });
}
for function in &self.functions {
render_function(function, &mut brace_content, &path, &visibility);
}
for module in &self.modules {
module.render_inner(
path.clone(),
&mut brace_content,
Some(&visibility),
top_level_module,
);
}
tokens.append_all(quote_spanned! { self.braces.span=> { #brace_content } });
}
/// Generates a token stream that is semantically equivalent to the original token stream.
///
/// This should only be used for debug purposes.
fn original_token_stream(&self) -> TokenStream | stream
}
}
/// Generates the code for a function inside a `extern_crate` module.
fn render_function(
function: &ForeignItemFn,
tokens: &mut TokenStream,
path: &Path,
visibility: &TokenStream,
) {
tokens.append_all(&function.attrs);
let doc_header = generate_extern_crate_fn_docs(path, &function.sig, function.span());
tokens.append_all(quote! { #doc_header });
tokens.append_all(quote_spanned! { function.span()=> #[inline(always)] });
tokens.append_all(visibility.clone().into_iter().map(|mut token| {
token.set_span(function.span());
token
}));
let signature = &function.sig;
tokens.append_all(quote! { #signature });
let mut path = path.clone();
path.segments.push(PathSegment {
ident: function.sig.ident.clone(),
arguments: PathArguments::None,
});
// Update the spans of the `::` tokens to lie in the function
for punct in path
.segments
.pairs_mut()
.map(|p| p.into_tuple().1)
.flatten()
{
punct.spans = [function.span(); 2];
}
let mut args_list = TokenStream::new();
args_list.append_separated(
function.sig.inputs.iter().map(|arg| match arg {
FnArg::Receiver(_) => unreachable!("receiver is not valid in a function argument list"),
FnArg::Typed(pat) => &pat.pat,
}),
quote_spanned! { function.span()=>, },
);
tokens.append_all(quote_spanned! { function.span()=> { #path(#args_list) } });
}
| {
let mut stream = TokenStream::new();
stream.append_all(&self.attrs);
let vis = &self.visibility;
stream.append_all(quote! { #vis });
stream.append_all(quote! { mod });
stream.append(self.ident.clone());
let mut content = TokenStream::new();
content.append_all(
self.impl_blocks
.iter()
.map(|impl_block| impl_block.original_token_stream()),
);
content.append_all(&self.imports);
content.append_all(&self.functions);
content.append_all(self.modules.iter().map(|m| m.original_token_stream()));
stream.append_all(quote! { { #content } });
| identifier_body |
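// Editor's sketch: how `ExternCrateAttr` and `Module` could be tied together in an
// attribute-macro entry point. This wrapper is assumed for illustration only; the real
// crate's entry point may be organized differently.
#[proc_macro_attribute]
pub fn extern_crate(
    attr: proc_macro::TokenStream,
    item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
    let attr = syn::parse_macro_input!(attr as ExternCrateAttr);
    let module = syn::parse_macro_input!(item as Module);
    // `Module::render` returns a `proc_macro2::TokenStream`; convert it back.
    module.render(attr).into()
}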
extern_crate.rs | //! Provides handling of `extern_crate` attributes.
//!
//! # What the generated code looks like
//!
//! ```rust,ignore
//! #[pre::extern_crate(std)]
//! mod pre_std {
//! mod ptr {
//! #[pre(valid_ptr(src, r))]
//! unsafe fn read<T>(src: *const T) -> T;
//!
//! impl<T> NonNull<T> {
//! #[pre(!ptr.is_null())]
//! const unsafe fn new_unchecked(ptr: *mut T) -> NonNull<T>;
//! }
//! }
//! }
//! ```
//!
//! turns into
//!
//! ```rust,ignore
//! #[doc = "..."]
//! mod pre_std {
//! #[allow(unused_imports)]
//! use pre::pre;
//! #[allow(unused_imports)]
//! #[doc(no_inline)]
//! pub(crate) use std::*;
//!
//! #[doc = "..."]
//! pub(crate) mod ptr {
//! #[allow(unused_imports)]
//! use pre::pre;
//! #[allow(unused_imports)]
//! #[doc(no_inline)]
//! pub(crate) use std::ptr::*;
//!
//! #[doc = "..."]
//! #[pre(!ptr.is_null())]
//! #[pre(no_doc)]
//! #[pre(no_debug_assert)]
//! #[inline(always)]
//! #[allow(non_snake_case)]
//! pub(crate) fn NonNull__impl__new_unchecked__() {}
//!
//! #[pre(valid_ptr(src, r))]
//! #[inline(always)]
//! pub(crate) unsafe fn read<T>(src: *const T) -> T {
//! std::ptr::read(src)
//! }
//! }
//! }
//! ```
use proc_macro2::{Span, TokenStream};
use quote::{quote, quote_spanned, TokenStreamExt};
use std::fmt;
use syn::{
braced,
parse::{Parse, ParseStream},
spanned::Spanned,
token::Brace,
Attribute, FnArg, ForeignItemFn, Ident, ItemUse, Path, PathArguments, PathSegment, Token,
Visibility,
};
use crate::{
documentation::{generate_extern_crate_fn_docs, generate_module_docs},
helpers::{visit_matching_attrs_parsed_mut, AttributeAction, CRATE_NAME},
pre_attr::PreAttr,
};
pub(crate) use impl_block::{impl_block_stub_name, ImplBlock};
mod impl_block;
/// The parsed version of the `extern_crate` attribute content.
pub(crate) struct ExternCrateAttr {
/// The path of the crate/module to which function calls will be forwarded.
path: Path,
}
impl fmt::Display for ExternCrateAttr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#[extern_crate(")?;
if self.path.leading_colon.is_some() {
write!(f, "::")?;
}
for segment in &self.path.segments {
write!(f, "{}", segment.ident)?;
}
write!(f, ")]")
}
}
impl Parse for ExternCrateAttr {
fn parse(input: ParseStream) -> syn::Result<Self> {
Ok(ExternCrateAttr {
path: input.call(Path::parse_mod_style)?,
})
}
}
/// A parsed `extern_crate` annotated module.
pub(crate) struct Module {
/// The attributes on the module.
attrs: Vec<Attribute>,
/// The visibility on the module.
visibility: Visibility,
/// The `mod` token.
mod_token: Token![mod],
/// The name of the module.
ident: Ident,
/// The braces surrounding the content.
braces: Brace,
/// The impl blocks contained in the module.
impl_blocks: Vec<ImplBlock>,
/// The imports contained in the module.
imports: Vec<ItemUse>,
/// The functions contained in the module.
functions: Vec<ForeignItemFn>,
/// The submodules contained in the module.
modules: Vec<Module>,
}
impl fmt::Display for Module {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.original_token_stream())
}
}
impl Spanned for Module {
fn span(&self) -> Span {
self.visibility
.span()
.join(self.braces.span)
.unwrap_or(self.braces.span)
}
}
impl Parse for Module {
fn | (input: ParseStream) -> syn::Result<Self> {
let attrs = input.call(Attribute::parse_outer)?;
let visibility = input.parse()?;
let mod_token = input.parse()?;
let ident = input.parse()?;
let content;
let braces = braced!(content in input);
let mut impl_blocks = Vec::new();
let mut imports = Vec::new();
let mut functions = Vec::new();
let mut modules = Vec::new();
while !content.is_empty() {
if content.peek(Token![impl]) {
impl_blocks.push(content.parse()?);
} else if <ItemUse as Parse>::parse(&content.fork()).is_ok() {
imports.push(content.parse()?);
} else if <ForeignItemFn as Parse>::parse(&content.fork()).is_ok() {
functions.push(content.parse()?);
} else {
modules.push(content.parse().map_err(|err| {
syn::Error::new(
err.span(),
"expected a module, a function signature, an impl block or a use statement",
)
})?);
}
}
Ok(Module {
attrs,
visibility,
mod_token,
ident,
braces,
impl_blocks,
imports,
functions,
modules,
})
}
}
impl Module {
/// Renders this `extern_crate` annotated module to its final result.
pub(crate) fn render(&self, attr: ExternCrateAttr) -> TokenStream {
let mut tokens = TokenStream::new();
self.render_inner(attr.path, &mut tokens, None, &self.ident);
tokens
}
/// A helper function to generate the final token stream.
///
/// This allows passing the top level visibility and the updated path into recursive calls.
fn render_inner(
&self,
mut path: Path,
tokens: &mut TokenStream,
visibility: Option<&TokenStream>,
top_level_module: &Ident,
) {
if visibility.is_some() {
// Update the path only in recursive calls.
path.segments.push(PathSegment {
ident: self.ident.clone(),
arguments: PathArguments::None,
});
}
let mut attrs = self.attrs.clone();
let mut render_docs = true;
visit_matching_attrs_parsed_mut(&mut attrs, "pre", |attr| match attr.content() {
PreAttr::NoDoc(_) => {
render_docs = false;
AttributeAction::Remove
}
_ => AttributeAction::Keep,
});
if render_docs {
let docs = generate_module_docs(self, &path);
tokens.append_all(quote! { #docs });
}
tokens.append_all(attrs);
let visibility = if let Some(visibility) = visibility {
// We're in a recursive call.
// Use the visibility passed to us.
tokens.append_all(quote! { #visibility });
visibility.clone()
} else {
// We're in the outermost call.
// Use the original visibility and decide which visibility to use in recursive calls.
let local_vis = &self.visibility;
tokens.append_all(quote! { #local_vis });
if let Visibility::Public(pub_keyword) = local_vis {
quote! { #pub_keyword }
} else {
let span = match local_vis {
Visibility::Inherited => self.mod_token.span(),
_ => local_vis.span(),
};
quote_spanned! { span=> pub(crate) }
}
};
let mod_token = self.mod_token;
tokens.append_all(quote! { #mod_token });
tokens.append(self.ident.clone());
let mut brace_content = TokenStream::new();
let crate_name = Ident::new(&CRATE_NAME, Span::call_site());
brace_content.append_all(quote! {
#[allow(unused_imports)]
#[doc(no_inline)]
#visibility use #path::*;
#[allow(unused_imports)]
use #crate_name::pre;
});
for impl_block in &self.impl_blocks {
impl_block.render(&mut brace_content, &path, &visibility, top_level_module);
}
for import in &self.imports {
brace_content.append_all(quote! { #import });
}
for function in &self.functions {
render_function(function, &mut brace_content, &path, &visibility);
}
for module in &self.modules {
module.render_inner(
path.clone(),
&mut brace_content,
Some(&visibility),
top_level_module,
);
}
tokens.append_all(quote_spanned! { self.braces.span=> { #brace_content } });
}
/// Generates a token stream that is semantically equivalent to the original token stream.
///
/// This should only be used for debug purposes.
fn original_token_stream(&self) -> TokenStream {
let mut stream = TokenStream::new();
stream.append_all(&self.attrs);
let vis = &self.visibility;
stream.append_all(quote! { #vis });
stream.append_all(quote! { mod });
stream.append(self.ident.clone());
let mut content = TokenStream::new();
content.append_all(
self.impl_blocks
.iter()
.map(|impl_block| impl_block.original_token_stream()),
);
content.append_all(&self.imports);
content.append_all(&self.functions);
content.append_all(self.modules.iter().map(|m| m.original_token_stream()));
stream.append_all(quote! { { #content } });
stream
}
}
/// Generates the code for a function inside a `extern_crate` module.
fn render_function(
function: &ForeignItemFn,
tokens: &mut TokenStream,
path: &Path,
visibility: &TokenStream,
) {
tokens.append_all(&function.attrs);
let doc_header = generate_extern_crate_fn_docs(path, &function.sig, function.span());
tokens.append_all(quote! { #doc_header });
tokens.append_all(quote_spanned! { function.span()=> #[inline(always)] });
tokens.append_all(visibility.clone().into_iter().map(|mut token| {
token.set_span(function.span());
token
}));
let signature = &function.sig;
tokens.append_all(quote! { #signature });
let mut path = path.clone();
path.segments.push(PathSegment {
ident: function.sig.ident.clone(),
arguments: PathArguments::None,
});
// Update the spans of the `::` tokens to lie in the function
for punct in path
.segments
.pairs_mut()
.map(|p| p.into_tuple().1)
.flatten()
{
punct.spans = [function.span(); 2];
}
let mut args_list = TokenStream::new();
args_list.append_separated(
function.sig.inputs.iter().map(|arg| match arg {
FnArg::Receiver(_) => unreachable!("receiver is not valid in a function argument list"),
FnArg::Typed(pat) => &pat.pat,
}),
quote_spanned! { function.span()=>, },
);
tokens.append_all(quote_spanned! { function.span()=> { #path(#args_list) } });
}
| parse | identifier_name |
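// Editor's sketch (assumed, not from the source): exercising the `Parse` impl above with
// `syn::parse_str`, as one might in a unit test for the parser.
#[cfg(test)]
mod parse_tests {
    use super::*;

    #[test]
    fn parses_a_minimal_module() {
        let module: Module =
            syn::parse_str("mod ptr { unsafe fn read<T>(src: *const T) -> T; }")
                .expect("a module with one function signature should parse");
        assert_eq!(module.functions.len(), 1);
        assert!(module.modules.is_empty());
    }
}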
extern_crate.rs | //! Provides handling of `extern_crate` attributes.
//!
//! # What the generated code looks like
//!
//! ```rust,ignore
//! #[pre::extern_crate(std)]
//! mod pre_std {
//! mod ptr {
//! #[pre(valid_ptr(src, r))]
//! unsafe fn read<T>(src: *const T) -> T;
//!
//! impl<T> NonNull<T> {
//! #[pre(!ptr.is_null())]
//! const unsafe fn new_unchecked(ptr: *mut T) -> NonNull<T>;
//! }
//! }
//! }
//! ```
//!
//! turns into
//!
//! ```rust,ignore
//! #[doc = "..."]
//! mod pre_std {
//! #[allow(unused_imports)]
//! use pre::pre;
//! #[allow(unused_imports)]
//! #[doc(no_inline)]
//! pub(crate) use std::*;
//!
//! #[doc = "..."]
//! pub(crate) mod ptr {
//! #[allow(unused_imports)]
//! use pre::pre;
//! #[allow(unused_imports)]
//! #[doc(no_inline)]
//! pub(crate) use std::ptr::*;
//!
//! #[doc = "..."]
//! #[pre(!ptr.is_null())]
//! #[pre(no_doc)]
//! #[pre(no_debug_assert)]
//! #[inline(always)]
//! #[allow(non_snake_case)]
//! pub(crate) fn NonNull__impl__new_unchecked__() {}
//!
//! #[pre(valid_ptr(src, r))]
//! #[inline(always)]
//! pub(crate) unsafe fn read<T>(src: *const T) -> T {
//! std::ptr::read(src)
//! }
//! }
//! }
//! ```
use proc_macro2::{Span, TokenStream};
use quote::{quote, quote_spanned, TokenStreamExt};
use std::fmt;
use syn::{
braced,
parse::{Parse, ParseStream},
spanned::Spanned,
token::Brace,
Attribute, FnArg, ForeignItemFn, Ident, ItemUse, Path, PathArguments, PathSegment, Token,
Visibility,
};
use crate::{
documentation::{generate_extern_crate_fn_docs, generate_module_docs},
helpers::{visit_matching_attrs_parsed_mut, AttributeAction, CRATE_NAME},
pre_attr::PreAttr,
};
pub(crate) use impl_block::{impl_block_stub_name, ImplBlock};
mod impl_block;
/// The parsed version of the `extern_crate` attribute content.
pub(crate) struct ExternCrateAttr {
/// The path of the crate/module to which function calls will be forwarded.
path: Path,
}
impl fmt::Display for ExternCrateAttr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#[extern_crate(")?;
if self.path.leading_colon.is_some() {
write!(f, "::")?;
}
for segment in &self.path.segments {
write!(f, "{}", segment.ident)?;
}
write!(f, ")]")
}
}
impl Parse for ExternCrateAttr {
fn parse(input: ParseStream) -> syn::Result<Self> {
Ok(ExternCrateAttr {
path: input.call(Path::parse_mod_style)?,
})
}
}
/// A parsed `extern_crate` annotated module.
pub(crate) struct Module {
/// The attributes on the module.
attrs: Vec<Attribute>,
/// The visibility on the module.
visibility: Visibility,
/// The `mod` token.
mod_token: Token![mod],
/// The name of the module.
ident: Ident,
/// The braces surrounding the content.
braces: Brace,
/// The impl blocks contained in the module.
impl_blocks: Vec<ImplBlock>,
/// The imports contained in the module.
imports: Vec<ItemUse>,
/// The functions contained in the module.
functions: Vec<ForeignItemFn>,
/// The submodules contained in the module.
modules: Vec<Module>,
}
impl fmt::Display for Module {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.original_token_stream())
}
}
impl Spanned for Module {
fn span(&self) -> Span {
self.visibility
.span()
.join(self.braces.span)
.unwrap_or(self.braces.span)
}
}
impl Parse for Module {
fn parse(input: ParseStream) -> syn::Result<Self> {
let attrs = input.call(Attribute::parse_outer)?;
let visibility = input.parse()?;
let mod_token = input.parse()?;
let ident = input.parse()?;
let content;
let braces = braced!(content in input);
let mut impl_blocks = Vec::new();
let mut imports = Vec::new();
let mut functions = Vec::new();
let mut modules = Vec::new();
while !content.is_empty() {
if content.peek(Token![impl]) | else if <ItemUse as Parse>::parse(&content.fork()).is_ok() {
imports.push(content.parse()?);
} else if <ForeignItemFn as Parse>::parse(&content.fork()).is_ok() {
functions.push(content.parse()?);
} else {
modules.push(content.parse().map_err(|err| {
syn::Error::new(
err.span(),
"expected a module, a function signature, an impl block or a use statement",
)
})?);
}
}
Ok(Module {
attrs,
visibility,
mod_token,
ident,
braces,
impl_blocks,
imports,
functions,
modules,
})
}
}
impl Module {
/// Renders this `extern_crate` annotated module to its final result.
pub(crate) fn render(&self, attr: ExternCrateAttr) -> TokenStream {
let mut tokens = TokenStream::new();
self.render_inner(attr.path, &mut tokens, None, &self.ident);
tokens
}
/// A helper function to generate the final token stream.
///
/// This allows passing the top level visibility and the updated path into recursive calls.
fn render_inner(
&self,
mut path: Path,
tokens: &mut TokenStream,
visibility: Option<&TokenStream>,
top_level_module: &Ident,
) {
if visibility.is_some() {
// Update the path only in recursive calls.
path.segments.push(PathSegment {
ident: self.ident.clone(),
arguments: PathArguments::None,
});
}
let mut attrs = self.attrs.clone();
let mut render_docs = true;
visit_matching_attrs_parsed_mut(&mut attrs, "pre", |attr| match attr.content() {
PreAttr::NoDoc(_) => {
render_docs = false;
AttributeAction::Remove
}
_ => AttributeAction::Keep,
});
if render_docs {
let docs = generate_module_docs(self, &path);
tokens.append_all(quote! { #docs });
}
tokens.append_all(attrs);
let visibility = if let Some(visibility) = visibility {
// We're in a recursive call.
// Use the visibility passed to us.
tokens.append_all(quote! { #visibility });
visibility.clone()
} else {
// We're in the outermost call.
// Use the original visibility and decide which visibility to use in recursive calls.
let local_vis = &self.visibility;
tokens.append_all(quote! { #local_vis });
if let Visibility::Public(pub_keyword) = local_vis {
quote! { #pub_keyword }
} else {
let span = match local_vis {
Visibility::Inherited => self.mod_token.span(),
_ => local_vis.span(),
};
quote_spanned! { span=> pub(crate) }
}
};
let mod_token = self.mod_token;
tokens.append_all(quote! { #mod_token });
tokens.append(self.ident.clone());
let mut brace_content = TokenStream::new();
let crate_name = Ident::new(&CRATE_NAME, Span::call_site());
brace_content.append_all(quote! {
#[allow(unused_imports)]
#[doc(no_inline)]
#visibility use #path::*;
#[allow(unused_imports)]
use #crate_name::pre;
});
for impl_block in &self.impl_blocks {
impl_block.render(&mut brace_content, &path, &visibility, top_level_module);
}
for import in &self.imports {
brace_content.append_all(quote! { #import });
}
for function in &self.functions {
render_function(function, &mut brace_content, &path, &visibility);
}
for module in &self.modules {
module.render_inner(
path.clone(),
&mut brace_content,
Some(&visibility),
top_level_module,
);
}
tokens.append_all(quote_spanned! { self.braces.span=> { #brace_content } });
}
/// Generates a token stream that is semantically equivalent to the original token stream.
///
/// This should only be used for debug purposes.
fn original_token_stream(&self) -> TokenStream {
let mut stream = TokenStream::new();
stream.append_all(&self.attrs);
let vis = &self.visibility;
stream.append_all(quote! { #vis });
stream.append_all(quote! { mod });
stream.append(self.ident.clone());
let mut content = TokenStream::new();
content.append_all(
self.impl_blocks
.iter()
.map(|impl_block| impl_block.original_token_stream()),
);
content.append_all(&self.imports);
content.append_all(&self.functions);
content.append_all(self.modules.iter().map(|m| m.original_token_stream()));
stream.append_all(quote! { { #content } });
stream
}
}
/// Generates the code for a function inside a `extern_crate` module.
fn render_function(
function: &ForeignItemFn,
tokens: &mut TokenStream,
path: &Path,
visibility: &TokenStream,
) {
tokens.append_all(&function.attrs);
let doc_header = generate_extern_crate_fn_docs(path, &function.sig, function.span());
tokens.append_all(quote! { #doc_header });
tokens.append_all(quote_spanned! { function.span()=> #[inline(always)] });
tokens.append_all(visibility.clone().into_iter().map(|mut token| {
token.set_span(function.span());
token
}));
let signature = &function.sig;
tokens.append_all(quote! { #signature });
let mut path = path.clone();
path.segments.push(PathSegment {
ident: function.sig.ident.clone(),
arguments: PathArguments::None,
});
// Update the spans of the `::` tokens to lie in the function
for punct in path
.segments
.pairs_mut()
.map(|p| p.into_tuple().1)
.flatten()
{
punct.spans = [function.span(); 2];
}
let mut args_list = TokenStream::new();
args_list.append_separated(
function.sig.inputs.iter().map(|arg| match arg {
FnArg::Receiver(_) => unreachable!("receiver is not valid in a function argument list"),
FnArg::Typed(pat) => &pat.pat,
}),
quote_spanned! { function.span()=>, },
);
tokens.append_all(quote_spanned! { function.span()=> { #path(#args_list) } });
}
| {
impl_blocks.push(content.parse()?);
} | conditional_block |
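// Editor's sketch: the `content.fork()` calls in `Module::parse` implement speculative
// parsing — run a trial parse on a cheap copy of the stream, and only consume the real
// stream once the trial succeeds. The same pattern in isolation (names assumed):
fn use_statement_or_function(input: syn::parse::ParseStream) -> syn::Result<()> {
    if <syn::ItemUse as syn::parse::Parse>::parse(&input.fork()).is_ok() {
        let _import: syn::ItemUse = input.parse()?; // cannot fail: the fork already parsed
    } else {
        let _function: syn::ForeignItemFn = input.parse()?;
    }
    Ok(())
}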
lib.rs | #[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
use std::any::Any;
use std::borrow::Cow;
use std::fmt::Display;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use crossbeam::channel::{Receiver, Sender};
use tuikit::prelude::{Event as TermEvent, *};
pub use crate::ansi::AnsiString;
pub use crate::engine::fuzzy::FuzzyAlgorithm;
use crate::event::{EventReceiver, EventSender};
use crate::model::Model;
pub use crate::options::SkimOptions;
pub use crate::output::SkimOutput;
use crate::reader::Reader;
mod ansi;
mod engine;
mod event;
pub mod field;
mod global;
mod header;
mod helper;
mod input;
mod item;
mod matcher;
mod model;
mod options;
mod orderedvec;
mod output;
pub mod prelude;
mod previewer;
mod query;
mod reader;
mod selection;
mod spinlock;
mod theme;
mod util;
//------------------------------------------------------------------------------
pub trait AsAny {
fn as_any(&self) -> &dyn Any;
fn as_any_mut(&mut self) -> &mut dyn Any;
}
impl<T: Any> AsAny for T {
fn as_any(&self) -> &dyn Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
}
/// A `SkimItem` defines what is processed (fetched, matched, previewed and returned) by skim
///
/// # Downcast Example
/// Skim will return the item back, but in `Arc<dyn SkimItem>` form. We might want a reference
/// to the concrete type instead of a trait object. Skim provides a somewhat "complicated" way to
/// `downcast` it back to the reference of the original concrete type.
///
/// ```rust
/// use skim::prelude::*;
///
/// struct MyItem {}
/// impl SkimItem for MyItem {
/// fn text(&self) -> Cow<str> {
/// unimplemented!()
/// }
/// }
///
/// impl MyItem {
/// pub fn mutable(&mut self) -> i32 {
/// 1
/// }
///
/// pub fn immutable(&self) -> i32 {
/// 0
/// }
/// }
///
/// let mut ret: Arc<dyn SkimItem> = Arc::new(MyItem{});
/// let mutable: &mut MyItem = Arc::get_mut(&mut ret)
/// .expect("item is referenced by others")
/// .as_any_mut() // cast to Any
/// .downcast_mut::<MyItem>() // downcast to (mut) concrete type
/// .expect("something wrong with downcast");
/// assert_eq!(mutable.mutable(), 1);
///
/// let immutable: &MyItem = (*ret).as_any() // cast to Any
/// .downcast_ref::<MyItem>() // downcast to concrete type
/// .expect("something wrong with downcast");
/// assert_eq!(immutable.immutable(), 0)
/// ```
pub trait SkimItem: AsAny + Send + Sync + 'static {
/// The string to be used for matching (without color)
fn text(&self) -> Cow<str>;
/// The content to be displayed in the item list; it may contain ANSI properties
fn display<'a>(&'a self, context: DisplayContext<'a>) -> AnsiString<'a> {
AnsiString::from(context)
}
/// Custom preview content; defaults to `ItemPreview::Global`, which uses the global preview
/// setting (i.e. the command set by the `preview` option)
fn preview(&self, _context: PreviewContext) -> ItemPreview {
ItemPreview::Global
}
/// Gets the output text (after accept); defaults to `text()`.
/// Note that this function is intended for the caller of skim and is not used by skim itself.
/// Since skim returns the item back in `SkimOutput`, if a string is not what you want, you can
/// still use `downcast` to recover a pointer to the original struct.
fn output(&self) -> Cow<str> {
self.text()
}
/// We can limit the matching ranges of the item's `text()` by
/// providing (start_byte, end_byte) byte ranges.
fn get_matching_ranges(&self) -> Option<&[(usize, usize)]> {
None
}
}
//------------------------------------------------------------------------------
// Implement SkimItem for raw strings
impl<T: AsRef<str> + Send + Sync + 'static> SkimItem for T {
fn text(&self) -> Cow<str> |
}
//------------------------------------------------------------------------------
// Display Context
pub enum Matches<'a> {
None,
CharIndices(&'a [usize]),
CharRange(usize, usize),
ByteRange(usize, usize),
}
pub struct DisplayContext<'a> {
pub text: &'a str,
pub score: i32,
pub matches: Matches<'a>,
pub container_width: usize,
pub highlight_attr: Attr,
}
impl<'a> From<DisplayContext<'a>> for AnsiString<'a> {
fn from(context: DisplayContext<'a>) -> Self {
match context.matches {
Matches::CharIndices(indices) => AnsiString::from((context.text, indices, context.highlight_attr)),
Matches::CharRange(start, end) => {
AnsiString::new_str(context.text, vec![(context.highlight_attr, (start as u32, end as u32))])
}
Matches::ByteRange(start, end) => {
let ch_start = context.text[..start].chars().count();
let ch_end = ch_start + context.text[start..end].chars().count();
AnsiString::new_str(
context.text,
vec![(context.highlight_attr, (ch_start as u32, ch_end as u32))],
)
}
Matches::None => AnsiString::new_str(context.text, vec![]),
}
}
}
//------------------------------------------------------------------------------
// Preview Context
pub struct PreviewContext<'a> {
pub query: &'a str,
pub cmd_query: &'a str,
pub width: usize,
pub height: usize,
pub current_index: usize,
pub current_selection: &'a str,
/// selected item indices (may or may not include current item)
pub selected_indices: &'a [usize],
/// selected item texts (may or may not include current item)
pub selections: &'a [&'a str],
}
//------------------------------------------------------------------------------
// Preview
#[derive(Default, Copy, Clone, Debug)]
pub struct PreviewPosition {
pub h_scroll: Size,
pub h_offset: Size,
pub v_scroll: Size,
pub v_offset: Size,
}
pub enum ItemPreview {
/// execute the command and print the command's output
Command(String),
/// Display the prepared text(lines)
Text(String),
/// Display the colored text(lines)
AnsiText(String),
CommandWithPos(String, PreviewPosition),
TextWithPos(String, PreviewPosition),
AnsiWithPos(String, PreviewPosition),
/// Use global command settings to preview the item
Global,
}
//==============================================================================
// A match engine will execute the matching algorithm
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub enum CaseMatching {
Respect,
Ignore,
Smart,
}
impl Default for CaseMatching {
fn default() -> Self {
CaseMatching::Smart
}
}
#[derive(PartialEq, Eq, Clone, Debug)]
#[allow(dead_code)]
pub enum MatchRange {
ByteRange(usize, usize), // range of bytes
Chars(Vec<usize>), // individual character indices matched
}
pub type Rank = [i32; 4];
#[derive(Clone)]
pub struct MatchResult {
pub rank: Rank,
pub matched_range: MatchRange,
}
impl MatchResult {
pub fn range_char_indices(&self, text: &str) -> Vec<usize> {
match &self.matched_range {
&MatchRange::ByteRange(start, end) => {
let first = text[..start].chars().count();
let last = first + text[start..end].chars().count();
(first..last).collect()
}
MatchRange::Chars(vec) => vec.clone(),
}
}
}
pub trait MatchEngine: Sync + Send + Display {
fn match_item(&self, item: Arc<dyn SkimItem>) -> Option<MatchResult>;
}
pub trait MatchEngineFactory {
fn create_engine_with_case(&self, query: &str, case: CaseMatching) -> Box<dyn MatchEngine>;
fn create_engine(&self, query: &str) -> Box<dyn MatchEngine> {
self.create_engine_with_case(query, CaseMatching::default())
}
}
//------------------------------------------------------------------------------
// Preselection
/// A selector that determines whether an item should be "pre-selected" in multi-selection mode
pub trait Selector {
fn should_select(&self, index: usize, item: &dyn SkimItem) -> bool;
}
//------------------------------------------------------------------------------
pub type SkimItemSender = Sender<Arc<dyn SkimItem>>;
pub type SkimItemReceiver = Receiver<Arc<dyn SkimItem>>;
pub struct Skim {}
impl Skim {
/// params:
/// - options: the "complex" options that control how skim behaves
/// - source: a stream of items to be passed to skim for filtering.
/// If None is given, skim will invoke the command given to fetch the items.
///
/// return:
/// - None: on internal errors.
/// - SkimOutput: the collected key, event, query, selected items, etc.
pub fn run_with(options: &SkimOptions, source: Option<SkimItemReceiver>) -> Option<SkimOutput> {
let min_height = options
.min_height
.map(Skim::parse_height_string)
.expect("min_height should have default values");
let height = options
.height
.map(Skim::parse_height_string)
.expect("height should have default values");
let (tx, rx): (EventSender, EventReceiver) = channel();
let term = Arc::new(
Term::with_options(
TermOptions::default()
.min_height(min_height)
.height(height)
.clear_on_exit(!options.no_clear)
.disable_alternate_screen(options.no_clear_start)
.clear_on_start(!options.no_clear_start)
.hold(options.select1 || options.exit0 || options.sync),
)
.unwrap(),
);
if !options.no_mouse {
let _ = term.enable_mouse_support();
}
//------------------------------------------------------------------------------
// input
let mut input = input::Input::new();
input.parse_keymaps(&options.bind);
input.parse_expect_keys(options.expect.as_deref());
let tx_clone = tx.clone();
let term_clone = term.clone();
let input_thread = thread::spawn(move || loop {
if let Ok(key) = term_clone.poll_event() {
if key == TermEvent::User(()) {
break;
}
let (key, action_chain) = input.translate_event(key);
for event in action_chain.into_iter() {
let _ = tx_clone.send((key, event));
}
}
});
//------------------------------------------------------------------------------
// reader
let reader = Reader::with_options(options).source(source);
//------------------------------------------------------------------------------
// model + previewer
let mut model = Model::new(rx, tx, reader, term.clone(), options);
let ret = model.start();
let _ = term.send_event(TermEvent::User(())); // interrupt the input thread
let _ = input_thread.join();
ret
}
// 10 -> TermHeight::Fixed(10)
// 10% -> TermHeight::Percent(10)
fn parse_height_string(string: &str) -> TermHeight {
if string.ends_with('%') {
TermHeight::Percent(string[0..string.len() - 1].parse().unwrap_or(100))
} else {
TermHeight::Fixed(string.parse().unwrap_or(0))
}
}
}
| {
Cow::Borrowed(self.as_ref())
} | identifier_body |
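// Editor's sketch: a hedged end-to-end usage of the API above. `SkimOptionsBuilder` and
// `unbounded` are assumed to come from `skim::prelude`; adjust if the real exports differ.
fn run_skim_example() {
    use skim::prelude::*;

    let options = SkimOptionsBuilder::default()
        .height(Some("50%"))
        .multi(true)
        .build()
        .unwrap();

    // Feed items through the channel pair aliased above as SkimItemSender/SkimItemReceiver.
    let (tx, rx): (SkimItemSender, SkimItemReceiver) = unbounded();
    for name in ["alpha", "beta", "gamma"] {
        let _ = tx.send(Arc::new(name.to_string()));
    }
    drop(tx); // close the sender so skim knows the input stream is finished

    if let Some(output) = Skim::run_with(&options, Some(rx)) {
        for item in output.selected_items {
            println!("{}", item.output());
        }
    }
}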
lib.rs | #[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
use std::any::Any;
use std::borrow::Cow;
use std::fmt::Display;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use crossbeam::channel::{Receiver, Sender};
use tuikit::prelude::{Event as TermEvent, *};
pub use crate::ansi::AnsiString;
pub use crate::engine::fuzzy::FuzzyAlgorithm;
use crate::event::{EventReceiver, EventSender};
use crate::model::Model;
pub use crate::options::SkimOptions;
pub use crate::output::SkimOutput;
use crate::reader::Reader;
mod ansi;
mod engine;
mod event;
pub mod field;
mod global;
mod header;
mod helper;
mod input;
mod item;
mod matcher;
mod model;
mod options;
mod orderedvec;
mod output;
pub mod prelude;
mod previewer;
mod query;
mod reader;
mod selection;
mod spinlock;
mod theme;
mod util;
//------------------------------------------------------------------------------
pub trait AsAny {
fn as_any(&self) -> &dyn Any;
fn as_any_mut(&mut self) -> &mut dyn Any;
}
impl<T: Any> AsAny for T {
fn as_any(&self) -> &dyn Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
}
/// A `SkimItem` defines what is processed (fetched, matched, previewed and returned) by skim
///
/// # Downcast Example
/// Skim will return the item back, but in `Arc<dyn SkimItem>` form. We might want a reference
/// to the concrete type instead of a trait object. Skim provides a somewhat "complicated" way to
/// `downcast` it back to the reference of the original concrete type.
///
/// ```rust
/// use skim::prelude::*;
///
/// struct MyItem {}
/// impl SkimItem for MyItem {
/// fn text(&self) -> Cow<str> {
/// unimplemented!()
/// }
/// }
///
/// impl MyItem {
/// pub fn mutable(&mut self) -> i32 {
/// 1
/// }
///
/// pub fn immutable(&self) -> i32 {
/// 0
/// }
/// }
///
/// let mut ret: Arc<dyn SkimItem> = Arc::new(MyItem{});
/// let mutable: &mut MyItem = Arc::get_mut(&mut ret)
/// .expect("item is referenced by others")
/// .as_any_mut() // cast to Any
/// .downcast_mut::<MyItem>() // downcast to (mut) concrete type
/// .expect("something wrong with downcast");
/// assert_eq!(mutable.mutable(), 1);
///
/// let immutable: &MyItem = (*ret).as_any() // cast to Any
/// .downcast_ref::<MyItem>() // downcast to concrete type
/// .expect("something wrong with downcast");
/// assert_eq!(immutable.immutable(), 0)
/// ```
pub trait SkimItem: AsAny + Send + Sync + 'static {
/// The string to be used for matching (without color)
fn text(&self) -> Cow<str>;
/// The content to be displayed in the item list; it may contain ANSI properties
fn display<'a>(&'a self, context: DisplayContext<'a>) -> AnsiString<'a> {
AnsiString::from(context)
}
/// Custom preview content; defaults to `ItemPreview::Global`, which uses the global preview
/// setting (i.e. the command set by the `preview` option)
fn preview(&self, _context: PreviewContext) -> ItemPreview {
ItemPreview::Global
}
| /// want, you could still use `downcast` to retain the pointer to the original struct.
fn output(&self) -> Cow<str> {
self.text()
}
/// We can limit the matching ranges of the item's `text()` by
/// providing (start_byte, end_byte) byte ranges.
fn get_matching_ranges(&self) -> Option<&[(usize, usize)]> {
None
}
}
//------------------------------------------------------------------------------
// Implement SkimItem for raw strings
impl<T: AsRef<str> + Send + Sync + 'static> SkimItem for T {
fn text(&self) -> Cow<str> {
Cow::Borrowed(self.as_ref())
}
}
//------------------------------------------------------------------------------
// Display Context
pub enum Matches<'a> {
None,
CharIndices(&'a [usize]),
CharRange(usize, usize),
ByteRange(usize, usize),
}
pub struct DisplayContext<'a> {
pub text: &'a str,
pub score: i32,
pub matches: Matches<'a>,
pub container_width: usize,
pub highlight_attr: Attr,
}
impl<'a> From<DisplayContext<'a>> for AnsiString<'a> {
fn from(context: DisplayContext<'a>) -> Self {
match context.matches {
Matches::CharIndices(indices) => AnsiString::from((context.text, indices, context.highlight_attr)),
Matches::CharRange(start, end) => {
AnsiString::new_str(context.text, vec![(context.highlight_attr, (start as u32, end as u32))])
}
Matches::ByteRange(start, end) => {
let ch_start = context.text[..start].chars().count();
let ch_end = ch_start + context.text[start..end].chars().count();
AnsiString::new_str(
context.text,
vec![(context.highlight_attr, (ch_start as u32, ch_end as u32))],
)
}
Matches::None => AnsiString::new_str(context.text, vec![]),
}
}
}
//------------------------------------------------------------------------------
// Preview Context
pub struct PreviewContext<'a> {
pub query: &'a str,
pub cmd_query: &'a str,
pub width: usize,
pub height: usize,
pub current_index: usize,
pub current_selection: &'a str,
/// selected item indices (may or may not include current item)
pub selected_indices: &'a [usize],
/// selected item texts (may or may not include current item)
pub selections: &'a [&'a str],
}
//------------------------------------------------------------------------------
// Preview
#[derive(Default, Copy, Clone, Debug)]
pub struct PreviewPosition {
pub h_scroll: Size,
pub h_offset: Size,
pub v_scroll: Size,
pub v_offset: Size,
}
pub enum ItemPreview {
/// execute the command and print the command's output
Command(String),
/// Display the prepared text(lines)
Text(String),
/// Display the colored text(lines)
AnsiText(String),
CommandWithPos(String, PreviewPosition),
TextWithPos(String, PreviewPosition),
AnsiWithPos(String, PreviewPosition),
/// Use global command settings to preview the item
Global,
}
//==============================================================================
// A match engine will execute the matching algorithm
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub enum CaseMatching {
Respect,
Ignore,
Smart,
}
impl Default for CaseMatching {
fn default() -> Self {
CaseMatching::Smart
}
}
#[derive(PartialEq, Eq, Clone, Debug)]
#[allow(dead_code)]
pub enum MatchRange {
ByteRange(usize, usize), // range of bytes
Chars(Vec<usize>), // individual character indices matched
}
pub type Rank = [i32; 4];
#[derive(Clone)]
pub struct MatchResult {
pub rank: Rank,
pub matched_range: MatchRange,
}
impl MatchResult {
pub fn range_char_indices(&self, text: &str) -> Vec<usize> {
match &self.matched_range {
&MatchRange::ByteRange(start, end) => {
let first = text[..start].chars().count();
let last = first + text[start..end].chars().count();
(first..last).collect()
}
MatchRange::Chars(vec) => vec.clone(),
}
}
}
pub trait MatchEngine: Sync + Send + Display {
fn match_item(&self, item: Arc<dyn SkimItem>) -> Option<MatchResult>;
}
pub trait MatchEngineFactory {
fn create_engine_with_case(&self, query: &str, case: CaseMatching) -> Box<dyn MatchEngine>;
fn create_engine(&self, query: &str) -> Box<dyn MatchEngine> {
self.create_engine_with_case(query, CaseMatching::default())
}
}
//------------------------------------------------------------------------------
// Preselection
/// A selector that determines whether an item should be "pre-selected" in multi-selection mode
pub trait Selector {
fn should_select(&self, index: usize, item: &dyn SkimItem) -> bool;
}
//------------------------------------------------------------------------------
pub type SkimItemSender = Sender<Arc<dyn SkimItem>>;
pub type SkimItemReceiver = Receiver<Arc<dyn SkimItem>>;
pub struct Skim {}
impl Skim {
/// params:
/// - options: the "complex" options that control how skim behaves
/// - source: a stream of items to be passed to skim for filtering.
/// If None is given, skim will invoke the command given to fetch the items.
///
/// return:
/// - None: on internal errors.
/// - SkimOutput: the collected key, event, query, selected items, etc.
pub fn run_with(options: &SkimOptions, source: Option<SkimItemReceiver>) -> Option<SkimOutput> {
let min_height = options
.min_height
.map(Skim::parse_height_string)
.expect("min_height should have default values");
let height = options
.height
.map(Skim::parse_height_string)
.expect("height should have default values");
let (tx, rx): (EventSender, EventReceiver) = channel();
let term = Arc::new(
Term::with_options(
TermOptions::default()
.min_height(min_height)
.height(height)
.clear_on_exit(!options.no_clear)
.disable_alternate_screen(options.no_clear_start)
.clear_on_start(!options.no_clear_start)
.hold(options.select1 || options.exit0 || options.sync),
)
.unwrap(),
);
if !options.no_mouse {
let _ = term.enable_mouse_support();
}
//------------------------------------------------------------------------------
// input
let mut input = input::Input::new();
input.parse_keymaps(&options.bind);
input.parse_expect_keys(options.expect.as_deref());
let tx_clone = tx.clone();
let term_clone = term.clone();
let input_thread = thread::spawn(move || loop {
if let Ok(key) = term_clone.poll_event() {
if key == TermEvent::User(()) {
break;
}
let (key, action_chain) = input.translate_event(key);
for event in action_chain.into_iter() {
let _ = tx_clone.send((key, event));
}
}
});
//------------------------------------------------------------------------------
// reader
let reader = Reader::with_options(options).source(source);
//------------------------------------------------------------------------------
// model + previewer
let mut model = Model::new(rx, tx, reader, term.clone(), options);
let ret = model.start();
let _ = term.send_event(TermEvent::User(())); // interrupt the input thread
let _ = input_thread.join();
ret
}
// 10 -> TermHeight::Fixed(10)
// 10% -> TermHeight::Percent(10)
fn parse_height_string(string: &str) -> TermHeight {
if string.ends_with('%') {
TermHeight::Percent(string[0..string.len() - 1].parse().unwrap_or(100))
} else {
TermHeight::Fixed(string.parse().unwrap_or(0))
}
}
} | /// Get output text (after accept), default to `text()`
/// Note that this function is intended to be used by the caller of skim and will not be used by
/// skim. And since skim will return the item back in `SkimOutput`, if string is not what you | random_line_split |
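// Editor's sketch: a hedged example of a custom `SkimItem` built on the trait above,
// overriding `preview` to show prepared text; `MyFile` and its fields are invented for
// illustration.
struct MyFile {
    path: String,
    summary: String,
}

impl SkimItem for MyFile {
    fn text(&self) -> Cow<str> {
        // Match against the path (without color).
        Cow::Borrowed(&self.path)
    }

    fn preview(&self, _context: PreviewContext) -> ItemPreview {
        // Show the prepared summary instead of running the global preview command.
        ItemPreview::Text(self.summary.clone())
    }

    fn output(&self) -> Cow<str> {
        // Return the path to the caller after accept.
        Cow::Borrowed(&self.path)
    }
}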
lib.rs | #[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
use std::any::Any;
use std::borrow::Cow;
use std::fmt::Display;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use crossbeam::channel::{Receiver, Sender};
use tuikit::prelude::{Event as TermEvent, *};
pub use crate::ansi::AnsiString;
pub use crate::engine::fuzzy::FuzzyAlgorithm;
use crate::event::{EventReceiver, EventSender};
use crate::model::Model;
pub use crate::options::SkimOptions;
pub use crate::output::SkimOutput;
use crate::reader::Reader;
mod ansi;
mod engine;
mod event;
pub mod field;
mod global;
mod header;
mod helper;
mod input;
mod item;
mod matcher;
mod model;
mod options;
mod orderedvec;
mod output;
pub mod prelude;
mod previewer;
mod query;
mod reader;
mod selection;
mod spinlock;
mod theme;
mod util;
//------------------------------------------------------------------------------
pub trait AsAny {
fn as_any(&self) -> &dyn Any;
fn as_any_mut(&mut self) -> &mut dyn Any;
}
impl<T: Any> AsAny for T {
fn as_any(&self) -> &dyn Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
}
/// A `SkimItem` defines what is processed (fetched, matched, previewed and returned) by skim
///
/// # Downcast Example
/// Skim will return the item back, but in `Arc<dyn SkimItem>` form. We might want a reference
/// to the concrete type instead of a trait object. Skim provides a somewhat "complicated" way to
/// `downcast` it back to the reference of the original concrete type.
///
/// ```rust
/// use skim::prelude::*;
///
/// struct MyItem {}
/// impl SkimItem for MyItem {
/// fn text(&self) -> Cow<str> {
/// unimplemented!()
/// }
/// }
///
/// impl MyItem {
/// pub fn mutable(&mut self) -> i32 {
/// 1
/// }
///
/// pub fn immutable(&self) -> i32 {
/// 0
/// }
/// }
///
/// let mut ret: Arc<dyn SkimItem> = Arc::new(MyItem{});
/// let mutable: &mut MyItem = Arc::get_mut(&mut ret)
/// .expect("item is referenced by others")
/// .as_any_mut() // cast to Any
/// .downcast_mut::<MyItem>() // downcast to (mut) concrete type
/// .expect("something wrong with downcast");
/// assert_eq!(mutable.mutable(), 1);
///
/// let immutable: &MyItem = (*ret).as_any() // cast to Any
/// .downcast_ref::<MyItem>() // downcast to concrete type
/// .expect("something wrong with downcast");
/// assert_eq!(immutable.immutable(), 0)
/// ```
pub trait SkimItem: AsAny + Send + Sync + 'static {
/// The string to be used for matching (without color)
fn text(&self) -> Cow<str>;
/// The content to be displayed in the item list; it may contain ANSI properties
fn display<'a>(&'a self, context: DisplayContext<'a>) -> AnsiString<'a> {
AnsiString::from(context)
}
/// Custom preview content; defaults to `ItemPreview::Global`, which uses the global preview
/// setting (i.e. the command set by the `preview` option)
fn preview(&self, _context: PreviewContext) -> ItemPreview {
ItemPreview::Global
}
/// Gets the output text (after accept); defaults to `text()`.
/// Note that this function is intended for the caller of skim and is not used by skim itself.
/// Since skim returns the item back in `SkimOutput`, if a string is not what you want, you can
/// still use `downcast` to recover a pointer to the original struct.
fn output(&self) -> Cow<str> {
self.text()
}
/// We can limit the matching ranges of the item's `text()` by
/// providing (start_byte, end_byte) byte ranges.
fn get_matching_ranges(&self) -> Option<&[(usize, usize)]> {
None
}
}
//------------------------------------------------------------------------------
// Implement SkimItem for raw strings
impl<T: AsRef<str> + Send + Sync + 'static> SkimItem for T {
fn text(&self) -> Cow<str> {
Cow::Borrowed(self.as_ref())
}
}
//------------------------------------------------------------------------------
// Display Context
pub enum Matches<'a> {
None,
CharIndices(&'a [usize]),
CharRange(usize, usize),
ByteRange(usize, usize),
}
pub struct DisplayContext<'a> {
pub text: &'a str,
pub score: i32,
pub matches: Matches<'a>,
pub container_width: usize,
pub highlight_attr: Attr,
}
impl<'a> From<DisplayContext<'a>> for AnsiString<'a> {
fn from(context: DisplayContext<'a>) -> Self {
match context.matches {
Matches::CharIndices(indices) => AnsiString::from((context.text, indices, context.highlight_attr)),
Matches::CharRange(start, end) => {
AnsiString::new_str(context.text, vec![(context.highlight_attr, (start as u32, end as u32))])
}
Matches::ByteRange(start, end) => {
let ch_start = context.text[..start].chars().count();
let ch_end = ch_start + context.text[start..end].chars().count();
AnsiString::new_str(
context.text,
vec![(context.highlight_attr, (ch_start as u32, ch_end as u32))],
)
}
Matches::None => AnsiString::new_str(context.text, vec![]),
}
}
}
//------------------------------------------------------------------------------
// Preview Context
pub struct PreviewContext<'a> {
pub query: &'a str,
pub cmd_query: &'a str,
pub width: usize,
pub height: usize,
pub current_index: usize,
pub current_selection: &'a str,
/// selected item indices (may or may not include current item)
pub selected_indices: &'a [usize],
/// selected item texts (may or may not include current item)
pub selections: &'a [&'a str],
}
//------------------------------------------------------------------------------
// Preview
#[derive(Default, Copy, Clone, Debug)]
pub struct PreviewPosition {
pub h_scroll: Size,
pub h_offset: Size,
pub v_scroll: Size,
pub v_offset: Size,
}
pub enum ItemPreview {
/// execute the command and print the command's output
Command(String),
/// Display the prepared text(lines)
Text(String),
/// Display the colored text(lines)
AnsiText(String),
CommandWithPos(String, PreviewPosition),
TextWithPos(String, PreviewPosition),
AnsiWithPos(String, PreviewPosition),
/// Use global command settings to preview the item
Global,
}
//==============================================================================
// A match engine will execute the matching algorithm
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub enum CaseMatching {
Respect,
Ignore,
Smart,
}
impl Default for CaseMatching {
fn | () -> Self {
CaseMatching::Smart
}
}
#[derive(PartialEq, Eq, Clone, Debug)]
#[allow(dead_code)]
pub enum MatchRange {
ByteRange(usize, usize), // range of bytes
Chars(Vec<usize>), // individual character indices matched
}
pub type Rank = [i32; 4];
#[derive(Clone)]
pub struct MatchResult {
pub rank: Rank,
pub matched_range: MatchRange,
}
impl MatchResult {
pub fn range_char_indices(&self, text: &str) -> Vec<usize> {
match &self.matched_range {
&MatchRange::ByteRange(start, end) => {
let first = text[..start].chars().count();
let last = first + text[start..end].chars().count();
(first..last).collect()
}
MatchRange::Chars(vec) => vec.clone(),
}
}
}
pub trait MatchEngine: Sync + Send + Display {
fn match_item(&self, item: Arc<dyn SkimItem>) -> Option<MatchResult>;
}
pub trait MatchEngineFactory {
fn create_engine_with_case(&self, query: &str, case: CaseMatching) -> Box<dyn MatchEngine>;
fn create_engine(&self, query: &str) -> Box<dyn MatchEngine> {
self.create_engine_with_case(query, CaseMatching::default())
}
}
//------------------------------------------------------------------------------
// Preselection
/// A selector that determines whether an item should be "pre-selected" in multi-selection mode
pub trait Selector {
fn should_select(&self, index: usize, item: &dyn SkimItem) -> bool;
}
//------------------------------------------------------------------------------
pub type SkimItemSender = Sender<Arc<dyn SkimItem>>;
pub type SkimItemReceiver = Receiver<Arc<dyn SkimItem>>;
pub struct Skim {}
impl Skim {
/// params:
/// - options: the "complex" options that control how skim behaves
/// - source: a stream of items to be passed to skim for filtering.
/// If None is given, skim will invoke the command given to fetch the items.
///
/// return:
/// - None: on internal errors.
/// - SkimOutput: the collected key, event, query, selected items, etc.
pub fn run_with(options: &SkimOptions, source: Option<SkimItemReceiver>) -> Option<SkimOutput> {
let min_height = options
.min_height
.map(Skim::parse_height_string)
.expect("min_height should have default values");
let height = options
.height
.map(Skim::parse_height_string)
.expect("height should have default values");
let (tx, rx): (EventSender, EventReceiver) = channel();
let term = Arc::new(
Term::with_options(
TermOptions::default()
.min_height(min_height)
.height(height)
.clear_on_exit(!options.no_clear)
.disable_alternate_screen(options.no_clear_start)
.clear_on_start(!options.no_clear_start)
.hold(options.select1 || options.exit0 || options.sync),
)
.unwrap(),
);
if !options.no_mouse {
let _ = term.enable_mouse_support();
}
//------------------------------------------------------------------------------
// input
let mut input = input::Input::new();
input.parse_keymaps(&options.bind);
input.parse_expect_keys(options.expect.as_deref());
let tx_clone = tx.clone();
let term_clone = term.clone();
let input_thread = thread::spawn(move || loop {
if let Ok(key) = term_clone.poll_event() {
if key == TermEvent::User(()) {
break;
}
let (key, action_chain) = input.translate_event(key);
for event in action_chain.into_iter() {
let _ = tx_clone.send((key, event));
}
}
});
//------------------------------------------------------------------------------
// reader
let reader = Reader::with_options(options).source(source);
//------------------------------------------------------------------------------
// model + previewer
let mut model = Model::new(rx, tx, reader, term.clone(), options);
let ret = model.start();
let _ = term.send_event(TermEvent::User(())); // interrupt the input thread
let _ = input_thread.join();
ret
}
// 10 -> TermHeight::Fixed(10)
// 10% -> TermHeight::Percent(10)
fn parse_height_string(string: &str) -> TermHeight {
if string.ends_with('%') {
TermHeight::Percent(string[0..string.len() - 1].parse().unwrap_or(100))
} else {
TermHeight::Fixed(string.parse().unwrap_or(0))
}
}
}
| default | identifier_name |
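Editor's note: a minimal usage sketch for the `Skim::run_with` API sampled
above, assuming the skim crate's prelude (`SkimOptionsBuilder` and
`SkimItemReader` are not shown in this sample). Plain text lines work as
items thanks to the blanket `SkimItem` impl for string-like types.

use skim::prelude::*;
use std::io::Cursor;

fn main() {
    // Assumed builder API; height strings like "50%" match
    // `parse_height_string` above.
    let options = SkimOptionsBuilder::default()
        .height(Some("50%"))
        .multi(true)
        .build()
        .unwrap();

    // Feed newline-separated input as a SkimItemReceiver.
    let items = SkimItemReader::default().of_bufread(Cursor::new("apple\nbanana\ncherry"));

    // On abort, run_with yields no selected items.
    let selected = Skim::run_with(&options, Some(items))
        .map(|out| out.selected_items)
        .unwrap_or_default();

    for item in selected {
        println!("{}", item.output());
    }
}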
dynamic_store.rs | // Copyright 2017 Amagicom AB.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Bindings to [`SCDynamicStore`].
//!
//! See the examples directory for examples of how to use this module.
//!
//! [`SCDynamicStore`]: https://developer.apple.com/documentation/systemconfiguration/scdynamicstore?language=objc
use crate::sys::{
dynamic_store::{
kSCDynamicStoreUseSessionKeys, SCDynamicStoreCallBack, SCDynamicStoreContext,
SCDynamicStoreCopyKeyList, SCDynamicStoreCopyValue, SCDynamicStoreCreateRunLoopSource,
SCDynamicStoreCreateWithOptions, SCDynamicStoreGetTypeID, SCDynamicStoreRef,
SCDynamicStoreRemoveValue, SCDynamicStoreSetNotificationKeys, SCDynamicStoreSetValue,
},
dynamic_store_copy_specific::SCDynamicStoreCopyProxies,
};
use core_foundation::{
array::{CFArray, CFArrayRef},
base::{kCFAllocatorDefault, CFType, TCFType},
boolean::CFBoolean,
dictionary::CFDictionary,
propertylist::{CFPropertyList, CFPropertyListSubClass},
runloop::CFRunLoopSource,
string::CFString,
};
use std::{ffi::c_void, ptr};
/// Struct describing the callback happening when a watched value in the dynamic store is changed.
pub struct SCDynamicStoreCallBackContext<T> {
/// The callback function that will be called when a watched value in the dynamic store is
/// changed.
pub callout: SCDynamicStoreCallBackT<T>,
/// The argument passed to each `callout` call. Can be used to keep state between
/// callbacks.
pub info: T,
}
/// Signature for callback functions getting called when a watched value in the dynamic store is
/// changed.
///
/// This is the safe callback definition, abstracting over the lower level `SCDynamicStoreCallBack`
/// from the `system-configuration-sys` crate.
pub type SCDynamicStoreCallBackT<T> =
fn(store: SCDynamicStore, changed_keys: CFArray<CFString>, info: &mut T);
/// Builder for [`SCDynamicStore`] sessions.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
pub struct SCDynamicStoreBuilder<T> {
name: CFString,
session_keys: bool,
callback_context: Option<SCDynamicStoreCallBackContext<T>>,
}
impl SCDynamicStoreBuilder<()> {
/// Creates a new builder. `name` is used as the name parameter when creating the
/// [`SCDynamicStore`] session.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
pub fn new<S: Into<CFString>>(name: S) -> Self {
SCDynamicStoreBuilder {
name: name.into(),
session_keys: false,
callback_context: None,
}
}
}
impl<T> SCDynamicStoreBuilder<T> {
/// Set whether or not the created [`SCDynamicStore`] should have session keys.
/// See [`SCDynamicStoreCreateWithOptions`] for details.
///
/// Defaults to `false`.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
/// [`SCDynamicStoreCreateWithOptions`]: https://developer.apple.com/documentation/systemconfiguration/1437818-scdynamicstorecreatewithoptions?language=objc
pub fn session_keys(mut self, session_keys: bool) -> Self {
self.session_keys = session_keys;
self
}
/// Set a callback context (callback function and data to pass to each callback call).
///
/// Defaults to having callbacks disabled.
pub fn callback_context<T2>(
self,
callback_context: SCDynamicStoreCallBackContext<T2>,
) -> SCDynamicStoreBuilder<T2> {
SCDynamicStoreBuilder {
name: self.name,
session_keys: self.session_keys,
callback_context: Some(callback_context),
}
}
/// Create the dynamic store session.
pub fn build(mut self) -> SCDynamicStore {
let store_options = self.create_store_options();
if let Some(callback_context) = self.callback_context.take() {
SCDynamicStore::create(
&self.name,
&store_options,
Some(convert_callback::<T>),
&mut self.create_context(callback_context),
)
} else {
SCDynamicStore::create(&self.name, &store_options, None, ptr::null_mut())
}
}
fn create_store_options(&self) -> CFDictionary {
let key = unsafe { CFString::wrap_under_create_rule(kSCDynamicStoreUseSessionKeys) };
let value = CFBoolean::from(self.session_keys);
let typed_dict = CFDictionary::from_CFType_pairs(&[(key, value)]);
unsafe { CFDictionary::wrap_under_get_rule(typed_dict.as_concrete_TypeRef()) }
}
fn create_context(
&self,
callback_context: SCDynamicStoreCallBackContext<T>,
) -> SCDynamicStoreContext {
// move the callback context struct to the heap and "forget" it.
// It will later be brought back into the Rust typesystem and freed in
// `release_callback_context`
let info_ptr = Box::into_raw(Box::new(callback_context));
SCDynamicStoreContext {
version: 0,
info: info_ptr as *mut _ as *mut c_void,
retain: None,
release: Some(release_callback_context::<T>),
copyDescription: None,
}
}
}
declare_TCFType! {
/// Access to the key-value pairs in the dynamic store of a running system.
///
/// Use the [`SCDynamicStoreBuilder`] to create instances of this.
///
/// [`SCDynamicStoreBuilder`]: struct.SCDynamicStoreBuilder.html
SCDynamicStore, SCDynamicStoreRef
}
impl_TCFType!(SCDynamicStore, SCDynamicStoreRef, SCDynamicStoreGetTypeID);
impl SCDynamicStore {
/// Creates a new session used to interact with the dynamic store maintained by the System
/// Configuration server.
fn create(
name: &CFString,
store_options: &CFDictionary,
callout: SCDynamicStoreCallBack,
context: *mut SCDynamicStoreContext,
) -> Self {
unsafe {
let store = SCDynamicStoreCreateWithOptions(
kCFAllocatorDefault,
name.as_concrete_TypeRef(),
store_options.as_concrete_TypeRef(),
callout,
context,
);
SCDynamicStore::wrap_under_create_rule(store)
}
}
/// Returns the keys that represent the current dynamic store entries that match the specified
/// pattern, or `None` if an error occurred.
///
/// `pattern` - A regular expression pattern used to match the dynamic store keys.
pub fn get_keys<S: Into<CFString>>(&self, pattern: S) -> Option<CFArray<CFString>> {
let cf_pattern = pattern.into();
unsafe {
let array_ref = SCDynamicStoreCopyKeyList(
self.as_concrete_TypeRef(),
cf_pattern.as_concrete_TypeRef(),
);
if !array_ref.is_null() {
Some(CFArray::wrap_under_create_rule(array_ref))
} else {
None
}
}
}
/// Returns the key-value pairs that represent the current internet proxy settings, or `None` if
/// no proxy settings have been defined or if an error occurred.
pub fn get_proxies(&self) -> Option<CFDictionary<CFString, CFType>> {
unsafe {
let dictionary_ref = SCDynamicStoreCopyProxies(self.as_concrete_TypeRef());
if !dictionary_ref.is_null() {
Some(CFDictionary::wrap_under_create_rule(dictionary_ref))
} else {
None
}
}
}
/// If the given key exists in the store, the associated value is returned.
///
/// Use `CFPropertyList::downcast_into` to cast the result into the correct type.
pub fn get<S: Into<CFString>>(&self, key: S) -> Option<CFPropertyList> {
let cf_key = key.into();
unsafe {
let dict_ref =
SCDynamicStoreCopyValue(self.as_concrete_TypeRef(), cf_key.as_concrete_TypeRef());
if !dict_ref.is_null() {
Some(CFPropertyList::wrap_under_create_rule(dict_ref))
} else |
}
}
/// Sets the value of the given key. Overwrites existing values.
/// Returns `true` on success, false on failure.
pub fn set<S: Into<CFString>, V: CFPropertyListSubClass>(&self, key: S, value: V) -> bool {
self.set_raw(key, &value.into_CFPropertyList())
}
/// Sets the value of the given key. Overwrites existing values.
/// Returns `true` on success, false on failure.
pub fn set_raw<S: Into<CFString>>(&self, key: S, value: &CFPropertyList) -> bool {
let cf_key = key.into();
let success = unsafe {
SCDynamicStoreSetValue(
self.as_concrete_TypeRef(),
cf_key.as_concrete_TypeRef(),
value.as_concrete_TypeRef(),
)
};
success != 0
}
/// Removes the value of the specified key from the dynamic store.
pub fn remove<S: Into<CFString>>(&self, key: S) -> bool {
let cf_key = key.into();
let success = unsafe {
SCDynamicStoreRemoveValue(self.as_concrete_TypeRef(), cf_key.as_concrete_TypeRef())
};
success != 0
}
/// Specifies a set of keys and key patterns that should be monitored for changes.
pub fn set_notification_keys<T1, T2>(
&self,
keys: &CFArray<T1>,
patterns: &CFArray<T2>,
) -> bool {
let success = unsafe {
SCDynamicStoreSetNotificationKeys(
self.as_concrete_TypeRef(),
keys.as_concrete_TypeRef(),
patterns.as_concrete_TypeRef(),
)
};
success != 0
}
/// Creates a run loop source object that can be added to the application's run loop.
pub fn create_run_loop_source(&self) -> CFRunLoopSource {
unsafe {
let run_loop_source_ref = SCDynamicStoreCreateRunLoopSource(
kCFAllocatorDefault,
self.as_concrete_TypeRef(),
0,
);
CFRunLoopSource::wrap_under_create_rule(run_loop_source_ref)
}
}
}
/// The raw callback used by the safe `SCDynamicStore` to convert from the `SCDynamicStoreCallBack`
/// to the `SCDynamicStoreCallBackT`
unsafe extern "C" fn convert_callback<T>(
store_ref: SCDynamicStoreRef,
changed_keys_ref: CFArrayRef,
context_ptr: *mut c_void,
) {
let store = SCDynamicStore::wrap_under_get_rule(store_ref);
let changed_keys = CFArray::<CFString>::wrap_under_get_rule(changed_keys_ref);
let context = &mut *(context_ptr as *mut _ as *mut SCDynamicStoreCallBackContext<T>);
(context.callout)(store, changed_keys, &mut context.info);
}
// Release function called by core foundation on release of the dynamic store context.
unsafe extern "C" fn release_callback_context<T>(context_ptr: *const c_void) {
// Bring back the context object from raw ptr so it is correctly freed.
let _context = Box::from_raw(context_ptr as *mut SCDynamicStoreCallBackContext<T>);
}
| {
None
} | conditional_block |
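Editor's note: a small read-path sketch for the `SCDynamicStoreBuilder`/`get`
API sampled above. The crate path and the "State:/Network/Global/IPv4" key
are illustrative assumptions; any configd key works.

use core_foundation::dictionary::CFDictionary;
use system_configuration::dynamic_store::SCDynamicStoreBuilder;

fn main() {
    let store = SCDynamicStoreBuilder::new("my-session").build();
    if let Some(value) = store.get("State:/Network/Global/IPv4") {
        // `get` returns an untyped CFPropertyList; downcast it as the doc
        // comment on `get` suggests (this key usually holds a dictionary).
        if let Some(dict) = value.downcast_into::<CFDictionary>() {
            println!("IPv4 state has {} entries", dict.len());
        }
    }
}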
dynamic_store.rs | // Copyright 2017 Amagicom AB.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Bindings to [`SCDynamicStore`].
//!
//! See the examples directory for examples of how to use this module.
//!
//! [`SCDynamicStore`]: https://developer.apple.com/documentation/systemconfiguration/scdynamicstore?language=objc
use crate::sys::{
dynamic_store::{
kSCDynamicStoreUseSessionKeys, SCDynamicStoreCallBack, SCDynamicStoreContext,
SCDynamicStoreCopyKeyList, SCDynamicStoreCopyValue, SCDynamicStoreCreateRunLoopSource,
SCDynamicStoreCreateWithOptions, SCDynamicStoreGetTypeID, SCDynamicStoreRef,
SCDynamicStoreRemoveValue, SCDynamicStoreSetNotificationKeys, SCDynamicStoreSetValue,
},
dynamic_store_copy_specific::SCDynamicStoreCopyProxies,
};
use core_foundation::{
array::{CFArray, CFArrayRef},
base::{kCFAllocatorDefault, CFType, TCFType},
boolean::CFBoolean,
dictionary::CFDictionary,
propertylist::{CFPropertyList, CFPropertyListSubClass},
runloop::CFRunLoopSource,
string::CFString,
};
use std::{ffi::c_void, ptr};
/// Struct describing the callback happening when a watched value in the dynamic store is changed.
pub struct SCDynamicStoreCallBackContext<T> {
/// The callback function that will be called when a watched value in the dynamic store is
/// changed.
pub callout: SCDynamicStoreCallBackT<T>,
/// The argument passed to each `callout` call. Can be used to keep state between
/// callbacks.
pub info: T,
}
/// Signature for callback functions getting called when a watched value in the dynamic store is
/// changed.
///
/// This is the safe callback definition, abstracting over the lower level `SCDynamicStoreCallBack`
/// from the `system-configuration-sys` crate.
pub type SCDynamicStoreCallBackT<T> =
fn(store: SCDynamicStore, changed_keys: CFArray<CFString>, info: &mut T);
/// Builder for [`SCDynamicStore`] sessions.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
pub struct SCDynamicStoreBuilder<T> {
name: CFString,
session_keys: bool,
callback_context: Option<SCDynamicStoreCallBackContext<T>>,
}
impl SCDynamicStoreBuilder<()> {
/// Creates a new builder. `name` is used as the name parameter when creating the
/// [`SCDynamicStore`] session.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
pub fn new<S: Into<CFString>>(name: S) -> Self {
SCDynamicStoreBuilder {
name: name.into(),
session_keys: false,
callback_context: None,
}
}
}
impl<T> SCDynamicStoreBuilder<T> {
/// Set whether or not the created [`SCDynamicStore`] should have session keys.
/// See [`SCDynamicStoreCreateWithOptions`] for details.
///
/// Defaults to `false`.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
/// [`SCDynamicStoreCreateWithOptions`]: https://developer.apple.com/documentation/systemconfiguration/1437818-scdynamicstorecreatewithoptions?language=objc
pub fn session_keys(mut self, session_keys: bool) -> Self {
self.session_keys = session_keys;
self
}
/// Set a callback context (callback function and data to pass to each callback call).
///
/// Defaults to having callbacks disabled.
pub fn callback_context<T2>(
self,
callback_context: SCDynamicStoreCallBackContext<T2>,
) -> SCDynamicStoreBuilder<T2> {
SCDynamicStoreBuilder {
name: self.name,
session_keys: self.session_keys,
callback_context: Some(callback_context),
}
}
/// Create the dynamic store session.
pub fn build(mut self) -> SCDynamicStore {
let store_options = self.create_store_options();
if let Some(callback_context) = self.callback_context.take() {
SCDynamicStore::create(
&self.name,
&store_options,
Some(convert_callback::<T>),
&mut self.create_context(callback_context),
)
} else {
SCDynamicStore::create(&self.name, &store_options, None, ptr::null_mut())
}
}
fn create_store_options(&self) -> CFDictionary {
let key = unsafe { CFString::wrap_under_create_rule(kSCDynamicStoreUseSessionKeys) };
let value = CFBoolean::from(self.session_keys);
let typed_dict = CFDictionary::from_CFType_pairs(&[(key, value)]);
unsafe { CFDictionary::wrap_under_get_rule(typed_dict.as_concrete_TypeRef()) }
}
fn create_context(
&self,
callback_context: SCDynamicStoreCallBackContext<T>,
) -> SCDynamicStoreContext {
// move the callback context struct to the heap and "forget" it.
// It will later be brought back into the Rust typesystem and freed in
// `release_callback_context`
let info_ptr = Box::into_raw(Box::new(callback_context));
SCDynamicStoreContext {
version: 0,
info: info_ptr as *mut _ as *mut c_void,
retain: None,
release: Some(release_callback_context::<T>),
copyDescription: None,
}
}
}
declare_TCFType! {
/// Access to the key-value pairs in the dynamic store of a running system.
///
/// Use the [`SCDynamicStoreBuilder`] to create instances of this.
///
/// [`SCDynamicStoreBuilder`]: struct.SCDynamicStoreBuilder.html
SCDynamicStore, SCDynamicStoreRef
}
impl_TCFType!(SCDynamicStore, SCDynamicStoreRef, SCDynamicStoreGetTypeID);
impl SCDynamicStore {
/// Creates a new session used to interact with the dynamic store maintained by the System
/// Configuration server.
fn create(
name: &CFString,
store_options: &CFDictionary,
callout: SCDynamicStoreCallBack,
context: *mut SCDynamicStoreContext,
) -> Self {
unsafe {
let store = SCDynamicStoreCreateWithOptions(
kCFAllocatorDefault,
name.as_concrete_TypeRef(),
store_options.as_concrete_TypeRef(),
callout,
context,
);
SCDynamicStore::wrap_under_create_rule(store)
}
}
/// Returns the keys that represent the current dynamic store entries that match the specified
/// pattern, or `None` if an error occurred.
///
/// `pattern` - A regular expression pattern used to match the dynamic store keys.
pub fn get_keys<S: Into<CFString>>(&self, pattern: S) -> Option<CFArray<CFString>> {
let cf_pattern = pattern.into();
unsafe {
let array_ref = SCDynamicStoreCopyKeyList(
self.as_concrete_TypeRef(),
cf_pattern.as_concrete_TypeRef(),
);
if !array_ref.is_null() {
Some(CFArray::wrap_under_create_rule(array_ref))
} else {
None
}
}
}
/// Returns the key-value pairs that represent the current internet proxy settings, or `None` if
/// no proxy settings have been defined or if an error occurred.
pub fn get_proxies(&self) -> Option<CFDictionary<CFString, CFType>> {
unsafe {
let dictionary_ref = SCDynamicStoreCopyProxies(self.as_concrete_TypeRef());
if !dictionary_ref.is_null() {
Some(CFDictionary::wrap_under_create_rule(dictionary_ref))
} else {
None
}
}
}
/// If the given key exists in the store, the associated value is returned.
///
/// Use `CFPropertyList::downcast_into` to cast the result into the correct type.
pub fn get<S: Into<CFString>>(&self, key: S) -> Option<CFPropertyList> {
let cf_key = key.into();
unsafe {
let dict_ref =
SCDynamicStoreCopyValue(self.as_concrete_TypeRef(), cf_key.as_concrete_TypeRef());
if !dict_ref.is_null() {
Some(CFPropertyList::wrap_under_create_rule(dict_ref))
} else {
None
}
}
}
/// Sets the value of the given key. Overwrites existing values.
/// Returns `true` on success, false on failure.
pub fn set<S: Into<CFString>, V: CFPropertyListSubClass>(&self, key: S, value: V) -> bool {
self.set_raw(key, &value.into_CFPropertyList())
}
/// Sets the value of the given key. Overwrites existing values.
/// Returns `true` on success, false on failure.
pub fn set_raw<S: Into<CFString>>(&self, key: S, value: &CFPropertyList) -> bool {
let cf_key = key.into();
let success = unsafe {
SCDynamicStoreSetValue(
self.as_concrete_TypeRef(),
cf_key.as_concrete_TypeRef(),
value.as_concrete_TypeRef(),
)
};
success != 0
}
/// Removes the value of the specified key from the dynamic store.
pub fn remove<S: Into<CFString>>(&self, key: S) -> bool {
let cf_key = key.into();
let success = unsafe {
SCDynamicStoreRemoveValue(self.as_concrete_TypeRef(), cf_key.as_concrete_TypeRef())
};
success != 0
}
/// Specifies a set of keys and key patterns that should be monitored for changes.
pub fn set_notification_keys<T1, T2>(
&self,
keys: &CFArray<T1>,
patterns: &CFArray<T2>,
) -> bool {
let success = unsafe {
SCDynamicStoreSetNotificationKeys(
self.as_concrete_TypeRef(),
keys.as_concrete_TypeRef(),
patterns.as_concrete_TypeRef(),
)
};
success != 0
}
/// Creates a run loop source object that can be added to the application's run loop.
pub fn create_run_loop_source(&self) -> CFRunLoopSource |
}
/// The raw callback used by the safe `SCDynamicStore` to convert from the `SCDynamicStoreCallBack`
/// to the `SCDynamicStoreCallBackT`
unsafe extern "C" fn convert_callback<T>(
store_ref: SCDynamicStoreRef,
changed_keys_ref: CFArrayRef,
context_ptr: *mut c_void,
) {
let store = SCDynamicStore::wrap_under_get_rule(store_ref);
let changed_keys = CFArray::<CFString>::wrap_under_get_rule(changed_keys_ref);
let context = &mut *(context_ptr as *mut _ as *mut SCDynamicStoreCallBackContext<T>);
(context.callout)(store, changed_keys, &mut context.info);
}
// Release function called by core foundation on release of the dynamic store context.
unsafe extern "C" fn release_callback_context<T>(context_ptr: *const c_void) {
// Bring back the context object from raw ptr so it is correctly freed.
let _context = Box::from_raw(context_ptr as *mut SCDynamicStoreCallBackContext<T>);
}
| {
unsafe {
let run_loop_source_ref = SCDynamicStoreCreateRunLoopSource(
kCFAllocatorDefault,
self.as_concrete_TypeRef(),
0,
);
CFRunLoopSource::wrap_under_create_rule(run_loop_source_ref)
}
} | identifier_body |
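Editor's note: a sketch of the callback path sampled above — register key
patterns, attach the run loop source, and run. The `CFRunLoop` names come
from the core-foundation crate and the pattern string is an arbitrary
example; treat the whole function as an assumption-laden illustration.

use core_foundation::array::CFArray;
use core_foundation::runloop::{kCFRunLoopCommonModes, CFRunLoop};
use core_foundation::string::CFString;

fn on_change(_store: SCDynamicStore, changed_keys: CFArray<CFString>, count: &mut u32) {
    *count += 1;
    for key in changed_keys.iter() {
        println!("change #{}: {}", count, *key);
    }
}

fn watch_network_state() {
    let context = SCDynamicStoreCallBackContext {
        callout: on_change,
        info: 0u32, // per-session state threaded through each callback
    };
    let store = SCDynamicStoreBuilder::new("watcher")
        .callback_context(context)
        .build();

    let keys = CFArray::<CFString>::from_CFTypes(&[]);
    let patterns = CFArray::from_CFTypes(&[CFString::from("State:/Network/.*")]);
    if store.set_notification_keys(&keys, &patterns) {
        CFRunLoop::get_current().add_source(&store.create_run_loop_source(), unsafe {
            kCFRunLoopCommonModes
        });
        CFRunLoop::run_current();
    }
}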
dynamic_store.rs | // Copyright 2017 Amagicom AB.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Bindings to [`SCDynamicStore`].
//!
//! See the examples directory for examples of how to use this module.
//!
//! [`SCDynamicStore`]: https://developer.apple.com/documentation/systemconfiguration/scdynamicstore?language=objc
use crate::sys::{
dynamic_store::{
kSCDynamicStoreUseSessionKeys, SCDynamicStoreCallBack, SCDynamicStoreContext,
SCDynamicStoreCopyKeyList, SCDynamicStoreCopyValue, SCDynamicStoreCreateRunLoopSource,
SCDynamicStoreCreateWithOptions, SCDynamicStoreGetTypeID, SCDynamicStoreRef,
SCDynamicStoreRemoveValue, SCDynamicStoreSetNotificationKeys, SCDynamicStoreSetValue,
},
dynamic_store_copy_specific::SCDynamicStoreCopyProxies,
};
use core_foundation::{
array::{CFArray, CFArrayRef},
base::{kCFAllocatorDefault, CFType, TCFType},
boolean::CFBoolean,
dictionary::CFDictionary,
propertylist::{CFPropertyList, CFPropertyListSubClass},
runloop::CFRunLoopSource,
string::CFString,
};
use std::{ffi::c_void, ptr};
/// Struct describing the callback happening when a watched value in the dynamic store is changed.
pub struct SCDynamicStoreCallBackContext<T> {
/// The callback function that will be called when a watched value in the dynamic store is
/// changed.
pub callout: SCDynamicStoreCallBackT<T>,
/// The argument passed to each `callout` call. Can be used to keep state between
/// callbacks.
pub info: T,
}
/// Signature for callback functions getting called when a watched value in the dynamic store is
/// changed.
///
/// This is the safe callback definition, abstracting over the lower level `SCDynamicStoreCallBack`
/// from the `system-configuration-sys` crate.
pub type SCDynamicStoreCallBackT<T> =
fn(store: SCDynamicStore, changed_keys: CFArray<CFString>, info: &mut T);
/// Builder for [`SCDynamicStore`] sessions.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
pub struct SCDynamicStoreBuilder<T> {
name: CFString,
session_keys: bool,
callback_context: Option<SCDynamicStoreCallBackContext<T>>,
}
impl SCDynamicStoreBuilder<()> {
/// Creates a new builder. `name` is used as the name parameter when creating the
/// [`SCDynamicStore`] session.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
pub fn new<S: Into<CFString>>(name: S) -> Self {
SCDynamicStoreBuilder {
name: name.into(),
session_keys: false,
callback_context: None,
}
}
}
impl<T> SCDynamicStoreBuilder<T> {
/// Set whether or not the created [`SCDynamicStore`] should have session keys.
/// See [`SCDynamicStoreCreateWithOptions`] for details.
///
/// Defaults to `false`.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
/// [`SCDynamicStoreCreateWithOptions`]: https://developer.apple.com/documentation/systemconfiguration/1437818-scdynamicstorecreatewithoptions?language=objc
pub fn session_keys(mut self, session_keys: bool) -> Self {
self.session_keys = session_keys;
self
}
/// Set a callback context (callback function and data to pass to each callback call).
///
/// Defaults to having callbacks disabled.
pub fn callback_context<T2>(
self,
callback_context: SCDynamicStoreCallBackContext<T2>,
) -> SCDynamicStoreBuilder<T2> {
SCDynamicStoreBuilder {
name: self.name,
session_keys: self.session_keys,
callback_context: Some(callback_context),
}
}
/// Create the dynamic store session.
pub fn build(mut self) -> SCDynamicStore {
let store_options = self.create_store_options();
if let Some(callback_context) = self.callback_context.take() {
SCDynamicStore::create(
&self.name,
&store_options,
Some(convert_callback::<T>),
&mut self.create_context(callback_context),
)
} else {
SCDynamicStore::create(&self.name, &store_options, None, ptr::null_mut())
}
}
fn create_store_options(&self) -> CFDictionary {
let key = unsafe { CFString::wrap_under_create_rule(kSCDynamicStoreUseSessionKeys) };
let value = CFBoolean::from(self.session_keys);
let typed_dict = CFDictionary::from_CFType_pairs(&[(key, value)]);
unsafe { CFDictionary::wrap_under_get_rule(typed_dict.as_concrete_TypeRef()) }
}
fn | (
&self,
callback_context: SCDynamicStoreCallBackContext<T>,
) -> SCDynamicStoreContext {
// move the callback context struct to the heap and "forget" it.
// It will later be brought back into the Rust typesystem and freed in
// `release_callback_context`
let info_ptr = Box::into_raw(Box::new(callback_context));
SCDynamicStoreContext {
version: 0,
info: info_ptr as *mut _ as *mut c_void,
retain: None,
release: Some(release_callback_context::<T>),
copyDescription: None,
}
}
}
declare_TCFType! {
/// Access to the key-value pairs in the dynamic store of a running system.
///
/// Use the [`SCDynamicStoreBuilder`] to create instances of this.
///
/// [`SCDynamicStoreBuilder`]: struct.SCDynamicStoreBuilder.html
SCDynamicStore, SCDynamicStoreRef
}
impl_TCFType!(SCDynamicStore, SCDynamicStoreRef, SCDynamicStoreGetTypeID);
impl SCDynamicStore {
/// Creates a new session used to interact with the dynamic store maintained by the System
/// Configuration server.
fn create(
name: &CFString,
store_options: &CFDictionary,
callout: SCDynamicStoreCallBack,
context: *mut SCDynamicStoreContext,
) -> Self {
unsafe {
let store = SCDynamicStoreCreateWithOptions(
kCFAllocatorDefault,
name.as_concrete_TypeRef(),
store_options.as_concrete_TypeRef(),
callout,
context,
);
SCDynamicStore::wrap_under_create_rule(store)
}
}
/// Returns the keys that represent the current dynamic store entries that match the specified
/// pattern, or `None` if an error occurred.
///
/// `pattern` - A regular expression pattern used to match the dynamic store keys.
pub fn get_keys<S: Into<CFString>>(&self, pattern: S) -> Option<CFArray<CFString>> {
let cf_pattern = pattern.into();
unsafe {
let array_ref = SCDynamicStoreCopyKeyList(
self.as_concrete_TypeRef(),
cf_pattern.as_concrete_TypeRef(),
);
if !array_ref.is_null() {
Some(CFArray::wrap_under_create_rule(array_ref))
} else {
None
}
}
}
/// Returns the key-value pairs that represent the current internet proxy settings, or `None` if
/// no proxy settings have been defined or if an error occurred.
pub fn get_proxies(&self) -> Option<CFDictionary<CFString, CFType>> {
unsafe {
let dictionary_ref = SCDynamicStoreCopyProxies(self.as_concrete_TypeRef());
if !dictionary_ref.is_null() {
Some(CFDictionary::wrap_under_create_rule(dictionary_ref))
} else {
None
}
}
}
/// If the given key exists in the store, the associated value is returned.
///
/// Use `CFPropertyList::downcast_into` to cast the result into the correct type.
pub fn get<S: Into<CFString>>(&self, key: S) -> Option<CFPropertyList> {
let cf_key = key.into();
unsafe {
let dict_ref =
SCDynamicStoreCopyValue(self.as_concrete_TypeRef(), cf_key.as_concrete_TypeRef());
if !dict_ref.is_null() {
Some(CFPropertyList::wrap_under_create_rule(dict_ref))
} else {
None
}
}
}
/// Sets the value of the given key. Overwrites existing values.
/// Returns `true` on success, false on failure.
pub fn set<S: Into<CFString>, V: CFPropertyListSubClass>(&self, key: S, value: V) -> bool {
self.set_raw(key, &value.into_CFPropertyList())
}
/// Sets the value of the given key. Overwrites existing values.
/// Returns `true` on success, false on failure.
pub fn set_raw<S: Into<CFString>>(&self, key: S, value: &CFPropertyList) -> bool {
let cf_key = key.into();
let success = unsafe {
SCDynamicStoreSetValue(
self.as_concrete_TypeRef(),
cf_key.as_concrete_TypeRef(),
value.as_concrete_TypeRef(),
)
};
success != 0
}
/// Removes the value of the specified key from the dynamic store.
pub fn remove<S: Into<CFString>>(&self, key: S) -> bool {
let cf_key = key.into();
let success = unsafe {
SCDynamicStoreRemoveValue(self.as_concrete_TypeRef(), cf_key.as_concrete_TypeRef())
};
success != 0
}
/// Specifies a set of keys and key patterns that should be monitored for changes.
pub fn set_notification_keys<T1, T2>(
&self,
keys: &CFArray<T1>,
patterns: &CFArray<T2>,
) -> bool {
let success = unsafe {
SCDynamicStoreSetNotificationKeys(
self.as_concrete_TypeRef(),
keys.as_concrete_TypeRef(),
patterns.as_concrete_TypeRef(),
)
};
success != 0
}
/// Creates a run loop source object that can be added to the application's run loop.
pub fn create_run_loop_source(&self) -> CFRunLoopSource {
unsafe {
let run_loop_source_ref = SCDynamicStoreCreateRunLoopSource(
kCFAllocatorDefault,
self.as_concrete_TypeRef(),
0,
);
CFRunLoopSource::wrap_under_create_rule(run_loop_source_ref)
}
}
}
/// The raw callback used by the safe `SCDynamicStore` to convert from the `SCDynamicStoreCallBack`
/// to the `SCDynamicStoreCallBackT`
unsafe extern "C" fn convert_callback<T>(
store_ref: SCDynamicStoreRef,
changed_keys_ref: CFArrayRef,
context_ptr: *mut c_void,
) {
let store = SCDynamicStore::wrap_under_get_rule(store_ref);
let changed_keys = CFArray::<CFString>::wrap_under_get_rule(changed_keys_ref);
let context = &mut *(context_ptr as *mut _ as *mut SCDynamicStoreCallBackContext<T>);
(context.callout)(store, changed_keys, &mut context.info);
}
// Release function called by core foundation on release of the dynamic store context.
unsafe extern "C" fn release_callback_context<T>(context_ptr: *const c_void) {
// Bring back the context object from raw ptr so it is correctly freed.
let _context = Box::from_raw(context_ptr as *mut SCDynamicStoreCallBackContext<T>);
}
| create_context | identifier_name |
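Editor's note: a tiny sketch for the `get_proxies` accessor above; counting
entries is just a demonstration of working with the returned typed
dictionary, and the wording of the messages is made up.

fn print_proxy_count(store: &SCDynamicStore) {
    match store.get_proxies() {
        Some(proxies) => println!("{} proxy-related entries", proxies.len()),
        None => println!("no proxy settings defined (or the lookup failed)"),
    }
}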
dynamic_store.rs | // Copyright 2017 Amagicom AB.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Bindings to [`SCDynamicStore`].
//!
//! See the examples directory for examples of how to use this module.
//!
//! [`SCDynamicStore`]: https://developer.apple.com/documentation/systemconfiguration/scdynamicstore?language=objc
use crate::sys::{
dynamic_store::{
kSCDynamicStoreUseSessionKeys, SCDynamicStoreCallBack, SCDynamicStoreContext,
SCDynamicStoreCopyKeyList, SCDynamicStoreCopyValue, SCDynamicStoreCreateRunLoopSource,
SCDynamicStoreCreateWithOptions, SCDynamicStoreGetTypeID, SCDynamicStoreRef,
SCDynamicStoreRemoveValue, SCDynamicStoreSetNotificationKeys, SCDynamicStoreSetValue,
},
dynamic_store_copy_specific::SCDynamicStoreCopyProxies,
};
use core_foundation::{
array::{CFArray, CFArrayRef},
base::{kCFAllocatorDefault, CFType, TCFType},
boolean::CFBoolean,
dictionary::CFDictionary,
propertylist::{CFPropertyList, CFPropertyListSubClass},
runloop::CFRunLoopSource,
string::CFString,
};
use std::{ffi::c_void, ptr};
/// Struct describing the callback happening when a watched value in the dynamic store is changed.
pub struct SCDynamicStoreCallBackContext<T> {
/// The callback function that will be called when a watched value in the dynamic store is
/// changed.
pub callout: SCDynamicStoreCallBackT<T>,
/// The argument passed to each `callout` call. Can be used to keep state between
/// callbacks.
pub info: T,
}
/// Signature for callback functions getting called when a watched value in the dynamic store is
/// changed.
///
/// This is the safe callback definition, abstracting over the lower level `SCDynamicStoreCallBack`
/// from the `system-configuration-sys` crate.
pub type SCDynamicStoreCallBackT<T> =
fn(store: SCDynamicStore, changed_keys: CFArray<CFString>, info: &mut T);
/// Builder for [`SCDynamicStore`] sessions.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
pub struct SCDynamicStoreBuilder<T> {
name: CFString,
session_keys: bool,
callback_context: Option<SCDynamicStoreCallBackContext<T>>,
}
impl SCDynamicStoreBuilder<()> {
/// Creates a new builder. `name` is used as the name parameter when creating the
/// [`SCDynamicStore`] session.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
pub fn new<S: Into<CFString>>(name: S) -> Self {
SCDynamicStoreBuilder {
name: name.into(),
session_keys: false,
callback_context: None,
}
}
}
impl<T> SCDynamicStoreBuilder<T> {
/// Set whether or not the created [`SCDynamicStore`] should have session keys.
/// See [`SCDynamicStoreCreateWithOptions`] for details.
///
/// Defaults to `false`.
///
/// [`SCDynamicStore`]: struct.SCDynamicStore.html
/// [`SCDynamicStoreCreateWithOptions`]: https://developer.apple.com/documentation/systemconfiguration/1437818-scdynamicstorecreatewithoptions?language=objc
pub fn session_keys(mut self, session_keys: bool) -> Self {
self.session_keys = session_keys;
self
}
/// Set a callback context (callback function and data to pass to each callback call).
///
/// Defaults to having callbacks disabled.
pub fn callback_context<T2>(
self,
callback_context: SCDynamicStoreCallBackContext<T2>,
) -> SCDynamicStoreBuilder<T2> {
SCDynamicStoreBuilder {
name: self.name,
session_keys: self.session_keys,
callback_context: Some(callback_context),
}
}
/// Create the dynamic store session.
pub fn build(mut self) -> SCDynamicStore {
let store_options = self.create_store_options();
if let Some(callback_context) = self.callback_context.take() {
SCDynamicStore::create(
&self.name,
&store_options,
Some(convert_callback::<T>),
&mut self.create_context(callback_context),
)
} else {
SCDynamicStore::create(&self.name, &store_options, None, ptr::null_mut())
}
}
fn create_store_options(&self) -> CFDictionary {
let key = unsafe { CFString::wrap_under_create_rule(kSCDynamicStoreUseSessionKeys) };
let value = CFBoolean::from(self.session_keys);
let typed_dict = CFDictionary::from_CFType_pairs(&[(key, value)]);
unsafe { CFDictionary::wrap_under_get_rule(typed_dict.as_concrete_TypeRef()) }
}
fn create_context(
&self,
callback_context: SCDynamicStoreCallBackContext<T>,
) -> SCDynamicStoreContext {
// move the callback context struct to the heap and "forget" it. | let info_ptr = Box::into_raw(Box::new(callback_context));
SCDynamicStoreContext {
version: 0,
info: info_ptr as *mut _ as *mut c_void,
retain: None,
release: Some(release_callback_context::<T>),
copyDescription: None,
}
}
}
declare_TCFType! {
/// Access to the key-value pairs in the dynamic store of a running system.
///
/// Use the [`SCDynamicStoreBuilder`] to create instances of this.
///
/// [`SCDynamicStoreBuilder`]: struct.SCDynamicStoreBuilder.html
SCDynamicStore, SCDynamicStoreRef
}
impl_TCFType!(SCDynamicStore, SCDynamicStoreRef, SCDynamicStoreGetTypeID);
impl SCDynamicStore {
/// Creates a new session used to interact with the dynamic store maintained by the System
/// Configuration server.
fn create(
name: &CFString,
store_options: &CFDictionary,
callout: SCDynamicStoreCallBack,
context: *mut SCDynamicStoreContext,
) -> Self {
unsafe {
let store = SCDynamicStoreCreateWithOptions(
kCFAllocatorDefault,
name.as_concrete_TypeRef(),
store_options.as_concrete_TypeRef(),
callout,
context,
);
SCDynamicStore::wrap_under_create_rule(store)
}
}
/// Returns the keys that represent the current dynamic store entries that match the specified
/// pattern, or `None` if an error occurred.
///
/// `pattern` - A regular expression pattern used to match the dynamic store keys.
pub fn get_keys<S: Into<CFString>>(&self, pattern: S) -> Option<CFArray<CFString>> {
let cf_pattern = pattern.into();
unsafe {
let array_ref = SCDynamicStoreCopyKeyList(
self.as_concrete_TypeRef(),
cf_pattern.as_concrete_TypeRef(),
);
if !array_ref.is_null() {
Some(CFArray::wrap_under_create_rule(array_ref))
} else {
None
}
}
}
/// Returns the key-value pairs that represent the current internet proxy settings, or `None` if
/// no proxy settings have been defined or if an error occurred.
pub fn get_proxies(&self) -> Option<CFDictionary<CFString, CFType>> {
unsafe {
let dictionary_ref = SCDynamicStoreCopyProxies(self.as_concrete_TypeRef());
if !dictionary_ref.is_null() {
Some(CFDictionary::wrap_under_create_rule(dictionary_ref))
} else {
None
}
}
}
/// If the given key exists in the store, the associated value is returned.
///
/// Use `CFPropertyList::downcast_into` to cast the result into the correct type.
pub fn get<S: Into<CFString>>(&self, key: S) -> Option<CFPropertyList> {
let cf_key = key.into();
unsafe {
let dict_ref =
SCDynamicStoreCopyValue(self.as_concrete_TypeRef(), cf_key.as_concrete_TypeRef());
if !dict_ref.is_null() {
Some(CFPropertyList::wrap_under_create_rule(dict_ref))
} else {
None
}
}
}
/// Sets the value of the given key. Overwrites existing values.
/// Returns `true` on success, false on failure.
pub fn set<S: Into<CFString>, V: CFPropertyListSubClass>(&self, key: S, value: V) -> bool {
self.set_raw(key, &value.into_CFPropertyList())
}
/// Sets the value of the given key. Overwrites existing values.
/// Returns `true` on success, false on failure.
pub fn set_raw<S: Into<CFString>>(&self, key: S, value: &CFPropertyList) -> bool {
let cf_key = key.into();
let success = unsafe {
SCDynamicStoreSetValue(
self.as_concrete_TypeRef(),
cf_key.as_concrete_TypeRef(),
value.as_concrete_TypeRef(),
)
};
success != 0
}
/// Removes the value of the specified key from the dynamic store.
pub fn remove<S: Into<CFString>>(&self, key: S) -> bool {
let cf_key = key.into();
let success = unsafe {
SCDynamicStoreRemoveValue(self.as_concrete_TypeRef(), cf_key.as_concrete_TypeRef())
};
success != 0
}
/// Specifies a set of keys and key patterns that should be monitored for changes.
pub fn set_notification_keys<T1, T2>(
&self,
keys: &CFArray<T1>,
patterns: &CFArray<T2>,
) -> bool {
let success = unsafe {
SCDynamicStoreSetNotificationKeys(
self.as_concrete_TypeRef(),
keys.as_concrete_TypeRef(),
patterns.as_concrete_TypeRef(),
)
};
success != 0
}
/// Creates a run loop source object that can be added to the application's run loop.
pub fn create_run_loop_source(&self) -> CFRunLoopSource {
unsafe {
let run_loop_source_ref = SCDynamicStoreCreateRunLoopSource(
kCFAllocatorDefault,
self.as_concrete_TypeRef(),
0,
);
CFRunLoopSource::wrap_under_create_rule(run_loop_source_ref)
}
}
}
/// The raw callback used by the safe `SCDynamicStore` to convert from the `SCDynamicStoreCallBack`
/// to the `SCDynamicStoreCallBackT`
unsafe extern "C" fn convert_callback<T>(
store_ref: SCDynamicStoreRef,
changed_keys_ref: CFArrayRef,
context_ptr: *mut c_void,
) {
let store = SCDynamicStore::wrap_under_get_rule(store_ref);
let changed_keys = CFArray::<CFString>::wrap_under_get_rule(changed_keys_ref);
let context = &mut *(context_ptr as *mut _ as *mut SCDynamicStoreCallBackContext<T>);
(context.callout)(store, changed_keys, &mut context.info);
}
// Release function called by core foundation on release of the dynamic store context.
unsafe extern "C" fn release_callback_context<T>(context_ptr: *const c_void) {
// Bring back the context object from raw ptr so it is correctly freed.
let _context = Box::from_raw(context_ptr as *mut SCDynamicStoreCallBackContext<T>);
} | // It will later be brought back into the Rust typesystem and freed in
// `release_callback_context` | random_line_split |
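Editor's note: a write-path sketch for the `set` and `remove` methods above.
`CFString` implements `CFPropertyListSubClass`, so it can be passed to `set`
directly; the "MyApp:marker" key is made up for illustration.

use core_foundation::string::CFString;

fn toggle_marker(store: &SCDynamicStore, enable: bool) -> bool {
    if enable {
        store.set("MyApp:marker", CFString::from("on"))
    } else {
        store.remove("MyApp:marker")
    }
}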
filter_list.rs | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE-APACHE file or at:
// https://www.apache.org/licenses/LICENSE-2.0
//! Filter-list view widget
use super::{driver, Driver, ListView, SelectionError, SelectionMode};
use crate::Scrollable;
use kas::event::ChildMsg;
use kas::prelude::*;
use kas::updatable::filter::Filter;
use kas::updatable::{ListData, Updatable, UpdatableHandler};
use std::cell::RefCell;
use std::fmt::Debug;
use UpdatableHandler as UpdHandler;
/// Filter accessor over another accessor
///
/// This is an abstraction over a [`ListData`], applying a filter to items when
/// iterating and accessing.
///
/// When updating, the filter applies to the old value: if the old is included,
/// it is replaced by the new, otherwise no replacement occurs.
///
/// Note: the key and item types are the same as those in the underlying list,
/// thus one can also retrieve values from the underlying list directly.
///
/// Note: only `Rc<FilteredList<T, F>>` implements [`ListData`]; the [`Rc`]
/// wrapper is required!
///
/// Warning: this implementation is `O(n)` where `n = data.len()` and is not well
/// optimised, so it is expected to be slow on large data lists.
#[derive(Clone, Debug)]
struct FilteredList<T: ListData, F: Filter<T::Item>> {
/// Direct access to unfiltered data
///
/// If adjusting this, one should call [`FilteredList::refresh`] after.
data: T,
/// Direct access to the filter
///
/// If adjusting this, one should call [`FilteredList::refresh`] after.
filter: F,
view: RefCell<Vec<T::Key>>, // TODO: does this need to be in a RefCell?
}
impl<T: ListData, F: Filter<T::Item>> FilteredList<T, F> {
/// Construct and apply filter
#[inline]
fn new(data: T, filter: F) -> Self {
let len = data.len().cast();
let view = RefCell::new(Vec::with_capacity(len));
let s = FilteredList { data, filter, view };
let _ = s.refresh();
s
}
/// Refresh the view
///
/// Re-applies the filter (`O(n)` where `n` is the number of data elements).
/// Calling this directly may be useful in case the data is modified.
///
/// An update should be triggered using the returned handle.
fn refresh(&self) -> Option<UpdateHandle> {
let mut view = self.view.borrow_mut();
view.clear();
for (key, item) in self.data.iter_vec(usize::MAX) {
if self.filter.matches(item) {
view.push(key);
}
}
self.filter.update_handle()
}
}
impl<T: ListData, F: Filter<T::Item>> Updatable for FilteredList<T, F> {
fn update_handle(&self) -> Option<UpdateHandle> {
self.filter.update_handle()
}
fn update_self(&self) -> Option<UpdateHandle> {
self.refresh()
}
}
impl<K, M, T: ListData + UpdatableHandler<K, M> + 'static, F: Filter<T::Item>>
UpdatableHandler<K, M> for FilteredList<T, F>
{
fn handle(&self, key: &K, msg: &M) -> Option<UpdateHandle> {
self.data.handle(key, msg)
}
}
impl<T: ListData + 'static, F: Filter<T::Item>> ListData for FilteredList<T, F> {
type Key = T::Key;
type Item = T::Item;
fn len(&self) -> usize {
self.view.borrow().len()
}
fn contains_key(&self, key: &Self::Key) -> bool {
self.get_cloned(key).is_some()
}
fn get_cloned(&self, key: &Self::Key) -> Option<Self::Item> {
// Check the item against our filter (probably O(1)) instead of using
// our filtered list (O(n) where n=self.len()).
self.data
.get_cloned(key)
.filter(|item| self.filter.matches(item.clone()))
}
fn update(&self, key: &Self::Key, value: Self::Item) -> Option<UpdateHandle> {
// Filtering does not affect result, but does affect the view
if self
.data
.get_cloned(key)
.map(|item| !self.filter.matches(item))
.unwrap_or(true)
{
// Not previously visible: no update occurs
return None;
}
let new_visible = self.filter.matches(value.clone());
let result = self.data.update(key, value);
if result.is_some() && !new_visible |
result
}
fn iter_vec_from(&self, start: usize, limit: usize) -> Vec<(Self::Key, Self::Item)> {
let view = self.view.borrow();
let end = self.len().min(start + limit);
if start >= end {
return Vec::new();
}
let mut v = Vec::with_capacity(end - start);
for k in &view[start..end] {
v.push((k.clone(), self.data.get_cloned(k).unwrap()));
}
v
}
}
/// Filter-list view widget
///
/// This widget is a wrapper around [`ListView`] which applies a filter to the
/// data list.
///
/// Why is a data-filter a widget and not a pure-data item, you ask? The answer
/// is that a filter-list must be updated when the filter or the data changes,
/// and, since filtering a list is not especially cheap, the filtering must be
/// cached and updated when required, not every time the list view asks for more
/// data. Although it is possible to do this with a data-view, that requires
/// machinery for recursive-updates on data-structures and/or a mechanism to
/// test whether the underlying list-data changed. Implementing as a widget
/// avoids this.
// TODO: impl Clone
#[derive(Debug, Widget)]
#[handler(handle=noauto, generics = <>)]
#[layout(single)]
#[widget(config=noauto)]
pub struct FilterListView<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg> + 'static,
F: Filter<T::Item>,
V: Driver<T::Item> = driver::Default,
> {
#[widget_core]
core: CoreData,
#[widget]
list: ListView<D, FilteredList<T, F>, V>,
}
impl<
D: Directional + Default,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<D, T, F, V>
{
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`FilterListView::new_with_direction`].
pub fn new(data: T, filter: F) -> Self {
Self::new_with_dir_driver(D::default(), <V as Default>::default(), data, filter)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, data: T, filter: F) -> Self {
Self::new_with_dir_driver(direction, <V as Default>::default(), data, filter)
}
}
impl<
D: Directional + Default,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit view
pub fn new_with_driver(view: V, data: T, filter: F) -> Self {
Self::new_with_dir_driver(D::default(), view, data, filter)
}
}
impl<
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<Direction, T, F, V>
{
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.list.set_direction(direction)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit direction and view
pub fn new_with_dir_driver(direction: D, view: V, data: T, filter: F) -> Self {
let data = FilteredList::new(data, filter);
FilterListView {
core: Default::default(),
list: ListView::new_with_dir_driver(direction, view, data),
}
}
/// Access the stored data (pre-filter)
pub fn unfiltered_data(&self) -> &T {
&self.list.data().data
}
/// Mutably access the stored data (pre-filter)
///
/// It may be necessary to use [`FilterListView::update_view`] to update the view of this data.
pub fn unfiltered_data_mut(&mut self) -> &mut T {
&mut self.list.data_mut().data
}
/// Access the stored data (post-filter)
pub fn data(&self) -> &T {
&self.list.data().data
}
/// Mutably access the stored data (post-filter)
///
/// It may be necessary to use [`FilterListView::update_view`] to update the view of this data.
pub fn data_mut(&mut self) -> &mut T {
&mut self.list.data_mut().data
}
/// Check whether a key has data (post-filter)
pub fn contains_key(&self, key: &T::Key) -> bool {
self.data().contains_key(key)
}
/// Get a copy of the shared value at `key` (post-filter)
pub fn get_value(&self, key: &T::Key) -> Option<T::Item> {
self.data().get_cloned(key)
}
/// Set shared data (post-filter)
///
/// This method updates the shared data, if supported (see
/// [`ListData::update`]). Other widgets sharing this data are notified
/// of the update, if data is changed.
pub fn set_value(&self, mgr: &mut Manager, key: &T::Key, data: T::Item) {
if let Some(handle) = self.data().update(key, data) {
mgr.trigger_update(handle, 0);
}
}
/// Update shared data (post-filter)
///
/// This is purely a convenience method over [`FilterListView::set_value`].
/// It does nothing if no value is found at `key`.
/// It notifies other widgets of updates to the shared data.
pub fn update_value<G: Fn(T::Item) -> T::Item>(&self, mgr: &mut Manager, key: &T::Key, f: G) {
if let Some(item) = self.get_value(key) {
self.set_value(mgr, key, f(item));
}
}
/// Get the current selection mode
pub fn selection_mode(&self) -> SelectionMode {
self.list.selection_mode()
}
/// Set the current selection mode
pub fn set_selection_mode(&mut self, mode: SelectionMode) -> TkAction {
self.list.set_selection_mode(mode)
}
/// Set the selection mode (inline)
pub fn with_selection_mode(mut self, mode: SelectionMode) -> Self {
let _ = self.set_selection_mode(mode);
self
}
/// Read the list of selected entries
///
/// With mode [`SelectionMode::Single`] this may contain zero or one entry;
/// use `selected_iter().next()` to extract only the first (optional) entry.
pub fn selected_iter(&'_ self) -> impl Iterator<Item = &'_ T::Key> + '_ {
self.list.selected_iter()
}
/// Check whether an entry is selected
pub fn is_selected(&self, key: &T::Key) -> bool {
self.list.is_selected(key)
}
/// Clear all selected items
///
/// Does not send [`ChildMsg`] responses.
pub fn clear_selected(&mut self) {
self.list.clear_selected()
}
/// Directly select an item
///
/// Returns `true` if selected, `false` if already selected.
/// Fails if selection mode does not permit selection or if the key is
/// invalid or filtered out.
///
/// Does not send [`ChildMsg`] responses.
pub fn select(&mut self, key: T::Key) -> Result<bool, SelectionError> {
self.list.select(key)
}
/// Directly deselect an item
///
/// Returns `true` if deselected, `false` if not previously selected.
/// Also returns `false` on invalid and filtered-out keys.
///
/// Does not send [`ChildMsg`] responses.
pub fn deselect(&mut self, key: &T::Key) -> bool {
self.list.deselect(key)
}
/// Manually trigger an update to handle changed data or filter
pub fn update_view(&mut self, mgr: &mut Manager) {
self.list.data().refresh();
self.list.update_view(mgr)
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.list.direction()
}
/// Set the preferred number of items visible (inline)
///
/// This affects the (ideal) size request and whether children are sized
/// according to their ideal or minimum size but not the minimum size.
pub fn with_num_visible(mut self, number: i32) -> Self {
self.list = self.list.with_num_visible(number);
self
}
}
// TODO: support derive(Scrollable)?
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> Scrollable for FilterListView<D, T, F, V>
{
#[inline]
fn scroll_axes(&self, size: Size) -> (bool, bool) {
self.list.scroll_axes(size)
}
#[inline]
fn max_scroll_offset(&self) -> Offset {
self.list.max_scroll_offset()
}
#[inline]
fn scroll_offset(&self) -> Offset {
self.list.scroll_offset()
}
#[inline]
fn set_scroll_offset(&mut self, mgr: &mut Manager, offset: Offset) -> Offset {
self.list.set_scroll_offset(mgr, offset)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> WidgetConfig for FilterListView<D, T, F, V>
{
fn configure(&mut self, mgr: &mut Manager) {
// We must refresh the filtered list when the underlying list changes
if let Some(handle) = self.list.data().data.update_handle() {
mgr.update_on_handle(handle, self.id());
}
// As well as when the filter changes
if let Some(handle) = self.list.data().update_handle() {
mgr.update_on_handle(handle, self.id());
}
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> Handler for FilterListView<D, T, F, V>
{
type Msg = ChildMsg<T::Key, <V::Widget as Handler>::Msg>;
fn handle(&mut self, mgr: &mut Manager, event: Event) -> Response<Self::Msg> {
match event {
Event::HandleUpdate {.. } => {
self.update_view(mgr);
return Response::Update;
}
_ => Response::None,
}
}
}
| {
// remove the updated item from our filtered list
self.view.borrow_mut().retain(|item| item != key);
} | conditional_block |
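Editor's note: a hypothetical wiring sketch for `FilterListView`. The filter
type `SimpleCaseInsensitiveFilter` and a `ListData` impl for plain
`Vec<String>` matched kas of this era, but the exact paths and trait bounds
are assumptions — treat this as pseudocode for combining shared data with a
filter rather than a verified build.

use kas::updatable::filter::SimpleCaseInsensitiveFilter;

fn make_list() -> FilterListView<kas::dir::Down, Vec<String>, SimpleCaseInsensitiveFilter> {
    let data: Vec<String> = ["Apple", "Orange", "Banana"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    let filter = SimpleCaseInsensitiveFilter::new();
    // Direction and driver come from the type: Down, driver::Default.
    FilterListView::new(data, filter)
}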
filter_list.rs | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE-APACHE file or at:
// https://www.apache.org/licenses/LICENSE-2.0
//! Filter-list view widget
use super::{driver, Driver, ListView, SelectionError, SelectionMode};
use crate::Scrollable;
use kas::event::ChildMsg;
use kas::prelude::*;
use kas::updatable::filter::Filter;
use kas::updatable::{ListData, Updatable, UpdatableHandler};
use std::cell::RefCell;
use std::fmt::Debug;
use UpdatableHandler as UpdHandler;
/// Filter accessor over another accessor
///
/// This is an abstraction over a [`ListData`], applying a filter to items when
/// iterating and accessing.
///
/// When updating, the filter applies to the old value: if the old is included,
/// it is replaced by the new, otherwise no replacement occurs.
///
/// Note: the key and item types are the same as those in the underlying list,
/// thus one can also retrieve values from the underlying list directly.
///
/// Note: only `Rc<FilteredList<T, F>>` implements [`ListData`]; the [`Rc`]
/// wrapper is required!
///
/// Warning: this implementation is `O(n)` where `n = data.len()` and is not well
/// optimised, so it is expected to be slow on large data lists.
#[derive(Clone, Debug)]
struct FilteredList<T: ListData, F: Filter<T::Item>> {
/// Direct access to unfiltered data
///
/// If adjusting this, one should call [`FilteredList::refresh`] after.
data: T,
/// Direct access to the filter
///
/// If adjusting this, one should call [`FilteredList::refresh`] after.
filter: F,
view: RefCell<Vec<T::Key>>, // TODO: does this need to be in a RefCell?
}
impl<T: ListData, F: Filter<T::Item>> FilteredList<T, F> {
/// Construct and apply filter
#[inline]
fn new(data: T, filter: F) -> Self {
let len = data.len().cast();
let view = RefCell::new(Vec::with_capacity(len));
let s = FilteredList { data, filter, view };
let _ = s.refresh();
s
}
/// Refresh the view
///
/// Re-applies the filter (`O(n)` where `n` is the number of data elements).
/// Calling this directly may be useful in case the data is modified.
///
/// An update should be triggered using the returned handle.
fn refresh(&self) -> Option<UpdateHandle> {
let mut view = self.view.borrow_mut();
view.clear();
for (key, item) in self.data.iter_vec(usize::MAX) {
if self.filter.matches(item) {
view.push(key);
}
}
self.filter.update_handle()
}
}
impl<T: ListData, F: Filter<T::Item>> Updatable for FilteredList<T, F> {
fn update_handle(&self) -> Option<UpdateHandle> |
fn update_self(&self) -> Option<UpdateHandle> {
self.refresh()
}
}
impl<K, M, T: ListData + UpdatableHandler<K, M> + 'static, F: Filter<T::Item>>
UpdatableHandler<K, M> for FilteredList<T, F>
{
fn handle(&self, key: &K, msg: &M) -> Option<UpdateHandle> {
self.data.handle(key, msg)
}
}
impl<T: ListData + 'static, F: Filter<T::Item>> ListData for FilteredList<T, F> {
type Key = T::Key;
type Item = T::Item;
fn len(&self) -> usize {
self.view.borrow().len()
}
fn contains_key(&self, key: &Self::Key) -> bool {
self.get_cloned(key).is_some()
}
fn get_cloned(&self, key: &Self::Key) -> Option<Self::Item> {
// Check the item against our filter (probably O(1)) instead of using
// our filtered list (O(n) where n=self.len()).
self.data
.get_cloned(key)
.filter(|item| self.filter.matches(item.clone()))
}
fn update(&self, key: &Self::Key, value: Self::Item) -> Option<UpdateHandle> {
// Filtering does not affect result, but does affect the view
if self
.data
.get_cloned(key)
.map(|item| !self.filter.matches(item))
.unwrap_or(true)
{
// Not previously visible: no update occurs
return None;
}
let new_visible = self.filter.matches(value.clone());
let result = self.data.update(key, value);
if result.is_some() && !new_visible {
// remove the updated item from our filtered list
self.view.borrow_mut().retain(|item| item != key);
}
result
}
fn iter_vec_from(&self, start: usize, limit: usize) -> Vec<(Self::Key, Self::Item)> {
let view = self.view.borrow();
let end = self.len().min(start + limit);
if start >= end {
return Vec::new();
}
let mut v = Vec::with_capacity(end - start);
for k in &view[start..end] {
v.push((k.clone(), self.data.get_cloned(k).unwrap()));
}
v
}
}
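// Editor's note: a sketch of `update` semantics (hypothetical `filtered`,
// `key` and `new_value`). If `key` is currently filtered out the update is
// refused; if the new value fails the filter it is written through to the
// underlying data but removed from the visible view:
//
//     match filtered.update(&key, new_value) {
//         Some(handle) => { /* data changed; trigger an update via `handle` */ }
//         None => { /* key absent or filtered out: nothing was written */ }
//     }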
/// Filter-list view widget
///
/// This widget is a wrapper around [`ListView`] which applies a filter to the
/// data list.
///
/// Why is a data-filter a widget and not a pure-data item, you ask? The answer
/// is that a filter-list must be updated when the filter or the data changes,
/// and, since filtering a list is not especially cheap, the filtering must be
/// cached and updated when required, not every time the list view asks for more
/// data. Although it is possible to do this with a data-view, that requires
/// machinery for recursive-updates on data-structures and/or a mechanism to
/// test whether the underlying list-data changed. Implementing as a widget
/// avoids this.
// TODO: impl Clone
#[derive(Debug, Widget)]
#[handler(handle=noauto, generics = <>)]
#[layout(single)]
#[widget(config=noauto)]
pub struct FilterListView<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg> + 'static,
F: Filter<T::Item>,
V: Driver<T::Item> = driver::Default,
> {
#[widget_core]
core: CoreData,
#[widget]
list: ListView<D, FilteredList<T, F>, V>,
}
impl<
D: Directional + Default,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<D, T, F, V>
{
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`FilterListView::new_with_direction`].
pub fn new(data: T, filter: F) -> Self {
Self::new_with_dir_driver(D::default(), <V as Default>::default(), data, filter)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, data: T, filter: F) -> Self {
Self::new_with_dir_driver(direction, <V as Default>::default(), data, filter)
}
}
impl<
D: Directional + Default,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit view
pub fn new_with_driver(view: V, data: T, filter: F) -> Self {
Self::new_with_dir_driver(D::default(), view, data, filter)
}
}
impl<
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<Direction, T, F, V>
{
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.list.set_direction(direction)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit direction and view
pub fn new_with_dir_driver(direction: D, view: V, data: T, filter: F) -> Self {
let data = FilteredList::new(data, filter);
FilterListView {
core: Default::default(),
list: ListView::new_with_dir_driver(direction, view, data),
}
}
/// Access the stored data (pre-filter)
pub fn unfiltered_data(&self) -> &T {
&self.list.data().data
}
/// Mutably access the stored data (pre-filter)
///
/// It may be necessary to use [`FilterListView::update_view`] to update the view of this data.
pub fn unfiltered_data_mut(&mut self) -> &mut T {
&mut self.list.data_mut().data
}
/// Access the stored data (unfiltered)
///
/// This is equivalent to [`FilterListView::unfiltered_data`]: the filter
/// does not restrict access through this method.
pub fn data(&self) -> &T {
&self.list.data().data
}
/// Mutably access the stored data (unfiltered)
///
/// It may be necessary to use [`FilterListView::update_view`] to update the view of this data.
pub fn data_mut(&mut self) -> &mut T {
&mut self.list.data_mut().data
}
/// Check whether a key has data (post-filter)
pub fn contains_key(&self, key: &T::Key) -> bool {
self.list.data().contains_key(key)
}
/// Get a copy of the shared value at `key` (post-filter)
pub fn get_value(&self, key: &T::Key) -> Option<T::Item> {
self.list.data().get_cloned(key)
}
/// Set shared data (post-filter)
///
/// This method updates the shared data, if supported (see
/// [`ListData::update`] on the filtered list). Other widgets sharing this
/// data are notified of the update, if data is changed.
pub fn set_value(&self, mgr: &mut Manager, key: &T::Key, data: T::Item) {
if let Some(handle) = self.list.data().update(key, data) {
mgr.trigger_update(handle, 0);
}
}
/// Update shared data (post-filter)
///
/// This is purely a convenience method over [`FilterListView::set_value`].
/// It does nothing if no value is found at `key`.
/// It notifies other widgets of updates to the shared data.
pub fn update_value<G: Fn(T::Item) -> T::Item>(&self, mgr: &mut Manager, key: &T::Key, f: G) {
if let Some(item) = self.get_value(key) {
self.set_value(mgr, key, f(item));
}
}
/// Get the current selection mode
pub fn selection_mode(&self) -> SelectionMode {
self.list.selection_mode()
}
/// Set the current selection mode
pub fn set_selection_mode(&mut self, mode: SelectionMode) -> TkAction {
self.list.set_selection_mode(mode)
}
/// Set the selection mode (inline)
pub fn with_selection_mode(mut self, mode: SelectionMode) -> Self {
let _ = self.set_selection_mode(mode);
self
}
/// Read the list of selected entries
///
/// With mode [`SelectionMode::Single`] this may contain zero or one entry;
/// use `selected_iter().next()` to extract only the first (optional) entry.
pub fn selected_iter(&'_ self) -> impl Iterator<Item = &'_ T::Key> + '_ {
self.list.selected_iter()
}
/// Check whether an entry is selected
pub fn is_selected(&self, key: &T::Key) -> bool {
self.list.is_selected(key)
}
/// Clear all selected items
///
/// Does not send [`ChildMsg`] responses.
pub fn clear_selected(&mut self) {
self.list.clear_selected()
}
/// Directly select an item
///
/// Returns `true` if selected, `false` if already selected.
/// Fails if selection mode does not permit selection or if the key is
/// invalid or filtered out.
///
/// Does not send [`ChildMsg`] responses.
pub fn select(&mut self, key: T::Key) -> Result<bool, SelectionError> {
self.list.select(key)
}
/// Directly deselect an item
///
/// Returns `true` if deselected, `false` if not previously selected.
/// Also returns `false` on invalid and filtered-out keys.
///
/// Does not send [`ChildMsg`] responses.
pub fn deselect(&mut self, key: &T::Key) -> bool {
self.list.deselect(key)
}
/// Manually trigger an update to handle changed data or filter
pub fn update_view(&mut self, mgr: &mut Manager) {
self.list.data().refresh();
self.list.update_view(mgr)
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.list.direction()
}
/// Set the preferred number of items visible (inline)
///
/// This affects the (ideal) size request, and thus whether children are
/// sized according to their ideal or minimum size; it does not affect the
/// minimum size.
pub fn with_num_visible(mut self, number: i32) -> Self {
self.list = self.list.with_num_visible(number);
self
}
}
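// Editor's note: typical construction (a sketch; `MyData` and `MyFilter`
// stand for user types implementing `ListData` and `Filter`, and the
// `kas::dir::Down` direction type is assumed):
//
//     let view = FilterListView::<kas::dir::Down, MyData, MyFilter>::new(data, filter)
//         .with_selection_mode(SelectionMode::Single)
//         .with_num_visible(10);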
// TODO: support derive(Scrollable)?
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> Scrollable for FilterListView<D, T, F, V>
{
#[inline]
fn scroll_axes(&self, size: Size) -> (bool, bool) {
self.list.scroll_axes(size)
}
#[inline]
fn max_scroll_offset(&self) -> Offset {
self.list.max_scroll_offset()
}
#[inline]
fn scroll_offset(&self) -> Offset {
self.list.scroll_offset()
}
#[inline]
fn set_scroll_offset(&mut self, mgr: &mut Manager, offset: Offset) -> Offset {
self.list.set_scroll_offset(mgr, offset)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> WidgetConfig for FilterListView<D, T, F, V>
{
fn configure(&mut self, mgr: &mut Manager) {
// We must refresh the filtered list when the underlying list changes
if let Some(handle) = self.list.data().data.update_handle() {
mgr.update_on_handle(handle, self.id());
}
// As well as when the filter changes
if let Some(handle) = self.list.data().update_handle() {
mgr.update_on_handle(handle, self.id());
}
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> Handler for FilterListView<D, T, F, V>
{
type Msg = ChildMsg<T::Key, <V::Widget as Handler>::Msg>;
fn handle(&mut self, mgr: &mut Manager, event: Event) -> Response<Self::Msg> {
match event {
Event::HandleUpdate { .. } => {
self.update_view(mgr);
return Response::Update;
}
_ => Response::None,
}
}
}
| {
self.filter.update_handle()
} | identifier_body |
filter_list.rs | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE-APACHE file or at:
// https://www.apache.org/licenses/LICENSE-2.0
//! Filter-list view widget
use super::{driver, Driver, ListView, SelectionError, SelectionMode};
use crate::Scrollable;
use kas::event::ChildMsg;
use kas::prelude::*;
use kas::updatable::filter::Filter;
use kas::updatable::{ListData, Updatable, UpdatableHandler};
use std::cell::RefCell;
use std::fmt::Debug;
use UpdatableHandler as UpdHandler;
/// Filter accessor over another accessor
///
/// This is an abstraction over a [`ListData`], applying a filter to items when
/// iterating and accessing.
///
/// When updating, the filter applies to the old value: if the old value
/// passes the filter, it is replaced by the new value; otherwise no
/// replacement occurs.
///
/// Note: the key and item types are the same as those in the underlying list,
/// thus one can also retrieve values from the underlying list directly.
///
/// Note: `FilteredList` implements [`ListData`] directly (see the impl
/// below); no `Rc` wrapper is required.
///
/// Warning: this implementation is `O(n)` where `n = data.len()` and not well
/// optimised, thus is expected to be slow on large data lists.
#[derive(Clone, Debug)]
struct FilteredList<T: ListData, F: Filter<T::Item>> {
/// Direct access to unfiltered data
///
/// If adjusting this, one should call [`FilteredList::refresh`] after.
data: T,
/// Direct access to the filter
///
/// If adjusting this, one should call [`FilteredList::refresh`] after.
filter: F,
view: RefCell<Vec<T::Key>>, // TODO: does this need to be in a RefCell?
}
impl<T: ListData, F: Filter<T::Item>> FilteredList<T, F> {
/// Construct and apply filter
#[inline]
fn new(data: T, filter: F) -> Self {
let len = data.len().cast();
let view = RefCell::new(Vec::with_capacity(len));
let s = FilteredList { data, filter, view };
let _ = s.refresh();
s
}
/// Refresh the view
///
/// Re-applies the filter (`O(n)` where `n` is the number of data elements).
/// Calling this directly may be useful in case the data is modified.
///
/// An update should be triggered using the returned handle.
fn refresh(&self) -> Option<UpdateHandle> {
let mut view = self.view.borrow_mut();
view.clear();
for (key, item) in self.data.iter_vec(usize::MAX) {
if self.filter.matches(item) {
view.push(key);
}
}
self.filter.update_handle()
}
}
impl<T: ListData, F: Filter<T::Item>> Updatable for FilteredList<T, F> {
fn update_handle(&self) -> Option<UpdateHandle> {
self.filter.update_handle()
}
fn update_self(&self) -> Option<UpdateHandle> {
self.refresh()
}
}
impl<K, M, T: ListData + UpdatableHandler<K, M> + 'static, F: Filter<T::Item>>
UpdatableHandler<K, M> for FilteredList<T, F>
{
fn handle(&self, key: &K, msg: &M) -> Option<UpdateHandle> {
self.data.handle(key, msg)
}
}
impl<T: ListData + 'static, F: Filter<T::Item>> ListData for FilteredList<T, F> {
type Key = T::Key;
type Item = T::Item;
fn | (&self) -> usize {
self.view.borrow().len()
}
fn contains_key(&self, key: &Self::Key) -> bool {
self.get_cloned(key).is_some()
}
fn get_cloned(&self, key: &Self::Key) -> Option<Self::Item> {
// Check the item against our filter (probably O(1)) instead of using
// our filtered list (O(n) where n=self.len()).
self.data
.get_cloned(key)
.filter(|item| self.filter.matches(item.clone()))
}
fn update(&self, key: &Self::Key, value: Self::Item) -> Option<UpdateHandle> {
// Filtering does not affect result, but does affect the view
if self
.data
.get_cloned(key)
.map(|item| !self.filter.matches(item))
.unwrap_or(true)
{
// Not previously visible: no update occurs
return None;
}
let new_visible = self.filter.matches(value.clone());
let result = self.data.update(key, value);
if result.is_some() && !new_visible {
// remove the updated item from our filtered list
self.view.borrow_mut().retain(|item| item != key);
}
result
}
fn iter_vec_from(&self, start: usize, limit: usize) -> Vec<(Self::Key, Self::Item)> {
let view = self.view.borrow();
let end = self.len().min(start + limit);
if start >= end {
return Vec::new();
}
let mut v = Vec::with_capacity(end - start);
for k in &view[start..end] {
v.push((k.clone(), self.data.get_cloned(k).unwrap()));
}
v
}
}
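// Editor's note: `iter_vec_from` pages through the *filtered* view, so a
// request is clamped to the number of visible items (hypothetical values):
//
//     let page = filtered.iter_vec_from(0, 20); // at most the first 20 visible items
//     assert!(page.len() <= 20);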
/// Filter-list view widget
///
/// This widget is a wrapper around [`ListView`] which applies a filter to the
/// data list.
///
/// Why is a data-filter a widget and not a pure-data item, you ask? The answer
/// is that a filter-list must be updated when the filter or the data changes,
/// and, since filtering a list is not especially cheap, the filtering must be
/// cached and updated when required, not every time the list view asks for more
/// data. Although it is possible to do this with a data-view, that requires
/// machinery for recursive-updates on data-structures and/or a mechanism to
/// test whether the underlying list-data changed. Implementing as a widget
/// avoids this.
// TODO: impl Clone
#[derive(Debug, Widget)]
#[handler(handle=noauto, generics = <>)]
#[layout(single)]
#[widget(config=noauto)]
pub struct FilterListView<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg> + 'static,
F: Filter<T::Item>,
V: Driver<T::Item> = driver::Default,
> {
#[widget_core]
core: CoreData,
#[widget]
list: ListView<D, FilteredList<T, F>, V>,
}
impl<
D: Directional + Default,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<D, T, F, V>
{
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`FilterListView::new_with_direction`].
pub fn new(data: T, filter: F) -> Self {
Self::new_with_dir_driver(D::default(), <V as Default>::default(), data, filter)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, data: T, filter: F) -> Self {
Self::new_with_dir_driver(direction, <V as Default>::default(), data, filter)
}
}
impl<
D: Directional + Default,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit view
pub fn new_with_driver(view: V, data: T, filter: F) -> Self {
Self::new_with_dir_driver(D::default(), view, data, filter)
}
}
impl<
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<Direction, T, F, V>
{
/// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.list.set_direction(direction)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit direction and view
pub fn new_with_dir_driver(direction: D, view: V, data: T, filter: F) -> Self {
let data = FilteredList::new(data, filter);
FilterListView {
core: Default::default(),
list: ListView::new_with_dir_driver(direction, view, data),
}
}
/// Access the stored data (pre-filter)
pub fn unfiltered_data(&self) -> &T {
&self.list.data().data
}
/// Mutably access the stored data (pre-filter)
///
/// It may be necessary to use [`FilterListView::update_view`] to update the view of this data.
pub fn unfiltered_data_mut(&mut self) -> &mut T {
&mut self.list.data_mut().data
}
/// Access the stored data (unfiltered)
///
/// This is equivalent to [`FilterListView::unfiltered_data`]: the filter
/// does not restrict access through this method.
pub fn data(&self) -> &T {
&self.list.data().data
}
/// Mutably access the stored data (unfiltered)
///
/// It may be necessary to use [`FilterListView::update_view`] to update the view of this data.
pub fn data_mut(&mut self) -> &mut T {
&mut self.list.data_mut().data
}
/// Check whether a key has data (post-filter)
pub fn contains_key(&self, key: &T::Key) -> bool {
self.list.data().contains_key(key)
}
/// Get a copy of the shared value at `key` (post-filter)
pub fn get_value(&self, key: &T::Key) -> Option<T::Item> {
self.list.data().get_cloned(key)
}
/// Set shared data (post-filter)
///
/// This method updates the shared data, if supported (see
/// [`ListData::update`] on the filtered list). Other widgets sharing this
/// data are notified of the update, if data is changed.
pub fn set_value(&self, mgr: &mut Manager, key: &T::Key, data: T::Item) {
if let Some(handle) = self.list.data().update(key, data) {
mgr.trigger_update(handle, 0);
}
}
/// Update shared data (post-filter)
///
/// This is purely a convenience method over [`FilterListView::set_value`].
/// It does nothing if no value is found at `key`.
/// It notifies other widgets of updates to the shared data.
pub fn update_value<G: Fn(T::Item) -> T::Item>(&self, mgr: &mut Manager, key: &T::Key, f: G) {
if let Some(item) = self.get_value(key) {
self.set_value(mgr, key, f(item));
}
}
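// Editor's note: a sketch of `update_value` with a closure (assuming a
// numeric item type; `key` is hypothetical):
//
//     view.update_value(mgr, &key, |n| n + 1);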
/// Get the current selection mode
pub fn selection_mode(&self) -> SelectionMode {
self.list.selection_mode()
}
/// Set the current selection mode
pub fn set_selection_mode(&mut self, mode: SelectionMode) -> TkAction {
self.list.set_selection_mode(mode)
}
/// Set the selection mode (inline)
pub fn with_selection_mode(mut self, mode: SelectionMode) -> Self {
let _ = self.set_selection_mode(mode);
self
}
/// Read the list of selected entries
///
/// With mode [`SelectionMode::Single`] this may contain zero or one entry;
/// use `selected_iter().next()` to extract only the first (optional) entry.
pub fn selected_iter(&'_ self) -> impl Iterator<Item = &'_ T::Key> + '_ {
self.list.selected_iter()
}
/// Check whether an entry is selected
pub fn is_selected(&self, key: &T::Key) -> bool {
self.list.is_selected(key)
}
/// Clear all selected items
///
/// Does not send [`ChildMsg`] responses.
pub fn clear_selected(&mut self) {
self.list.clear_selected()
}
/// Directly select an item
///
/// Returns `true` if selected, `false` if already selected.
/// Fails if selection mode does not permit selection or if the key is
/// invalid or filtered out.
///
/// Does not send [`ChildMsg`] responses.
pub fn select(&mut self, key: T::Key) -> Result<bool, SelectionError> {
self.list.select(key)
}
/// Directly deselect an item
///
/// Returns `true` if deselected, `false` if not previously selected.
/// Also returns `false` on invalid and filtered-out keys.
///
/// Does not send [`ChildMsg`] responses.
pub fn deselect(&mut self, key: &T::Key) -> bool {
self.list.deselect(key)
}
/// Manually trigger an update to handle changed data or filter
pub fn update_view(&mut self, mgr: &mut Manager) {
self.list.data().refresh();
self.list.update_view(mgr)
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.list.direction()
}
/// Set the preferred number of items visible (inline)
///
/// This affects the (ideal) size request, and thus whether children are
/// sized according to their ideal or minimum size; it does not affect the
/// minimum size.
pub fn with_num_visible(mut self, number: i32) -> Self {
self.list = self.list.with_num_visible(number);
self
}
}
// TODO: support derive(Scrollable)?
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> Scrollable for FilterListView<D, T, F, V>
{
#[inline]
fn scroll_axes(&self, size: Size) -> (bool, bool) {
self.list.scroll_axes(size)
}
#[inline]
fn max_scroll_offset(&self) -> Offset {
self.list.max_scroll_offset()
}
#[inline]
fn scroll_offset(&self) -> Offset {
self.list.scroll_offset()
}
#[inline]
fn set_scroll_offset(&mut self, mgr: &mut Manager, offset: Offset) -> Offset {
self.list.set_scroll_offset(mgr, offset)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> WidgetConfig for FilterListView<D, T, F, V>
{
fn configure(&mut self, mgr: &mut Manager) {
// We must refresh the filtered list when the underlying list changes
if let Some(handle) = self.list.data().data.update_handle() {
mgr.update_on_handle(handle, self.id());
}
// As well as when the filter changes
if let Some(handle) = self.list.data().update_handle() {
mgr.update_on_handle(handle, self.id());
}
}
}
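// Editor's note: `configure` above registers two update handles — one for
// the underlying data, one for the filter — so a filter edit alone is enough
// to drive a refresh (sketch; `set_pattern` is a hypothetical method on a
// handle-backed filter type):
//
//     filter.set_pattern("abc"); // triggers the filter's update handle
//     // -> Event::HandleUpdate -> handle() -> update_view()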
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> Handler for FilterListView<D, T, F, V>
{
type Msg = ChildMsg<T::Key, <V::Widget as Handler>::Msg>;
fn handle(&mut self, mgr: &mut Manager, event: Event) -> Response<Self::Msg> {
match event {
Event::HandleUpdate { .. } => {
self.update_view(mgr);
return Response::Update;
}
_ => Response::None,
}
}
}
| len | identifier_name |
filter_list.rs | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE-APACHE file or at:
// https://www.apache.org/licenses/LICENSE-2.0
//! Filter-list view widget
use super::{driver, Driver, ListView, SelectionError, SelectionMode};
use crate::Scrollable;
use kas::event::ChildMsg;
use kas::prelude::*;
use kas::updatable::filter::Filter;
use kas::updatable::{ListData, Updatable, UpdatableHandler};
use std::cell::RefCell;
use std::fmt::Debug;
use UpdatableHandler as UpdHandler;
/// Filter accessor over another accessor
///
/// This is an abstraction over a [`ListData`], applying a filter to items when
/// iterating and accessing.
///
/// When updating, the filter applies to the old value: if the old value
/// passes the filter, it is replaced by the new value; otherwise no
/// replacement occurs.
///
/// Note: the key and item types are the same as those in the underlying list,
/// thus one can also retrieve values from the underlying list directly.
///
/// Note: `FilteredList` implements [`ListData`] directly (see the impl
/// below); no `Rc` wrapper is required.
///
/// Warning: this implementation is `O(n)` where `n = data.len()` and not well
/// optimised, thus is expected to be slow on large data lists.
#[derive(Clone, Debug)]
struct FilteredList<T: ListData, F: Filter<T::Item>> {
/// Direct access to unfiltered data
///
/// If adjusting this, one should call [`FilteredList::refresh`] after.
data: T,
/// Direct access to the filter
///
/// If adjusting this, one should call [`FilteredList::refresh`] after.
filter: F,
view: RefCell<Vec<T::Key>>, // TODO: does this need to be in a RefCell?
}
impl<T: ListData, F: Filter<T::Item>> FilteredList<T, F> {
/// Construct and apply filter
#[inline]
fn new(data: T, filter: F) -> Self {
let len = data.len().cast();
let view = RefCell::new(Vec::with_capacity(len));
let s = FilteredList { data, filter, view };
let _ = s.refresh();
s
}
/// Refresh the view
///
/// Re-applies the filter (`O(n)` where `n` is the number of data elements).
/// Calling this directly may be useful in case the data is modified.
///
/// An update should be triggered using the returned handle.
fn refresh(&self) -> Option<UpdateHandle> {
let mut view = self.view.borrow_mut();
view.clear();
for (key, item) in self.data.iter_vec(usize::MAX) {
if self.filter.matches(item) {
view.push(key);
}
}
self.filter.update_handle()
}
}
impl<T: ListData, F: Filter<T::Item>> Updatable for FilteredList<T, F> {
fn update_handle(&self) -> Option<UpdateHandle> {
self.filter.update_handle()
}
fn update_self(&self) -> Option<UpdateHandle> {
self.refresh()
}
}
impl<K, M, T: ListData + UpdatableHandler<K, M> + 'static, F: Filter<T::Item>>
UpdatableHandler<K, M> for FilteredList<T, F>
{
fn handle(&self, key: &K, msg: &M) -> Option<UpdateHandle> {
self.data.handle(key, msg)
}
}
impl<T: ListData + 'static, F: Filter<T::Item>> ListData for FilteredList<T, F> {
type Key = T::Key;
type Item = T::Item;
fn len(&self) -> usize {
self.view.borrow().len()
}
fn contains_key(&self, key: &Self::Key) -> bool {
self.get_cloned(key).is_some()
}
fn get_cloned(&self, key: &Self::Key) -> Option<Self::Item> {
// Check the item against our filter (probably O(1)) instead of using
// our filtered list (O(n) where n=self.len()).
self.data
.get_cloned(key)
.filter(|item| self.filter.matches(item.clone()))
}
fn update(&self, key: &Self::Key, value: Self::Item) -> Option<UpdateHandle> {
// Filtering does not affect result, but does affect the view
if self
.data
.get_cloned(key)
.map(|item| !self.filter.matches(item))
.unwrap_or(true)
{
// Not previously visible: no update occurs
return None;
}
let new_visible = self.filter.matches(value.clone());
let result = self.data.update(key, value);
if result.is_some() && !new_visible {
// remove the updated item from our filtered list
self.view.borrow_mut().retain(|item| item != key);
}
result
}
fn iter_vec_from(&self, start: usize, limit: usize) -> Vec<(Self::Key, Self::Item)> {
let view = self.view.borrow();
let end = self.len().min(start + limit);
if start >= end {
return Vec::new();
}
let mut v = Vec::with_capacity(end - start);
for k in &view[start..end] {
v.push((k.clone(), self.data.get_cloned(k).unwrap()));
}
v
}
}
/// Filter-list view widget
///
/// This widget is a wrapper around [`ListView`] which applies a filter to the
/// data list.
///
/// Why is a data-filter a widget and not a pure-data item, you ask? The answer
/// is that a filter-list must be updated when the filter or the data changes,
/// and, since filtering a list is not especially cheap, the filtering must be
/// cached and updated when required, not every time the list view asks for more
/// data. Although it is possible to do this with a data-view, that requires
/// machinery for recursive-updates on data-structures and/or a mechanism to
/// test whether the underlying list-data changed. Implementing as a widget
/// avoids this.
// TODO: impl Clone
#[derive(Debug, Widget)]
#[handler(handle=noauto, generics = <>)]
#[layout(single)]
#[widget(config=noauto)]
pub struct FilterListView<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg> + 'static,
F: Filter<T::Item>,
V: Driver<T::Item> = driver::Default,
> {
#[widget_core]
core: CoreData,
#[widget]
list: ListView<D, FilteredList<T, F>, V>,
}
impl<
D: Directional + Default,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<D, T, F, V>
{
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`FilterListView::new_with_direction`].
pub fn new(data: T, filter: F) -> Self {
Self::new_with_dir_driver(D::default(), <V as Default>::default(), data, filter)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, data: T, filter: F) -> Self {
Self::new_with_dir_driver(direction, <V as Default>::default(), data, filter)
}
}
impl<
D: Directional + Default,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit view
pub fn new_with_driver(view: V, data: T, filter: F) -> Self {
Self::new_with_dir_driver(D::default(), view, data, filter)
}
}
impl<
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item> + Default,
> FilterListView<Direction, T, F, V>
{ | }
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> FilterListView<D, T, F, V>
{
/// Construct a new instance with explicit direction and view
pub fn new_with_dir_driver(direction: D, view: V, data: T, filter: F) -> Self {
let data = FilteredList::new(data, filter);
FilterListView {
core: Default::default(),
list: ListView::new_with_dir_driver(direction, view, data),
}
}
/// Access the stored data (pre-filter)
pub fn unfiltered_data(&self) -> &T {
&self.list.data().data
}
/// Mutably access the stored data (pre-filter)
///
/// It may be necessary to use [`FilterListView::update_view`] to update the view of this data.
pub fn unfiltered_data_mut(&mut self) -> &mut T {
&mut self.list.data_mut().data
}
/// Access the stored data (unfiltered)
///
/// This is equivalent to [`FilterListView::unfiltered_data`]: the filter
/// does not restrict access through this method.
pub fn data(&self) -> &T {
&self.list.data().data
}
/// Mutably access the stored data (unfiltered)
///
/// It may be necessary to use [`FilterListView::update_view`] to update the view of this data.
pub fn data_mut(&mut self) -> &mut T {
&mut self.list.data_mut().data
}
/// Check whether a key has data (post-filter)
pub fn contains_key(&self, key: &T::Key) -> bool {
self.list.data().contains_key(key)
}
/// Get a copy of the shared value at `key` (post-filter)
pub fn get_value(&self, key: &T::Key) -> Option<T::Item> {
self.list.data().get_cloned(key)
}
/// Set shared data (post-filter)
///
/// This method updates the shared data, if supported (see
/// [`ListData::update`] on the filtered list). Other widgets sharing this
/// data are notified of the update, if data is changed.
pub fn set_value(&self, mgr: &mut Manager, key: &T::Key, data: T::Item) {
if let Some(handle) = self.list.data().update(key, data) {
mgr.trigger_update(handle, 0);
}
}
/// Update shared data (post-filter)
///
/// This is purely a convenience method over [`FilterListView::set_value`].
/// It does nothing if no value is found at `key`.
/// It notifies other widgets of updates to the shared data.
pub fn update_value<G: Fn(T::Item) -> T::Item>(&self, mgr: &mut Manager, key: &T::Key, f: G) {
if let Some(item) = self.get_value(key) {
self.set_value(mgr, key, f(item));
}
}
/// Get the current selection mode
pub fn selection_mode(&self) -> SelectionMode {
self.list.selection_mode()
}
/// Set the current selection mode
pub fn set_selection_mode(&mut self, mode: SelectionMode) -> TkAction {
self.list.set_selection_mode(mode)
}
/// Set the selection mode (inline)
pub fn with_selection_mode(mut self, mode: SelectionMode) -> Self {
let _ = self.set_selection_mode(mode);
self
}
/// Read the list of selected entries
///
/// With mode [`SelectionMode::Single`] this may contain zero or one entry;
/// use `selected_iter().next()` to extract only the first (optional) entry.
pub fn selected_iter(&'_ self) -> impl Iterator<Item = &'_ T::Key> + '_ {
self.list.selected_iter()
}
/// Check whether an entry is selected
pub fn is_selected(&self, key: &T::Key) -> bool {
self.list.is_selected(key)
}
/// Clear all selected items
///
/// Does not send [`ChildMsg`] responses.
pub fn clear_selected(&mut self) {
self.list.clear_selected()
}
/// Directly select an item
///
/// Returns `true` if selected, `false` if already selected.
/// Fails if selection mode does not permit selection or if the key is
/// invalid or filtered out.
///
/// Does not send [`ChildMsg`] responses.
pub fn select(&mut self, key: T::Key) -> Result<bool, SelectionError> {
self.list.select(key)
}
/// Directly deselect an item
///
/// Returns `true` if deselected, `false` if not previously selected.
/// Also returns `false` on invalid and filtered-out keys.
///
/// Does not send [`ChildMsg`] responses.
pub fn deselect(&mut self, key: &T::Key) -> bool {
self.list.deselect(key)
}
/// Manually trigger an update to handle changed data or filter
pub fn update_view(&mut self, mgr: &mut Manager) {
self.list.data().refresh();
self.list.update_view(mgr)
}
/// Get the direction of contents
pub fn direction(&self) -> Direction {
self.list.direction()
}
/// Set the preferred number of items visible (inline)
///
/// This affects the (ideal) size request, and thus whether children are
/// sized according to their ideal or minimum size; it does not affect the
/// minimum size.
pub fn with_num_visible(mut self, number: i32) -> Self {
self.list = self.list.with_num_visible(number);
self
}
}
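// Editor's note: after mutating the unfiltered data, re-apply the filter
// manually (sketch; `new_item` and the `push` method are hypothetical,
// depending on the concrete `ListData` type):
//
//     view.unfiltered_data_mut().push(new_item);
//     view.update_view(mgr); // re-apply the filter and refresh child widgets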
// TODO: support derive(Scrollable)?
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> Scrollable for FilterListView<D, T, F, V>
{
#[inline]
fn scroll_axes(&self, size: Size) -> (bool, bool) {
self.list.scroll_axes(size)
}
#[inline]
fn max_scroll_offset(&self) -> Offset {
self.list.max_scroll_offset()
}
#[inline]
fn scroll_offset(&self) -> Offset {
self.list.scroll_offset()
}
#[inline]
fn set_scroll_offset(&mut self, mgr: &mut Manager, offset: Offset) -> Offset {
self.list.set_scroll_offset(mgr, offset)
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> WidgetConfig for FilterListView<D, T, F, V>
{
fn configure(&mut self, mgr: &mut Manager) {
// We must refresh the filtered list when the underlying list changes
if let Some(handle) = self.list.data().data.update_handle() {
mgr.update_on_handle(handle, self.id());
}
// As well as when the filter changes
if let Some(handle) = self.list.data().update_handle() {
mgr.update_on_handle(handle, self.id());
}
}
}
impl<
D: Directional,
T: ListData + UpdHandler<T::Key, V::Msg>,
F: Filter<T::Item>,
V: Driver<T::Item>,
> Handler for FilterListView<D, T, F, V>
{
type Msg = ChildMsg<T::Key, <V::Widget as Handler>::Msg>;
fn handle(&mut self, mgr: &mut Manager, event: Event) -> Response<Self::Msg> {
match event {
Event::HandleUpdate { .. } => {
self.update_view(mgr);
return Response::Update;
}
_ => Response::None,
}
}
} | /// Set the direction of contents
pub fn set_direction(&mut self, direction: Direction) -> TkAction {
self.list.set_direction(direction) | random_line_split |
mod.rs | //! `types` module contains types necessary for Fluent runtime
//! value handling.
//! The core struct is [`FluentValue`] which is a type that can be passed
//! to the [`FluentBundle::format_pattern`](crate::bundle::FluentBundle) as an argument, it can be passed
//! to any Fluent Function, and any function may return it.
//!
//! This part of functionality is not fully hashed out yet, since we're waiting
//! for the internationalization APIs to mature, at which point all number
//! formatting operations will be moved out of Fluent.
//!
//! For now, [`FluentValue`] can be a string, a number, or a custom [`FluentType`]
//! which allows users of the library to implement their own types of values,
//! such as dates, or more complex structures needed for their bindings.
mod number;
mod plural;
pub use number::*;
use plural::PluralRules;
use std::any::Any;
use std::borrow::{Borrow, Cow};
use std::fmt;
use std::str::FromStr;
use intl_pluralrules::{PluralCategory, PluralRuleType};
use crate::memoizer::MemoizerKind;
use crate::resolver::Scope;
use crate::resource::FluentResource;
/// Custom types can implement the [`FluentType`] trait in order to generate a string
/// value for use in the message generation process.
pub trait FluentType: fmt::Debug + AnyEq + 'static {
/// Create a clone of the underlying type.
fn duplicate(&self) -> Box<dyn FluentType + Send>;
/// Convert the custom type into a string value, for instance a custom DateTime
/// type could return "Oct. 27, 2022".
fn as_string(&self, intls: &intl_memoizer::IntlLangMemoizer) -> Cow<'static, str>;
/// Convert the custom type into a string value, for instance a custom DateTime
/// type could return "Oct. 27, 2022". This operation is provided the threadsafe
/// [IntlLangMemoizer](intl_memoizer::concurrent::IntlLangMemoizer).
fn as_string_threadsafe(
&self,
intls: &intl_memoizer::concurrent::IntlLangMemoizer,
) -> Cow<'static, str>;
}
impl PartialEq for dyn FluentType + Send {
fn eq(&self, other: &Self) -> bool {
self.equals(other.as_any())
}
}
pub trait AnyEq: Any + 'static {
fn equals(&self, other: &dyn Any) -> bool;
fn as_any(&self) -> &dyn Any;
}
impl<T: Any + PartialEq> AnyEq for T {
fn equals(&self, other: &dyn Any) -> bool {
other
.downcast_ref::<Self>()
.map_or(false, |that| self == that)
}
fn as_any(&self) -> &dyn Any {
self
}
}
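// Editor's sketch: how a user-defined type plugs into `FluentValue::Custom`
// via the traits above. `DateStub` is a hypothetical example type, not part
// of this crate's API.
#[cfg(test)]
mod custom_type_example {
    use super::*;

    #[derive(Debug, Clone, PartialEq)]
    struct DateStub {
        epoch_days: u32,
    }

    impl FluentType for DateStub {
        fn duplicate(&self) -> Box<dyn FluentType + Send> {
            Box::new(self.clone())
        }
        fn as_string(&self, _intls: &intl_memoizer::IntlLangMemoizer) -> Cow<'static, str> {
            format!("day {}", self.epoch_days).into()
        }
        fn as_string_threadsafe(
            &self,
            _intls: &intl_memoizer::concurrent::IntlLangMemoizer,
        ) -> Cow<'static, str> {
            format!("day {}", self.epoch_days).into()
        }
    }

    #[test]
    fn custom_values_compare_by_contents() {
        let a = FluentValue::Custom(Box::new(DateStub { epoch_days: 1 }));
        let b = a.clone();
        assert_eq!(a, b); // `PartialEq` dispatches through `AnyEq::equals`
    }
}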
/// The `FluentValue` enum represents values which can be formatted to a String.
///
/// Those values are either passed as arguments to [`FluentBundle::format_pattern`] or
/// produced by functions, or generated in the process of pattern resolution.
///
/// [`FluentBundle::format_pattern`]: crate::bundle::FluentBundle::format_pattern
#[derive(Debug)]
pub enum FluentValue<'source> {
String(Cow<'source, str>),
Number(FluentNumber),
Custom(Box<dyn FluentType + Send>),
None,
Error,
}
impl<'s> PartialEq for FluentValue<'s> {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(FluentValue::String(s), FluentValue::String(s2)) => s == s2,
(FluentValue::Number(s), FluentValue::Number(s2)) => s == s2,
(FluentValue::Custom(s), FluentValue::Custom(s2)) => s == s2,
_ => false,
}
}
}
impl<'s> Clone for FluentValue<'s> {
fn clone(&self) -> Self {
match self {
FluentValue::String(s) => FluentValue::String(s.clone()),
FluentValue::Number(s) => FluentValue::Number(s.clone()),
FluentValue::Custom(s) => {
let new_value: Box<dyn FluentType + Send> = s.duplicate();
FluentValue::Custom(new_value)
}
FluentValue::Error => FluentValue::Error,
FluentValue::None => FluentValue::None,
}
}
}
impl<'source> FluentValue<'source> {
/// Attempts to parse a string slice into a [`FluentValue::Number`]. If
/// parsing fails, it instead falls back to a [`FluentValue::String`]
/// holding the original slice.
///
/// ```
/// use fluent_bundle::types::{FluentNumber, FluentNumberOptions, FluentValue};
///
/// // "2" parses into a `FluentNumber`
/// assert_eq!(
/// FluentValue::try_number("2"),
/// FluentValue::Number(FluentNumber::new(2.0, FluentNumberOptions::default()))
/// );
///
/// // Floats can be parsed as well.
/// assert_eq!(
/// FluentValue::try_number("3.141569"),
/// FluentValue::Number(FluentNumber::new(
/// 3.141569,
/// FluentNumberOptions {
/// minimum_fraction_digits: Some(6),
/// ..Default::default()
/// }
/// ))
/// );
///
/// // When a value is not a valid number, it falls back to a `FluentValue::String`
/// assert_eq!(
/// FluentValue::try_number("A string"),
/// FluentValue::String("A string".into())
/// );
/// ```
pub fn try_number(value: &'source str) -> Self {
if let Ok(number) = FluentNumber::from_str(value) {
number.into()
} else {
value.into()
}
}
/// Checks to see if two [`FluentValues`](FluentValue) match each other by having the
/// same type and contents. The special exception is in the case of a string being
/// compared to a number; in that case we check whether the number's plural rule category matches the string.
///
/// ```
/// use fluent_bundle::resolver::Scope;
/// use fluent_bundle::{types::FluentValue, FluentBundle, FluentResource};
/// use unic_langid::langid;
///
/// let langid_ars = langid!("en");
/// let bundle: FluentBundle<FluentResource> = FluentBundle::new(vec![langid_ars]);
/// let scope = Scope::new(&bundle, None, None);
///
/// // Matching examples:
/// assert!(FluentValue::try_number("2").matches(&FluentValue::try_number("2"), &scope));
/// assert!(FluentValue::from("fluent").matches(&FluentValue::from("fluent"), &scope));
/// assert!(
/// FluentValue::from("one").matches(&FluentValue::try_number("1"), &scope),
/// "Plural rules are matched."
/// );
///
/// // Non-matching examples:
/// assert!(!FluentValue::try_number("2").matches(&FluentValue::try_number("3"), &scope));
/// assert!(!FluentValue::from("fluent").matches(&FluentValue::from("not fluent"), &scope));
/// assert!(!FluentValue::from("two").matches(&FluentValue::try_number("100"), &scope),);
/// ```
pub fn matches<R: Borrow<FluentResource>, M>(
&self,
other: &FluentValue,
scope: &Scope<R, M>,
) -> bool
where
M: MemoizerKind,
{
match (self, other) {
(&FluentValue::String(ref a), &FluentValue::String(ref b)) => a == b,
(&FluentValue::Number(ref a), &FluentValue::Number(ref b)) => a == b,
(&FluentValue::String(ref a), &FluentValue::Number(ref b)) => {
let cat = match a.as_ref() {
"zero" => PluralCategory::ZERO,
"one" => PluralCategory::ONE,
"two" => PluralCategory::TWO,
"few" => PluralCategory::FEW,
"many" => PluralCategory::MANY,
"other" => PluralCategory::OTHER,
_ => return false,
};
// This string matches a plural rule keyword. Check if the number
// matches the plural rule category.
scope
.bundle
.intls
.with_try_get_threadsafe::<PluralRules, _, _>(
(PluralRuleType::CARDINAL,),
|pr| pr.0.select(b) == Ok(cat),
)
.unwrap()
}
_ => false,
}
}
/// Write out a string version of the [`FluentValue`] to `W`.
pub fn write<W, R, M>(&self, w: &mut W, scope: &Scope<R, M>) -> fmt::Result
where
W: fmt::Write,
R: Borrow<FluentResource>,
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(self, &scope.bundle.intls) {
return w.write_str(&val);
}
}
match self {
FluentValue::String(s) => w.write_str(s),
FluentValue::Number(n) => w.write_str(&n.as_string()),
FluentValue::Custom(s) => w.write_str(&scope.bundle.intls.stringify_value(&**s)),
FluentValue::Error => Ok(()),
FluentValue::None => Ok(()),
}
}
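// Editor's note: a usage sketch for `write` (the `scope` value would be
// built as in the doctest on `matches` above; assumes no custom formatter
// is installed on the bundle):
//
//     let mut out = String::new();
//     FluentValue::from("hi").write(&mut out, &scope).unwrap();
//     assert_eq!(out, "hi");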
/// Converts the [`FluentValue`] to a string.
///
/// Clones inner values when owned, borrowed data is not cloned.
/// Prefer using [`FluentValue::into_string()`] when possible.
pub fn as_string<R: Borrow<FluentResource>, M>(&self, scope: &Scope<R, M>) -> Cow<'source, str>
where
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(self, &scope.bundle.intls) {
return val.into();
}
}
match self {
FluentValue::String(s) => s.clone(),
FluentValue::Number(n) => n.as_string(),
FluentValue::Custom(s) => scope.bundle.intls.stringify_value(&**s),
FluentValue::Error => "".into(),
FluentValue::None => "".into(),
}
}
/// Converts the [`FluentValue`] to a string.
///
/// Takes self by-value to be able to skip expensive clones.
/// Prefer this method over [`FluentValue::as_string()`] when possible.
pub fn into_string<R: Borrow<FluentResource>, M>(self, scope: &Scope<R, M>) -> Cow<'source, str>
where
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(&self, &scope.bundle.intls) {
return val.into();
}
}
match self {
FluentValue::String(s) => s,
FluentValue::Number(n) => n.as_string(),
FluentValue::Custom(s) => scope.bundle.intls.stringify_value(s.as_ref()),
FluentValue::Error => "".into(),
FluentValue::None => "".into(),
}
}
pub fn into_owned<'a>(&self) -> FluentValue<'a> {
match self {
FluentValue::String(str) => FluentValue::String(Cow::from(str.to_string())),
FluentValue::Number(s) => FluentValue::Number(s.clone()),
FluentValue::Custom(s) => FluentValue::Custom(s.duplicate()),
FluentValue::Error => FluentValue::Error,
FluentValue::None => FluentValue::None,
}
}
}
impl<'source> From<String> for FluentValue<'source> {
fn from(s: String) -> Self {
FluentValue::String(s.into())
}
}
impl<'source> From<&'source String> for FluentValue<'source> {
fn from(s: &'source String) -> Self {
FluentValue::String(s.into())
}
}
impl<'source> From<&'source str> for FluentValue<'source> {
fn from(s: &'source str) -> Self {
FluentValue::String(s.into())
}
}
impl<'source> From<Cow<'source, str>> for FluentValue<'source> {
fn from(s: Cow<'source, str>) -> Self {
FluentValue::String(s)
}
}
impl<'source, T> From<Option<T>> for FluentValue<'source>
where
T: Into<FluentValue<'source>>,
{
fn | (v: Option<T>) -> Self {
match v {
Some(v) => v.into(),
None => FluentValue::None,
}
}
}
| from | identifier_name |
mod.rs | //! `types` module contains types necessary for Fluent runtime
//! value handling.
//! The core struct is [`FluentValue`] which is a type that can be passed
//! to the [`FluentBundle::format_pattern`](crate::bundle::FluentBundle) as an argument, it can be passed
//! to any Fluent Function, and any function may return it.
//!
//! This part of functionality is not fully hashed out yet, since we're waiting
//! for the internationalization APIs to mature, at which point all number
//! formatting operations will be moved out of Fluent.
//!
//! For now, [`FluentValue`] can be a string, a number, or a custom [`FluentType`]
//! which allows users of the library to implement their own types of values,
//! such as dates, or more complex structures needed for their bindings.
mod number;
mod plural;
pub use number::*;
use plural::PluralRules;
use std::any::Any;
use std::borrow::{Borrow, Cow};
use std::fmt;
use std::str::FromStr;
use intl_pluralrules::{PluralCategory, PluralRuleType};
use crate::memoizer::MemoizerKind;
use crate::resolver::Scope;
use crate::resource::FluentResource;
/// Custom types can implement the [`FluentType`] trait in order to generate a string
/// value for use in the message generation process.
pub trait FluentType: fmt::Debug + AnyEq + 'static {
/// Create a clone of the underlying type.
fn duplicate(&self) -> Box<dyn FluentType + Send>;
/// Convert the custom type into a string value, for instance a custom DateTime
/// type could return "Oct. 27, 2022".
fn as_string(&self, intls: &intl_memoizer::IntlLangMemoizer) -> Cow<'static, str>;
/// Convert the custom type into a string value, for instance a custom DateTime
/// type could return "Oct. 27, 2022". This operation is provided the threadsafe
/// [IntlLangMemoizer](intl_memoizer::concurrent::IntlLangMemoizer).
fn as_string_threadsafe(
&self,
intls: &intl_memoizer::concurrent::IntlLangMemoizer,
) -> Cow<'static, str>;
}
impl PartialEq for dyn FluentType + Send {
fn eq(&self, other: &Self) -> bool {
self.equals(other.as_any())
}
}
pub trait AnyEq: Any + 'static {
fn equals(&self, other: &dyn Any) -> bool;
fn as_any(&self) -> &dyn Any;
}
impl<T: Any + PartialEq> AnyEq for T {
fn equals(&self, other: &dyn Any) -> bool {
other
.downcast_ref::<Self>()
.map_or(false, |that| self == that)
}
fn as_any(&self) -> &dyn Any {
self
}
}
/// The `FluentValue` enum represents values which can be formatted to a String.
///
/// Those values are either passed as arguments to [`FluentBundle::format_pattern`] or
/// produced by functions, or generated in the process of pattern resolution.
///
/// [`FluentBundle::format_pattern`]: crate::bundle::FluentBundle::format_pattern
#[derive(Debug)]
pub enum FluentValue<'source> {
String(Cow<'source, str>),
Number(FluentNumber),
Custom(Box<dyn FluentType + Send>),
None,
Error,
}
impl<'s> PartialEq for FluentValue<'s> {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(FluentValue::String(s), FluentValue::String(s2)) => s == s2,
(FluentValue::Number(s), FluentValue::Number(s2)) => s == s2,
(FluentValue::Custom(s), FluentValue::Custom(s2)) => s == s2,
_ => false,
}
}
}
impl<'s> Clone for FluentValue<'s> {
fn clone(&self) -> Self {
match self {
FluentValue::String(s) => FluentValue::String(s.clone()),
FluentValue::Number(s) => FluentValue::Number(s.clone()),
FluentValue::Custom(s) => {
let new_value: Box<dyn FluentType + Send> = s.duplicate();
FluentValue::Custom(new_value)
}
FluentValue::Error => FluentValue::Error,
FluentValue::None => FluentValue::None,
}
}
}
impl<'source> FluentValue<'source> {
/// Attempts to parse a string slice into a [`FluentValue::Number`]. If
/// parsing fails, it instead falls back to a [`FluentValue::String`]
/// holding the original slice.
///
/// ```
/// use fluent_bundle::types::{FluentNumber, FluentNumberOptions, FluentValue};
///
/// // "2" parses into a `FluentNumber`
/// assert_eq!(
/// FluentValue::try_number("2"),
/// FluentValue::Number(FluentNumber::new(2.0, FluentNumberOptions::default()))
/// );
///
/// // Floats can be parsed as well.
/// assert_eq!(
/// FluentValue::try_number("3.141569"),
/// FluentValue::Number(FluentNumber::new(
/// 3.141569,
/// FluentNumberOptions {
/// minimum_fraction_digits: Some(6),
/// ..Default::default()
/// }
/// ))
/// );
///
/// // When a value is not a valid number, it falls back to a `FluentValue::String`
/// assert_eq!(
/// FluentValue::try_number("A string"),
/// FluentValue::String("A string".into())
/// );
/// ```
pub fn try_number(value: &'source str) -> Self {
if let Ok(number) = FluentNumber::from_str(value) {
number.into()
} else {
value.into()
}
}
/// Checks to see if two [`FluentValues`](FluentValue) match each other by having the
/// same type and contents. The special exception is in the case of a string being
/// compared to a number; in that case we check whether the number's plural rule category matches the string.
///
/// ```
/// use fluent_bundle::resolver::Scope;
/// use fluent_bundle::{types::FluentValue, FluentBundle, FluentResource};
/// use unic_langid::langid;
///
/// let langid_ars = langid!("en");
/// let bundle: FluentBundle<FluentResource> = FluentBundle::new(vec![langid_ars]);
/// let scope = Scope::new(&bundle, None, None);
///
/// // Matching examples:
/// assert!(FluentValue::try_number("2").matches(&FluentValue::try_number("2"), &scope));
/// assert!(FluentValue::from("fluent").matches(&FluentValue::from("fluent"), &scope));
/// assert!(
/// FluentValue::from("one").matches(&FluentValue::try_number("1"), &scope),
/// "Plural rules are matched."
/// );
///
/// // Non-matching examples:
/// assert!(!FluentValue::try_number("2").matches(&FluentValue::try_number("3"), &scope));
/// assert!(!FluentValue::from("fluent").matches(&FluentValue::from("not fluent"), &scope));
/// assert!(!FluentValue::from("two").matches(&FluentValue::try_number("100"), &scope),);
/// ```
pub fn matches<R: Borrow<FluentResource>, M>(
&self,
other: &FluentValue,
scope: &Scope<R, M>,
) -> bool
where
M: MemoizerKind,
{
match (self, other) {
(&FluentValue::String(ref a), &FluentValue::String(ref b)) => a == b,
(&FluentValue::Number(ref a), &FluentValue::Number(ref b)) => a == b,
(&FluentValue::String(ref a), &FluentValue::Number(ref b)) => {
let cat = match a.as_ref() {
"zero" => PluralCategory::ZERO,
"one" => PluralCategory::ONE,
"two" => PluralCategory::TWO,
"few" => PluralCategory::FEW,
"many" => PluralCategory::MANY,
"other" => PluralCategory::OTHER,
_ => return false,
};
// This string matches a plural rule keyword. Check if the number
// matches the plural rule category.
scope
.bundle
.intls
.with_try_get_threadsafe::<PluralRules, _, _>(
(PluralRuleType::CARDINAL,),
|pr| pr.0.select(b) == Ok(cat),
)
.unwrap()
}
_ => false,
}
}
/// Write out a string version of the [`FluentValue`] to `W`.
pub fn write<W, R, M>(&self, w: &mut W, scope: &Scope<R, M>) -> fmt::Result
where
W: fmt::Write,
R: Borrow<FluentResource>,
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(self, &scope.bundle.intls) {
return w.write_str(&val);
}
}
match self {
FluentValue::String(s) => w.write_str(s),
FluentValue::Number(n) => w.write_str(&n.as_string()),
FluentValue::Custom(s) => w.write_str(&scope.bundle.intls.stringify_value(&**s)),
FluentValue::Error => Ok(()),
FluentValue::None => Ok(()),
}
}
/// Converts the [`FluentValue`] to a string.
///
/// Clones inner values when owned, borrowed data is not cloned.
/// Prefer using [`FluentValue::into_string()`] when possible.
pub fn as_string<R: Borrow<FluentResource>, M>(&self, scope: &Scope<R, M>) -> Cow<'source, str>
where
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(self, &scope.bundle.intls) {
return val.into();
}
}
match self {
FluentValue::String(s) => s.clone(),
FluentValue::Number(n) => n.as_string(),
FluentValue::Custom(s) => scope.bundle.intls.stringify_value(&**s),
FluentValue::Error => "".into(),
FluentValue::None => "".into(),
}
}
/// Converts the [`FluentValue`] to a string.
///
/// Takes self by-value to be able to skip expensive clones.
/// Prefer this method over [`FluentValue::as_string()`] when possible.
pub fn into_string<R: Borrow<FluentResource>, M>(self, scope: &Scope<R, M>) -> Cow<'source, str>
where
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(&self, &scope.bundle.intls) {
return val.into();
}
}
match self {
FluentValue::String(s) => s,
FluentValue::Number(n) => n.as_string(),
FluentValue::Custom(s) => scope.bundle.intls.stringify_value(s.as_ref()),
FluentValue::Error => "".into(),
FluentValue::None => "".into(),
}
}
pub fn into_owned<'a>(&self) -> FluentValue<'a> {
match self {
FluentValue::String(str) => FluentValue::String(Cow::from(str.to_string())),
FluentValue::Number(s) => FluentValue::Number(s.clone()),
FluentValue::Custom(s) => FluentValue::Custom(s.duplicate()),
FluentValue::Error => FluentValue::Error,
FluentValue::None => FluentValue::None,
}
}
}
impl<'source> From<String> for FluentValue<'source> {
fn from(s: String) -> Self {
FluentValue::String(s.into())
}
}
impl<'source> From<&'source String> for FluentValue<'source> {
fn from(s: &'source String) -> Self |
}
impl<'source> From<&'source str> for FluentValue<'source> {
fn from(s: &'source str) -> Self {
FluentValue::String(s.into())
}
}
impl<'source> From<Cow<'source, str>> for FluentValue<'source> {
fn from(s: Cow<'source, str>) -> Self {
FluentValue::String(s)
}
}
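// Editor's sketch: the `From` conversions in action (hypothetical test, not
// part of the crate). Note that `FluentValue::None` compares unequal even to
// itself under the `PartialEq` impl above, so we match on the variant rather
// than using `assert_eq!`.
#[cfg(test)]
mod conversion_example {
    use super::*;

    #[test]
    fn from_impls() {
        // Owned and borrowed strings both become `FluentValue::String`.
        let owned: FluentValue = String::from("hello").into();
        let borrowed: FluentValue = "hello".into();
        assert_eq!(owned, borrowed);

        // `Option<T>` maps `None` to `FluentValue::None`.
        let none: FluentValue = Option::<&str>::None.into();
        assert!(matches!(none, FluentValue::None));
    }
}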
impl<'source, T> From<Option<T>> for FluentValue<'source>
where
T: Into<FluentValue<'source>>,
{
fn from(v: Option<T>) -> Self {
match v {
Some(v) => v.into(),
None => FluentValue::None,
}
}
}
| {
FluentValue::String(s.into())
} | identifier_body |
mod.rs | //! `types` module contains types necessary for Fluent runtime
//! value handling.
//! The core struct is [`FluentValue`] which is a type that can be passed
//! to the [`FluentBundle::format_pattern`](crate::bundle::FluentBundle) as an argument, it can be passed
//! to any Fluent Function, and any function may return it.
//!
//! This part of functionality is not fully hashed out yet, since we're waiting
//! for the internationalization APIs to mature, at which point all number
//! formatting operations will be moved out of Fluent.
//!
//! For now, [`FluentValue`] can be a string, a number, or a custom [`FluentType`]
//! which allows users of the library to implement their own types of values,
//! such as dates, or more complex structures needed for their bindings.
mod number;
mod plural;
pub use number::*;
use plural::PluralRules;
use std::any::Any;
use std::borrow::{Borrow, Cow};
use std::fmt;
use std::str::FromStr;
use intl_pluralrules::{PluralCategory, PluralRuleType};
use crate::memoizer::MemoizerKind;
use crate::resolver::Scope;
use crate::resource::FluentResource;
/// Custom types can implement the [`FluentType`] trait in order to generate a string
/// value for use in the message generation process.
pub trait FluentType: fmt::Debug + AnyEq + 'static {
/// Create a clone of the underlying type.
fn duplicate(&self) -> Box<dyn FluentType + Send>;
/// Convert the custom type into a string value, for instance a custom DateTime
/// type could return "Oct. 27, 2022".
fn as_string(&self, intls: &intl_memoizer::IntlLangMemoizer) -> Cow<'static, str>;
/// Convert the custom type into a string value, for instance a custom DateTime
/// type could return "Oct. 27, 2022". This operation is provided the threadsafe
/// [IntlLangMemoizer](intl_memoizer::concurrent::IntlLangMemoizer).
fn as_string_threadsafe(
&self,
intls: &intl_memoizer::concurrent::IntlLangMemoizer,
) -> Cow<'static, str>;
}
impl PartialEq for dyn FluentType + Send {
fn eq(&self, other: &Self) -> bool {
self.equals(other.as_any())
}
}
pub trait AnyEq: Any + 'static {
fn equals(&self, other: &dyn Any) -> bool;
fn as_any(&self) -> &dyn Any;
}
impl<T: Any + PartialEq> AnyEq for T {
fn equals(&self, other: &dyn Any) -> bool {
other
.downcast_ref::<Self>()
.map_or(false, |that| self == that)
}
fn as_any(&self) -> &dyn Any {
self
}
}
/// The `FluentValue` enum represents values which can be formatted to a String.
///
/// Those values are either passed as arguments to [`FluentBundle::format_pattern`] or
/// produced by functions, or generated in the process of pattern resolution.
///
/// [`FluentBundle::format_pattern`]: crate::bundle::FluentBundle::format_pattern
#[derive(Debug)]
pub enum FluentValue<'source> {
String(Cow<'source, str>),
Number(FluentNumber),
Custom(Box<dyn FluentType + Send>),
None,
Error,
}
impl<'s> PartialEq for FluentValue<'s> {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(FluentValue::String(s), FluentValue::String(s2)) => s == s2,
(FluentValue::Number(s), FluentValue::Number(s2)) => s == s2,
(FluentValue::Custom(s), FluentValue::Custom(s2)) => s == s2,
_ => false,
}
}
}
impl<'s> Clone for FluentValue<'s> {
fn clone(&self) -> Self {
match self {
FluentValue::String(s) => FluentValue::String(s.clone()),
FluentValue::Number(s) => FluentValue::Number(s.clone()),
FluentValue::Custom(s) => {
let new_value: Box<dyn FluentType + Send> = s.duplicate();
FluentValue::Custom(new_value)
}
FluentValue::Error => FluentValue::Error,
FluentValue::None => FluentValue::None,
}
}
}
impl<'source> FluentValue<'source> {
/// Attempts to parse the string representation of a `value` that supports
/// [`ToString`] into a [`FluentValue::Number`]. If it fails, it will instead
/// convert it to a [`FluentValue::String`].
///
/// ```
/// use fluent_bundle::types::{FluentNumber, FluentNumberOptions, FluentValue};
///
/// // "2" parses into a `FluentNumber`
/// assert_eq!(
/// FluentValue::try_number("2"),
/// FluentValue::Number(FluentNumber::new(2.0, FluentNumberOptions::default()))
/// );
///
/// // Floats can be parsed as well.
/// assert_eq!(
/// FluentValue::try_number("3.141569"),
/// FluentValue::Number(FluentNumber::new(
/// 3.141569,
/// FluentNumberOptions {
/// minimum_fraction_digits: Some(6),
/// ..Default::default()
/// }
/// ))
/// );
///
/// // When a value is not a valid number, it falls back to a `FluentValue::String`
/// assert_eq!(
/// FluentValue::try_number("A string"),
/// FluentValue::String("A string".into())
/// );
/// ```
pub fn try_number(value: &'source str) -> Self {
if let Ok(number) = FluentNumber::from_str(value) {
number.into()
} else {
value.into()
}
}
/// Checks to see if two [`FluentValues`](FluentValue) match each other by having the
/// same type and contents. The special exception is in the case of a string being
/// compared to a number. In that case, the string is interpreted as a plural rule
/// keyword and checked against the number's plural category.
///
/// ```
/// use fluent_bundle::resolver::Scope;
/// use fluent_bundle::{types::FluentValue, FluentBundle, FluentResource};
/// use unic_langid::langid;
///
/// let langid_ars = langid!("en");
/// let bundle: FluentBundle<FluentResource> = FluentBundle::new(vec![langid_ars]);
/// let scope = Scope::new(&bundle, None, None);
///
/// // Matching examples:
/// assert!(FluentValue::try_number("2").matches(&FluentValue::try_number("2"), &scope));
/// assert!(FluentValue::from("fluent").matches(&FluentValue::from("fluent"), &scope));
/// assert!(
/// FluentValue::from("one").matches(&FluentValue::try_number("1"), &scope),
/// "Plural rules are matched."
/// );
///
/// // Non-matching examples:
/// assert!(!FluentValue::try_number("2").matches(&FluentValue::try_number("3"), &scope));
/// assert!(!FluentValue::from("fluent").matches(&FluentValue::from("not fluent"), &scope));
/// assert!(!FluentValue::from("two").matches(&FluentValue::try_number("100"), &scope),);
/// ```
pub fn matches<R: Borrow<FluentResource>, M>(
&self,
other: &FluentValue,
scope: &Scope<R, M>,
) -> bool
where
M: MemoizerKind,
{
match (self, other) {
(&FluentValue::String(ref a), &FluentValue::String(ref b)) => a == b,
(&FluentValue::Number(ref a), &FluentValue::Number(ref b)) => a == b,
(&FluentValue::String(ref a), &FluentValue::Number(ref b)) => {
let cat = match a.as_ref() {
"zero" => PluralCategory::ZERO,
"one" => PluralCategory::ONE,
"two" => PluralCategory::TWO,
"few" => PluralCategory::FEW,
"many" => PluralCategory::MANY,
"other" => PluralCategory::OTHER,
_ => return false,
};
// This string matches a plural rule keyword. Check if the number
// matches the plural rule category.
scope
.bundle
.intls
.with_try_get_threadsafe::<PluralRules, _, _>(
(PluralRuleType::CARDINAL,),
|pr| pr.0.select(b) == Ok(cat),
)
.unwrap()
}
_ => false,
}
}
/// Write out a string version of the [`FluentValue`] to `W`.
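///
/// A hedged sketch (assuming a `scope` constructed as in the `matches`
/// example above, with no custom formatter installed):
///
/// ```ignore
/// let mut out = String::new();
/// FluentValue::from("fluent").write(&mut out, &scope).unwrap();
/// assert_eq!(out, "fluent");
/// ```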
pub fn write<W, R, M>(&self, w: &mut W, scope: &Scope<R, M>) -> fmt::Result
where
W: fmt::Write,
R: Borrow<FluentResource>,
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(self, &scope.bundle.intls) {
return w.write_str(&val);
}
}
match self {
FluentValue::String(s) => w.write_str(s),
FluentValue::Number(n) => w.write_str(&n.as_string()),
FluentValue::Custom(s) => w.write_str(&scope.bundle.intls.stringify_value(&**s)),
FluentValue::Error => Ok(()),
FluentValue::None => Ok(()),
}
}
/// Converts the [`FluentValue`] to a string.
///
/// Clones inner values when owned; borrowed data is not cloned.
/// Prefer using [`FluentValue::into_string()`] when possible.
pub fn as_string<R: Borrow<FluentResource>, M>(&self, scope: &Scope<R, M>) -> Cow<'source, str>
where
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(self, &scope.bundle.intls) {
return val.into();
}
}
match self {
FluentValue::String(s) => s.clone(),
FluentValue::Number(n) => n.as_string(),
FluentValue::Custom(s) => scope.bundle.intls.stringify_value(&**s),
FluentValue::Error => "".into(),
FluentValue::None => "".into(),
}
}
/// Converts the [`FluentValue`] to a string.
///
/// Takes self by-value to be able to skip expensive clones.
/// Prefer this method over [`FluentValue::as_string()`] when possible.
pub fn into_string<R: Borrow<FluentResource>, M>(self, scope: &Scope<R, M>) -> Cow<'source, str>
where
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(&self, &scope.bundle.intls) {
return val.into();
}
}
match self {
FluentValue::String(s) => s,
FluentValue::Number(n) => n.as_string(),
FluentValue::Custom(s) => scope.bundle.intls.stringify_value(s.as_ref()),
FluentValue::Error => "".into(),
FluentValue::None => "".into(),
}
}
pub fn into_owned<'a>(&self) -> FluentValue<'a> {
match self {
FluentValue::String(str) => FluentValue::String(Cow::from(str.to_string())),
FluentValue::Number(s) => FluentValue::Number(s.clone()),
FluentValue::Custom(s) => FluentValue::Custom(s.duplicate()),
FluentValue::Error => FluentValue::Error,
FluentValue::None => FluentValue::None,
}
}
}
impl<'source> From<String> for FluentValue<'source> {
fn from(s: String) -> Self {
FluentValue::String(s.into())
}
}
impl<'source> From<&'source String> for FluentValue<'source> {
fn from(s: &'source String) -> Self {
FluentValue::String(s.into())
}
}
impl<'source> From<&'source str> for FluentValue<'source> {
fn from(s: &'source str) -> Self {
FluentValue::String(s.into())
}
}
impl<'source> From<Cow<'source, str>> for FluentValue<'source> {
fn from(s: Cow<'source, str>) -> Self {
FluentValue::String(s)
}
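/// Converts an `Option` into a `FluentValue`, mapping `None` to
/// [`FluentValue::None`]. A brief sketch of the behavior:
///
/// ```ignore
/// let missing: Option<&str> = None;
/// assert!(matches!(FluentValue::from(missing), FluentValue::None));
/// assert_eq!(FluentValue::from(Some("en")), FluentValue::String("en".into()));
/// ```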
}
impl<'source, T> From<Option<T>> for FluentValue<'source>
where
T: Into<FluentValue<'source>>,
{
fn from(v: Option<T>) -> Self {
match v {
Some(v) => v.into(),
None => FluentValue::None,
}
}
staged_file.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
fidl_fuchsia_io as fio, fuchsia_zircon as zx,
rand::{thread_rng, Rng},
tracing::warn,
};
const TEMPFILE_RANDOM_LENGTH: usize = 8usize;
/// Describes an error with StagedFile usage.
#[derive(thiserror::Error, Debug)]
pub enum StagedFileError {
/// Invalid arguments.
#[error("Invalid arguments to create a staged file: {0}")]
InvalidArguments(String),
/// Failed to open a file or directory.
#[error("Failed to open: {0}")]
OpenError(#[from] fuchsia_fs::node::OpenError),
/// Failed during a FIDL call.
#[error("Failed during FIDL call: {0}")]
FidlError(#[from] fidl::Error),
/// Failed to write to the staged file.
#[error("Failed to write to backing storage: {0}")]
WriteError(#[from] fuchsia_fs::file::WriteError),
/// Failed to rename the staged file.
#[error("Failed to rename temp file to target: {0}")]
RenameError(#[from] fuchsia_fs::node::RenameError),
/// Failed to flush data.
#[error("Failed to flush to disk: {0}")]
FlushError(#[source] zx::Status),
/// Failed to close the staged file.
#[error("Failed to close backing storage: {0}")]
CloseError(#[source] zx::Status),
/// Failed to readdir.
#[error("Failed to readdir: {0}")]
ReaddirError(#[from] fuchsia_fs::directory::Error),
/// Failed to unlink file.
#[error("Failed to unlink file: {0}")]
UnlinkError(#[source] zx::Status),
}
/// StagedFile is a wrapper around a |&DirectoryProxy| and a |FileProxy|
/// for a staged file within that directory.
/// The primary purpose of StagedFile is to implement the atomic write workflow
/// summarized as open -> write -> sync -> close -> rename. This workflow is
/// exposed to callers simply as write -> commit.
/// One caveat to the use of StagedFile is that in the event of power loss or
/// a crash, there may be orphaned temporary files in the directory.
/// This means that clients _should_ clean up their directories of temporary
/// files prior to operating in that directory. As such, it is important to
/// choose a |filename_prefix| that is guaranteed not to collide with
/// |target_filename|s given when calling StagedFile::commit.
/// It would have been preferable to use the tempfile crate here, but it lacks
/// the ability to open things without making use of paths and namespaces, and
/// as such, StagedFile should only be used in cases where we must supply our
/// own |DirectoryProxy|.
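///
/// A hedged sketch of the intended write -> commit flow (assuming a
/// |dir_proxy| already opened with read/write rights; the prefix and target
/// names are illustrative):
///
/// ```ignore
/// // Remove temp files orphaned by a previous crash before staging new ones.
/// let _ = StagedFile::cleanup_stale_files(&dir_proxy, "temp-").await;
/// let mut staged = StagedFile::new(&dir_proxy, "temp-").await?;
/// staged.write(b"contents").await?;
/// // commit performs the sync -> close -> rename steps.
/// staged.commit("config.json").await?;
/// ```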
pub struct StagedFile<'a> {
dir_proxy: &'a fio::DirectoryProxy,
temp_filename: String,
file_proxy: fio::FileProxy,
}
impl<'a> StagedFile<'a> {
/// Creates a new instance of StagedFile bound to the lifetime of
/// |dir_proxy| that respects |tempfile_prefix|.
/// |tempfile_prefix| must not be empty.
pub async fn new(
dir_proxy: &'a fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<StagedFile<'a>, StagedFileError> {
if tempfile_prefix.is_empty() {
return Err(StagedFileError::InvalidArguments(String::from(
"filename_prefix must not be empty",
)));
}
let temp_filename = generate_tempfile_name(tempfile_prefix);
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy,
&temp_filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
)
.await?;
Ok(StagedFile { dir_proxy, temp_filename, file_proxy })
}
/// Writes data to the backing staged file proxy.
/// This file is not guaranteed to be persisted until commit is called,
/// at which point it will be renamed to |target_filename|.
pub async fn write(&mut self, data: &[u8]) -> Result<(), StagedFileError> {
let () = fuchsia_fs::file::write(&self.file_proxy, data).await?;
Ok(())
}
/// Commits the data in the staged file to |target_filename| via the
/// traditional sync -> close -> rename atomic write workflow.
/// Calling commit does not guarantee that |target_filename| will be
/// available, but it does guarantee atomicity of the file if it does
/// exist.
pub async fn commit(self, target_filename: &str) -> Result<(), StagedFileError> {
// Do the usual atomic commit via sync, close, and rename-to-target.
// Stale files left by a crash should be cleaned up by calling cleanup_stale_files on the
// next startup.
let () = self
.file_proxy
.sync()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::FlushError)?;
let () = self
.file_proxy
.close()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::CloseError)?;
fuchsia_fs::directory::rename(self.dir_proxy, &self.temp_filename, target_filename)
.await
.map_err(StagedFileError::RenameError)?;
Ok(())
}
/// Helper function to unlink files in a directory whose names start with
/// |tempfile_prefix|, removing temp files left behind by earlier crashes.
pub async fn cleanup_stale_files(
dir_proxy: &fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<(), Vec<StagedFileError>> {
let dirents_res = fuchsia_fs::directory::readdir(dir_proxy).await;
let dirents = dirents_res.map_err(|err| vec![StagedFileError::ReaddirError(err)])?;
let mut failures = Vec::new();
for d in dirents.iter() {
let name = &d.name;
// For filenames that are known to be temporary, try to remove them.
if name.starts_with(tempfile_prefix) {
warn!("Removing unexpected file '{}' from directory", &name);
let fidl_res = dir_proxy.unlink(name, fio::UnlinkOptions::EMPTY).await;
match fidl_res {
Err(x) => failures.push(StagedFileError::FidlError(x)),
Ok(unlink_res) => {
if let Err(unlink_err) = unlink_res {
failures.push(StagedFileError::UnlinkError(zx::Status::from_raw(
unlink_err,
)));
}
}
}
}
}
if failures.is_empty() {
Ok(())
} else {
Err(failures)
}
}
}
/// Generates a temporary filename using |thread_rng| to append random chars to
/// a given |prefix|.
fn generate_tempfile_name(prefix: &str) -> String {
// Generate a tempfile with name "{prefix}-{random}"
let mut buf = String::with_capacity(TEMPFILE_RANDOM_LENGTH + prefix.len() + 1);
buf.push_str(prefix);
buf.push('-');
let mut rng = thread_rng();
std::iter::repeat(())
.map(|()| rng.sample(rand::distributions::Alphanumeric))
.map(char::from)
.take(TEMPFILE_RANDOM_LENGTH)
.for_each(|c| buf.push(c));
buf
}
#[cfg(test)]
mod test {
use {super::*, tempfile::TempDir};
#[fuchsia::test]
async fn test_normal_flow() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
let mut staged_file = StagedFile::new(&dir, "prefix-").await.unwrap();
staged_file.write(b"this is some file content".as_ref()).await.unwrap();
staged_file.commit("target_file_01").await.unwrap();
// Check that target_file_01 has been created.
let open_res = fuchsia_fs::directory::open_file(
&dir,
"target_file_01",
fio::OpenFlags::RIGHT_READABLE,
)
.await;
assert!(open_res.is_ok());
let file_bytes = fuchsia_fs::file::read(&open_res.unwrap()).await.unwrap();
assert_eq!(file_bytes, b"this is some file content");
}
#[fuchsia::test]
async fn test_empty_tempfile_prefix() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
assert!(StagedFile::new(&dir, "").await.is_err());
}
async fn write_test_file_content(dir_proxy: &fio::DirectoryProxy, filename: &str, data: &[u8]) {
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy,
filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
)
.await
.expect("could not open test file");
fuchsia_fs::file::write(&file_proxy, data).await.expect("could not write test file data")
}
async fn file_exists_with_data(
dir_proxy: &fio::DirectoryProxy,
filename: &str,
expected_data: &[u8],
) -> bool {
let file =
fuchsia_fs::directory::open_file(dir_proxy, filename, fio::OpenFlags::RIGHT_READABLE)
.await
.expect("could not open file");
let bytes = fuchsia_fs::file::read(&file).await.expect("could not read file data");
expected_data == bytes
}
#[fuchsia::test]
async fn test_cleanup_stale_files() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
// Write a variety of staged and non-staged files to the directory.
write_test_file_content(&dir, "staged-001", b"staged-001".as_ref()).await;
write_test_file_content(&dir, "real-001", b"real-001".as_ref()).await;
write_test_file_content(&dir, "staged-002", b"staged-002".as_ref()).await;
write_test_file_content(&dir, "real-002", b"real-002".as_ref()).await;
write_test_file_content(&dir, "staged-003", b"staged-003".as_ref()).await;
write_test_file_content(&dir, "004", b"004".as_ref()).await;
// Clean up stale files.
StagedFile::cleanup_stale_files(&dir, "staged-").await.unwrap();
// Ensure that only the non-staged files remain.
let dirents = fuchsia_fs::directory::readdir(&dir).await.unwrap();
assert_eq!(dirents.len(), 3);
assert!(file_exists_with_data(&dir, "real-001", b"real-001".as_ref()).await);
assert!(file_exists_with_data(&dir, "real-002", b"real-002".as_ref()).await);
assert!(file_exists_with_data(&dir, "004", b"004".as_ref()).await);
}
#[test]
fn test_generate_tempfile_name() {
let name1 = generate_tempfile_name("temp-12345");
let name2 = generate_tempfile_name("temp-12345");
let prefix = "temp-12345-";
assert!(name1.starts_with(prefix));
assert!(name2.starts_with(prefix));
assert_eq!(name1.len(), prefix.len() + TEMPFILE_RANDOM_LENGTH);
assert_eq!(name2.len(), prefix.len() + TEMPFILE_RANDOM_LENGTH);
assert_ne!(name1, name2);
}
}
result.rs | //! Operation result structures and helpers.
//!
//! Most LDAP operations return an [`LdapResult`](struct.LdapResult.html). This module
//! contains its definition, as well as that of a number of wrapper structs and
//! helper methods, which adapt LDAP result and error handling to be a closer
//! match to Rust conventions.
use std::error::Error;
use std::fmt;
use std::io;
use std::result::Result as StdResult;
use crate::controls::Control;
use crate::exop::Exop;
use crate::protocol::{LdapOp, MaybeControls, ResultSender};
use crate::search::parse_refs;
use crate::search::ResultEntry;
use crate::RequestId;
use lber::common::TagClass;
use lber::parse::parse_uint;
use lber::structures::Tag;
use lber::universal::Types;
use lber::IResult;
use thiserror::Error;
use tokio::sync::{mpsc, oneshot};
use tokio::time;
/// Type alias for the standard `Result` with the fixed `LdapError` error part.
pub type Result<T> = std::result::Result<T, LdapError>;
/// Error variants recognized by the library.
#[derive(Debug, Error)]
pub enum LdapError {
/// No path given for a `ldapi://` URL.
#[error("empty Unix domain socket path")]
EmptyUnixPath,
/// A `ldapi://` URL contains a port spec, which it shouldn't.
#[error("the port must be empty in the ldapi scheme")]
PortInUnixPath,
/// Encapsulated I/O error.
#[error("I/O error: {source}")]
Io {
#[from]
source: io::Error,
},
/// Error while sending an operation to the connection handler.
#[error("op send error: {source}")]
OpSend {
#[from]
source: mpsc::error::SendError<(RequestId, LdapOp, Tag, MaybeControls, ResultSender)>,
},
/// Error while receiving operation results from the connection handler.
#[error("result recv error: {source}")]
ResultRecv {
#[from]
source: oneshot::error::RecvError,
},
/// Error while sending an internal ID scrubbing request to the connection handler.
#[error("id scrub send error: {source}")]
IdScrubSend {
#[from]
source: mpsc::error::SendError<RequestId>,
},
/// Operation or connection timeout.
#[error("timeout: {elapsed}")]
Timeout {
#[from]
elapsed: time::Elapsed,
},
/// Error parsing the string representation of a search filter.
#[error("filter parse error")]
FilterParsing,
/// Premature end of a search stream.
#[error("premature end of search stream")]
EndOfStream,
/// URL parsing error.
#[error("url parse error: {source}")]
UrlParsing {
#[from]
source: url::ParseError,
},
/// Unknown LDAP URL scheme.
#[error("unknown LDAP URL scheme: {0}")]
UnknownScheme(String),
#[cfg(feature = "tls-native")]
/// Native TLS library error.
#[error("native TLS error: {source}")]
NativeTLS {
#[from]
source: native_tls::Error,
},
#[cfg(feature = "tls-rustls")]
/// Rustls library error.
#[error("rustls error: {source}")]
Rustls {
#[from]
source: rustls::TLSError,
},
#[cfg(feature = "tls-rustls")]
/// Rustls DNS name error.
#[error("rustls DNS error: {source}")]
DNSName {
#[from]
source: tokio_rustls::webpki::InvalidDNSNameError,
},
/// LDAP operation result with an error return code.
#[error("LDAP operation result: {result}")]
LdapResult {
#[from]
result: LdapResult,
},
/// No values provided for the Add operation.
#[error("empty value set for Add")]
AddNoValues,
/// Error during adapter initialization.
#[error("adapter init error: {0}")]
AdapterInit(String),
}
impl From<LdapError> for io::Error {
fn from(le: LdapError) -> io::Error {
match le {
LdapError::Io { source, .. } => source,
_ => io::Error::new(io::ErrorKind::Other, format!("{}", le)),
}
}
}
/// Common components of an LDAP operation result.
///
/// This structure faithfully replicates the components dictated by the standard,
/// and is distinctly C-like with its reliance on numeric codes for the indication
/// of outcome. It would be tempting to hide it behind an automatic `Result`-like
/// interface, but there are scenarios where this would preclude intentional
/// incorporation of error conditions into query design. Instead, the struct
/// implements helper methods, [`success()`](#method.success) and
/// [`non_error()`](#method.non_error), which may be used for ergonomic error
/// handling when simple condition checking suffices.
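///
/// A hedged sketch of that condition-checking style (assuming an async
/// `Ldap` handle from this crate; the DN and password are illustrative):
///
/// ```ignore
/// let res = ldap.simple_bind("cn=admin,dc=example,dc=org", "secret").await?;
/// // Turn any non-success result code into an LdapError.
/// let res = res.success()?;
/// println!("bind rc={}", res.rc);
/// ```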
#[derive(Clone, Debug)]
pub struct LdapResult {
/// Result code.
///
/// Generally, the value of zero indicates successful completion, but there's
/// a number of other non-error codes arising as a result of various operations.
/// See [Section A.1 of RFC 4511](https://tools.ietf.org/html/rfc4511#appendix-A.1).
pub rc: u32,
/// Matched component DN, where applicable.
pub matched: String,
/// Additional diagnostic text.
pub text: String,
/// Referrals.
///
/// Absence of referrals is represented by an empty vector.
pub refs: Vec<String>,
/// Response controls.
///
/// Missing and empty controls are both represented by an empty vector.
pub ctrls: Vec<Control>,
}
#[doc(hidden)]
impl From<Tag> for LdapResult {
fn from(t: Tag) -> LdapResult {
<LdapResultExt as From<Tag>>::from(t).0
}
}
impl Error for LdapResult {}
impl fmt::Display for LdapResult {
fn fmt(&self, f: &mut fmt::Formatter) -> StdResult<(), fmt::Error> {
fn description(this: &LdapResult) -> &'static str {
match this.rc {
0 => "success",
1 => "operationsError",
2 => "protocolError",
3 => "timeLimitExceeded",
4 => "sizeLimitExceeded",
5 => "compareFalse",
6 => "compareTrue",
7 => "authMethodNotSupported",
8 => "strongerAuthRequired",
10 => "referral",
11 => "adminLimitExceeded",
12 => "unavailableCriticalExtension",
13 => "confidentialityRequired",
14 => "saslBindInProgress",
16 => "noSuchAttribute",
17 => "undefinedAttributeType",
18 => "inappropriateMatching",
19 => "constraintViolation",
20 => "attributeOrValueExists",
21 => "invalidAttributeSyntax",
32 => "noSuchObject",
33 => "aliasProblem",
34 => "invalidDNSyntax",
36 => "aliasDereferencingProblem",
48 => "inappropriateAuthentication",
49 => "invalidCredentials",
50 => "insufficientAccessRights",
51 => "busy",
52 => "unavailable",
53 => "unwillingToPerform",
54 => "loopDetect",
64 => "namingViolation",
65 => "objectClassViolation",
66 => "notAllowedOnNonLeaf",
67 => "notAllowedOnRDN",
68 => "entryAlreadyExists",
69 => "objectClassModsProhibited",
71 => "affectsMultipleDSAs",
80 => "other",
88 => "abandoned",
122 => "assertionFailed",
_ => "unknown",
}
}
write!(
f,
"rc={} ({}), dn: \"{}\", text: \"{}\"",
self.rc,
description(self),
self.matched,
self.text
)
}
}
impl LdapResult {
/// If the result code is zero, return the instance itself wrapped
/// in `Ok()`, otherwise wrap the instance in an `LdapError`.
pub fn success(self) -> Result<Self> {
if self.rc == 0 {
Ok(self)
} else {
Err(LdapError::from(self))
}
}
/// If the result code is 0 or 10 (referral), return the instance
/// itself wrapped in `Ok()`, otherwise wrap the instance in an
/// `LdapError`.
pub fn non_error(self) -> Result<Self> {
if self.rc == 0 || self.rc == 10 {
Ok(self)
} else {
Err(LdapError::from(self))
}
}
}
#[derive(Clone, Debug)]
pub(crate) struct LdapResultExt(pub LdapResult, pub Exop);
impl From<Tag> for LdapResultExt {
fn from(t: Tag) -> LdapResultExt {
let t = match t {
Tag::StructureTag(t) => t,
Tag::Null(_) => {
return LdapResultExt(
LdapResult {
rc: 0,
matched: String::from(""),
text: String::from(""),
refs: vec![],
ctrls: vec![],
},
Exop {
name: None,
val: None,
},
)
}
_ => unimplemented!(),
};
let mut tags = t.expect_constructed().expect("result sequence").into_iter();
let rc = match parse_uint(
tags.next()
.expect("element")
.match_class(TagClass::Universal)
.and_then(|t| t.match_id(Types::Enumerated as u64))
.and_then(|t| t.expect_primitive())
.expect("result code")
.as_slice(),
) {
IResult::Done(_, rc) => rc as u32,
_ => panic!("failed to parse result code"),
};
let matched = String::from_utf8(
tags.next()
.expect("element")
.expect_primitive()
.expect("octet string"),
)
.expect("matched dn");
let text = String::from_utf8(
tags.next()
.expect("element")
.expect_primitive()
.expect("octet string"),
)
.expect("diagnostic message");
let mut refs = Vec::new();
let mut exop_name = None;
let mut exop_val = None;
loop {
match tags.next() {
None => break,
Some(comp) => match comp.id {
3 => {
refs.extend(parse_refs(comp));
}
10 => {
exop_name = Some(
String::from_utf8(comp.expect_primitive().expect("octet string"))
.expect("exop name"),
);
}
11 => {
exop_val = Some(comp.expect_primitive().expect("octet string"));
}
_ => (),
},
}
}
LdapResultExt(
LdapResult {
rc,
matched,
text,
refs,
ctrls: vec![],
},
Exop {
name: exop_name,
val: exop_val,
},
)
}
}
/// Wrapper for results of a Search operation which returns all entries at once.
///
/// The wrapper exists so that methods [`success()`](#method.success) and
/// [`non_error()`](#method.non_error) can be called on an instance. Those methods
/// destructure the wrapper and return its components as elements of an anonymous
/// tuple.
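///
/// A hedged sketch (assuming an async `Ldap` handle from this crate; the
/// base DN, filter, and attribute list are illustrative):
///
/// ```ignore
/// let (entries, res) = ldap
///     .search("dc=example,dc=org", Scope::Subtree, "(objectClass=*)", vec!["cn"])
///     .await?
///     .success()?;
/// ```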
#[derive(Clone, Debug)]
pub struct SearchResult(pub Vec<ResultEntry>, pub LdapResult);
impl SearchResult {
/// If the result code is zero, return an anonymous tuple of component structs
/// wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn success(self) -> Result<(Vec<ResultEntry>, LdapResult)> {
if self.1.rc == 0 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
/// If the result code is 0 or 10 (referral), return an anonymous tuple of component
/// structs wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn non_error(self) -> Result<(Vec<ResultEntry>, LdapResult)> {
if self.1.rc == 0 || self.1.rc == 10 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
}
/// Wrapper for the result of a Compare operation.
///
/// Compare uniquely has two non-zero return codes to indicate the outcome of a successful
/// comparison, while other return codes indicate errors, as usual (except 10 for referral).
/// The [`equal()`](#method.equal) method optimizes for the expected case of ignoring
/// referrals; [`non_error()`](#method.non_error) can be used when that's not possible.
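///
/// A hedged sketch (assuming an async `Ldap` handle from this crate; the DN,
/// attribute, and value are illustrative):
///
/// ```ignore
/// let matched: bool = ldap
///     .compare("uid=john,ou=People,dc=example,dc=org", "mail", "john@example.org")
///     .await?
///     .equal()?;
/// ```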
#[derive(Clone, Debug)]
pub struct CompareResult(pub LdapResult);
impl CompareResult {
/// If the result code is 5 (compareFalse) or 6 (compareTrue), return the corresponding
/// boolean value wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn equal(self) -> Result<bool> {
match self.0.rc {
5 => Ok(false),
6 => Ok(true),
_ => Err(LdapError::from(self.0)),
}
}
/// If the result code is 5 (compareFalse), 6 (compareTrue), or 10 (referral), return
/// the inner `LdapResult`, otherwise rewrap `LdapResult` in an `LdapError`.
pub fn non_error(self) -> Result<LdapResult> {
if self.0.rc == 5 || self.0.rc == 6 || self.0.rc == 10 {
Ok(self.0)
} else {
Err(LdapError::from(self.0))
}
}
}
/// Wrapper for the result of an Extended operation.
///
/// Similarly to [`SearchResult`](struct.SearchResult.html), methods
/// [`success()`](#method.success) and [`non_error()`](#method.non_error) can be
/// called on an instance, and will destructure the wrapper into an anonymous
/// tuple of its components.
#[derive(Clone, Debug)]
pub struct ExopResult(pub Exop, pub LdapResult);
impl ExopResult {
/// If the result code is zero, return an anonymous tuple of component structs
/// wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn success(self) -> Result<(Exop, LdapResult)> {
if self.1.rc == 0 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
/// If the result code is 0 or 10 (referral), return an anonymous tuple of component
/// structs wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn non_error(self) -> Result<(Exop, LdapResult)> {
if self.1.rc == 0 || self.1.rc == 10 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
}
| CompareResult | identifier_name |
result.rs | //! Operation result structures and helpers.
//!
//! Most LDAP operations return an [`LdapResult`](struct.LdapResult.html). This module
//! contains its definition, as well as that of a number of wrapper structs and
//! helper methods, which adapt LDAP result and error handling to be a closer
//! match to Rust conventions.
use std::error::Error;
use std::fmt;
use std::io;
use std::result::Result as StdResult;
use crate::controls::Control;
use crate::exop::Exop;
use crate::protocol::{LdapOp, MaybeControls, ResultSender};
use crate::search::parse_refs;
use crate::search::ResultEntry;
use crate::RequestId;
use lber::common::TagClass;
use lber::parse::parse_uint;
use lber::structures::Tag;
use lber::universal::Types;
use lber::IResult;
use thiserror::Error;
use tokio::sync::{mpsc, oneshot};
use tokio::time;
/// Type alias for the standard `Result` with the fixed `LdapError` error part.
pub type Result<T> = std::result::Result<T, LdapError>;
/// Error variants recognized by the library.
#[derive(Debug, Error)]
pub enum LdapError {
/// No path given for a `ldapi://` URL.
#[error("empty Unix domain socket path")]
EmptyUnixPath,
/// A `ldapi://` URL contains a port spec, which it shouldn't.
#[error("the port must be empty in the ldapi scheme")]
PortInUnixPath,
/// Encapsulated I/O error.
#[error("I/O error: {source}")]
Io {
#[from]
source: io::Error,
},
/// Error while sending an operation to the connection handler.
#[error("op send error: {source}")]
OpSend {
#[from]
source: mpsc::error::SendError<(RequestId, LdapOp, Tag, MaybeControls, ResultSender)>,
},
/// Error while receiving operation results from the connection handler.
#[error("result recv error: {source}")]
ResultRecv {
#[from]
source: oneshot::error::RecvError,
},
/// Error while sending an internal ID scrubbing request to the connection handler.
#[error("id scrub send error: {source}")]
IdScrubSend {
#[from]
source: mpsc::error::SendError<RequestId>,
},
/// Operation or connection timeout.
#[error("timeout: {elapsed}")]
Timeout {
#[from]
elapsed: time::Elapsed,
},
/// Error parsing the string representation of a search filter.
#[error("filter parse error")]
FilterParsing,
/// Premature end of a search stream.
#[error("premature end of search stream")]
EndOfStream,
/// URL parsing error.
#[error("url parse error: {source}")]
UrlParsing {
#[from]
source: url::ParseError,
},
/// Unknown LDAP URL scheme.
#[error("unknown LDAP URL scheme: {0}")]
UnknownScheme(String),
#[cfg(feature = "tls-native")]
/// Native TLS library error.
#[error("native TLS error: {source}")]
NativeTLS {
#[from]
source: native_tls::Error,
},
#[cfg(feature = "tls-rustls")]
/// Rustls library error.
#[error("rustls error: {source}")]
Rustls {
#[from]
source: rustls::TLSError,
},
#[cfg(feature = "tls-rustls")]
/// Rustls DNS name error.
#[error("rustls DNS error: {source}")]
DNSName {
#[from]
source: tokio_rustls::webpki::InvalidDNSNameError,
},
/// LDAP operation result with an error return code.
#[error("LDAP operation result: {result}")]
LdapResult {
#[from]
result: LdapResult,
},
/// No values provided for the Add operation.
#[error("empty value set for Add")]
AddNoValues,
    /// Adapter initialization error.
#[error("adapter init error: {0}")]
AdapterInit(String),
}
impl From<LdapError> for io::Error {
fn from(le: LdapError) -> io::Error {
match le {
            LdapError::Io { source, .. } => source,
_ => io::Error::new(io::ErrorKind::Other, format!("{}", le)),
}
}
}
/// Common components of an LDAP operation result.
/// | /// interface, but there are scenarios where this would preclude intentional
/// incorporation of error conditions into query design. Instead, the struct
/// implements helper methods, [`success()`](#method.success) and
/// [`non_error()`](#method.non_error), which may be used for ergonomic error
/// handling when simple condition checking suffices.
#[derive(Clone, Debug)]
pub struct LdapResult {
/// Result code.
///
/// Generally, the value of zero indicates successful completion, but there's
/// a number of other non-error codes arising as a result of various operations.
/// See [Section A.1 of RFC 4511](https://tools.ietf.org/html/rfc4511#appendix-A.1).
pub rc: u32,
/// Matched component DN, where applicable.
pub matched: String,
/// Additional diagnostic text.
pub text: String,
/// Referrals.
///
/// Absence of referrals is represented by an empty vector.
pub refs: Vec<String>,
/// Response controls.
///
/// Missing and empty controls are both represented by an empty vector.
pub ctrls: Vec<Control>,
}
#[doc(hidden)]
impl From<Tag> for LdapResult {
fn from(t: Tag) -> LdapResult {
<LdapResultExt as From<Tag>>::from(t).0
}
}
impl Error for LdapResult {}
impl fmt::Display for LdapResult {
fn fmt(&self, f: &mut fmt::Formatter) -> StdResult<(), fmt::Error> {
fn description(this: &LdapResult) -> &'static str {
match this.rc {
0 => "success",
1 => "operationsError",
2 => "protocolError",
3 => "timeLimitExceeded",
4 => "sizeLimitExceeded",
5 => "compareFalse",
6 => "compareTrue",
7 => "authMethodNotSupported",
8 => "strongerAuthRequired",
10 => "referral",
11 => "adminLimitExceeded",
12 => "unavailableCriticalExtension",
13 => "confidentialityRequired",
14 => "saslBindInProgress",
16 => "noSuchAttribute",
17 => "undefinedAttributeType",
18 => "inappropriateMatching",
19 => "constraintViolation",
20 => "attributeOrValueExists",
21 => "invalidAttributeSyntax",
32 => "noSuchObject",
33 => "aliasProblem",
34 => "invalidDNSyntax",
36 => "aliasDereferencingProblem",
48 => "inappropriateAuthentication",
49 => "invalidCredentials",
50 => "insufficientAccessRights",
51 => "busy",
52 => "unavailable",
53 => "unwillingToPerform",
54 => "loopDetect",
64 => "namingViolation",
65 => "objectClassViolation",
66 => "notAllowedOnNonLeaf",
67 => "notAllowedOnRDN",
68 => "entryAlreadyExists",
69 => "objectClassModsProhibited",
71 => "affectsMultipleDSAs",
80 => "other",
88 => "abandoned",
122 => "assertionFailed",
_ => "unknown",
}
}
write!(
f,
"rc={} ({}), dn: \"{}\", text: \"{}\"",
self.rc,
description(self),
self.matched,
self.text
)
}
}
impl LdapResult {
/// If the result code is zero, return the instance itself wrapped
/// in `Ok()`, otherwise wrap the instance in an `LdapError`.
pub fn success(self) -> Result<Self> {
if self.rc == 0 {
Ok(self)
} else {
Err(LdapError::from(self))
}
}
/// If the result code is 0 or 10 (referral), return the instance
/// itself wrapped in `Ok()`, otherwise wrap the instance in an
/// `LdapError`.
pub fn non_error(self) -> Result<Self> {
if self.rc == 0 || self.rc == 10 {
Ok(self)
} else {
Err(LdapError::from(self))
}
}
}
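// Typical call-site pattern for these helpers (sketch; `do_op()` stands in for
// any call that yields an `LdapResult`):
//
//     let res = do_op().success()?; // any rc != 0 becomes Err(LdapError::LdapResult { .. })
//     println!("diagnostic: {}", res.text);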
#[derive(Clone, Debug)]
pub(crate) struct LdapResultExt(pub LdapResult, pub Exop);
impl From<Tag> for LdapResultExt {
fn from(t: Tag) -> LdapResultExt {
let t = match t {
Tag::StructureTag(t) => t,
Tag::Null(_) => {
return LdapResultExt(
LdapResult {
rc: 0,
matched: String::from(""),
text: String::from(""),
refs: vec![],
ctrls: vec![],
},
Exop {
name: None,
val: None,
},
)
}
_ => unimplemented!(),
};
let mut tags = t.expect_constructed().expect("result sequence").into_iter();
let rc = match parse_uint(
tags.next()
.expect("element")
.match_class(TagClass::Universal)
.and_then(|t| t.match_id(Types::Enumerated as u64))
.and_then(|t| t.expect_primitive())
.expect("result code")
.as_slice(),
) {
IResult::Done(_, rc) => rc as u32,
_ => panic!("failed to parse result code"),
};
let matched = String::from_utf8(
tags.next()
.expect("element")
.expect_primitive()
.expect("octet string"),
)
.expect("matched dn");
let text = String::from_utf8(
tags.next()
.expect("element")
.expect_primitive()
.expect("octet string"),
)
.expect("diagnostic message");
let mut refs = Vec::new();
let mut exop_name = None;
let mut exop_val = None;
loop {
match tags.next() {
None => break,
Some(comp) => match comp.id {
3 => {
refs.extend(parse_refs(comp));
}
10 => {
exop_name = Some(
String::from_utf8(comp.expect_primitive().expect("octet string"))
.expect("exop name"),
);
}
11 => {
exop_val = Some(comp.expect_primitive().expect("octet string"));
}
_ => (),
},
}
}
LdapResultExt(
LdapResult {
rc,
matched,
text,
refs,
ctrls: vec![],
},
Exop {
name: exop_name,
val: exop_val,
},
)
}
}
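// For reference, the BER structure walked above is the LDAPResult sequence of
// RFC 4511, section 4.1.9; the [10]/[11] tags are the ExtendedResponse
// additions of section 4.12:
//
//     LDAPResult ::= SEQUENCE {
//          resultCode         ENUMERATED { ... },
//          matchedDN          LDAPDN,
//          diagnosticMessage  LDAPString,
//          referral           [3] Referral OPTIONAL }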
/// Wrapper for results of a Search operation which returns all entries at once.
///
/// The wrapper exists so that methods [`success()`](#method.success) and
/// [`non_error()`](#method.non_error) can be called on an instance. Those methods
/// destructure the wrapper and return its components as elements of an anonymous
/// tuple.
#[derive(Clone, Debug)]
pub struct SearchResult(pub Vec<ResultEntry>, pub LdapResult);
impl SearchResult {
/// If the result code is zero, return an anonymous tuple of component structs
/// wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn success(self) -> Result<(Vec<ResultEntry>, LdapResult)> {
if self.1.rc == 0 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
/// If the result code is 0 or 10 (referral), return an anonymous tuple of component
/// structs wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn non_error(self) -> Result<(Vec<ResultEntry>, LdapResult)> {
if self.1.rc == 0 || self.1.rc == 10 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
}
/// Wrapper for the result of a Compare operation.
///
/// Compare uniquely has two non-zero return codes to indicate the outcome of a successful
/// comparison, while other return codes indicate errors, as usual (except 10 for referral).
/// The [`equal()`](#method.equal) method optimizes for the expected case of ignoring
/// referrals; [`non_error()`](#method.non_error) can be used when that's not possible.
#[derive(Clone, Debug)]
pub struct CompareResult(pub LdapResult);
impl CompareResult {
/// If the result code is 5 (compareFalse) or 6 (compareTrue), return the corresponding
/// boolean value wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn equal(self) -> Result<bool> {
match self.0.rc {
5 => Ok(false),
6 => Ok(true),
_ => Err(LdapError::from(self.0)),
}
}
/// If the result code is 5 (compareFalse), 6 (compareTrue), or 10 (referral), return
/// the inner `LdapResult`, otherwise rewrap `LdapResult` in an `LdapError`.
pub fn non_error(self) -> Result<LdapResult> {
if self.0.rc == 5 || self.0.rc == 6 || self.0.rc == 10 {
Ok(self.0)
} else {
Err(LdapError::from(self.0))
}
}
}
/// Wrapper for the result of an Extended operation.
///
/// Similarly to [`SearchResult`](struct.SearchResult.html), methods
/// [`success()`](#method.success) and [`non_error()`](#method.non_error) can be
/// called on an instance, and will destructure the wrapper into an anonymous
/// tuple of its components.
#[derive(Clone, Debug)]
pub struct ExopResult(pub Exop, pub LdapResult);
impl ExopResult {
/// If the result code is zero, return an anonymous tuple of component structs
/// wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn success(self) -> Result<(Exop, LdapResult)> {
if self.1.rc == 0 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
/// If the result code is 0 or 10 (referral), return an anonymous tuple of component
/// structs wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn non_error(self) -> Result<(Exop, LdapResult)> {
if self.1.rc == 0 || self.1.rc == 10 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
} | /// This structure faithfully replicates the components dictated by the standard,
/// and is distinctly C-like with its reliance on numeric codes for the indication
/// of outcome. It would be tempting to hide it behind an automatic `Result`-like | random_line_split |
deflate.rs | use super::Source;
use super::huffman::Huffman;
use super::bitstream::BitStream;
use super::bufferedwriter::BufferedWriter;
struct DynamicBlock {
// If we encounter a repeat, we should set our state so that we keep producing
// repeats until they run out, instead of reading from the bit stream.
repeats_remaining: usize,
last_repeat_distance: usize,
// Used to determine next byte from next bit(s) in stream
literal_length_huffman: Huffman<usize>,
distance_huffman: Huffman<usize>,
}
impl DynamicBlock {
fn new(bit_stream: &mut BitStream) -> DynamicBlock {
let hlit = bit_stream.next_bits(5) as usize + 257;
let hdist = bit_stream.next_bits(5) as usize + 1;
let hclen = bit_stream.next_bits(4) as usize + 4;
println!("hlit : {}", hlit);
println!("hdist : {}", hdist);
println!("hclen : {}", hclen);
// Read this first, this is next in the format and contains the code length
// compression tree to decode the literal/length huffman and the distance huffman
let code_length_huffman = DynamicBlock::read_code_length_huffman(hclen, bit_stream);
// Use code_length_huffman to build the literal_length and distance huffmans
let literal_length_huffman = DynamicBlock::read_literal_length_huffman(hlit, bit_stream, &code_length_huffman);
let distance_huffman = DynamicBlock::read_distance_huffman(hdist, bit_stream, &code_length_huffman);
DynamicBlock{
repeats_remaining: 0,
last_repeat_distance: 0,
literal_length_huffman,
distance_huffman,
}
}
fn read_repeat_distance(distance_huffman: &Huffman<usize>, input_stream: &mut BitStream) -> usize {
// A code ends up mapping to some base distance plus some
// extra bits to read to add to that base distance
let code = DynamicBlock::get_next_huffman_encoded_value(&distance_huffman, input_stream);
println!("Modulo: {}", code % 2);
let base_distance = match code {
            0..=3 => {
code as u32 + 1
},
_ if code % 2 == 0 => {
println!("Even code");
2u32.pow(code as u32 / 2) + 1
},
_ if code % 2 == 1 => {
println!("Odd code");
println!("{}", 2u32.pow(code as u32 / 2));
println!("{}", 2u32.pow(code as u32 / 2 - 1));
println!("{}", 2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1);
2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1
},
_ => panic!("Logic error handling base distance"),
};
let num_distance_extra_bits = match code {
            0..=3 => 0,
            4..=29 => (code / 2) - 1,
_ => panic!("Distance is undefined for: {}", code),
};
let distance_offset = input_stream.next_bits(num_distance_extra_bits) as u32;
println!("Code: {} Base Distance: {} Offset: {} Bits: {}", code, base_distance, distance_offset, num_distance_extra_bits);
let distance = base_distance + distance_offset;
distance as usize
}
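    // Worked example of the mapping above: distance code 7 is odd, so its base
    // distance is 2^(7/2) + 2^(7/2 - 1) + 1 = 8 + 4 + 1 = 13, read together with
    // (7 / 2) - 1 = 2 extra bits; code 7 therefore covers distances 13..=16,
    // which agrees with the distance table in RFC 1951, section 3.2.5.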
fn read_repeat_length(value: usize, input_stream: &mut BitStream) -> usize {
let num_length_extra_bits = match value {
            257..=264 => 0,
            265..=284 => (value - 265) / 4 + 1,
285 => 0,
_ => panic!("Unsupported value for length: {}", value),
};
let length_offset = input_stream.next_bits(num_length_extra_bits) as usize;
let base_length = match value {
            257..=264 => value - 254,
            265..=268 => 11 + 2 * (value - 265),
            269..=272 => 19 + 4 * (value - 269),
            273..=276 => 35 + 8 * (value - 273),
            277..=280 => 67 + 16 * (value - 277),
            281..=284 => 131 + 32 * (value - 281),
285 => 258,
_ => panic!("Unsupported value for length: {}", value),
};
println!("Base Length: {} Offset: {} Bits: {}", base_length, length_offset, num_length_extra_bits);
let length = base_length + length_offset;
        length
}
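    // Worked example of the mapping above: length value 273 carries
    // (273 - 265) / 4 + 1 = 3 extra bits on top of base length 35, so it
    // encodes match lengths 35..=42, matching the length table of RFC 1951.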
fn read_code_length_huffman(length: usize, input_stream: &mut BitStream) -> Huffman<usize> {
        // Read the hclen code lengths (3 bits each) for the code length alphabet
let alphabet = vec![16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
let mut bit_lengths = vec![0; alphabet.len()];
assert_eq!(alphabet.len(), 19);
for index in 0..length {
let length = input_stream.next_bits(3) as usize;
bit_lengths[index] = length
}
println!("Alphabet : {:?}", alphabet);
println!("Bit Lengths : {:?}", bit_lengths);
let h = Huffman::new(&alphabet, &bit_lengths);
println!("Code Length Huffman = {:?}", h);
h
}
fn read_literal_length_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
// Includes 0 and 285, but not 286
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Literal/Length Huffman = {:?}", result);
result
}
fn read_distance_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Distance Huffman = {:?}", result);
result
}
fn read_code_lengths(count: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Vec<usize> {
let mut lengths = Vec::new();
while lengths.len() < count {
let length = DynamicBlock::get_next_huffman_encoded_value(code_length_huffman, input_stream);
println!("Found this length: {}", length);
// Literal value
if length <= 15 {
lengths.push(length);
continue
}
// Otherwise, it's a repeater of a previous value or zero
let (repeat_value, count) = match length {
16 => {
let value = (*lengths.last().expect("Cannot repeat at start of stream")).clone();
let count = input_stream.next_bits(2) + 3;
(value, count)
},
17 => (0, input_stream.next_bits(3) + 3),
18 => (0, input_stream.next_bits(7) + 11),
_ => panic!("Unsupported code length {}", length)
};
for _ in 0..count {
lengths.push(repeat_value)
}
}
// By the end, we should NOT have more or less than we want
// The encoding should generate exactly `count` entries into
// the list of code lengths
assert_eq!(lengths.len(), count);
println!("Lengths by alphabet: {:?}", lengths);
lengths
}
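    // Example of the run-length codes handled above (RFC 1951, section 3.2.7):
    // decoding [8, 16 (extra bits 01)] yields 8 followed by 3 + 1 = 4 repeats
    // of the previous length, i.e. five 8s in total.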
fn get_next_huffman_encoded_value<T : Copy + Eq>(huffman: &Huffman<T>, input_stream: &mut BitStream) -> T {
match huffman {
Huffman::Branch{zero, one} => {
if input_stream.next() {
DynamicBlock::get_next_huffman_encoded_value(one, input_stream)
} else {
DynamicBlock::get_next_huffman_encoded_value(zero, input_stream)
}
},
Huffman::Leaf(value) => *value,
Huffman::DeadEnd => panic!("Reached dead end!"),
}
}
fn next(&mut self, bit_stream: &mut BitStream) -> DEFLATEResult {
if self.repeats_remaining > 0 {
self.repeats_remaining -= 1;
return DEFLATEResult::Repeat(self.last_repeat_distance)
}
let value = DynamicBlock::get_next_huffman_encoded_value(&self.literal_length_huffman, bit_stream);
println!("Found value: {}", value);
if value < 256 {
// This is a literal byte to emit to the output stream
// We know it's a byte because of the check above and
// it's defined that way by the standard
DEFLATEResult::Literal(value as u8)
} else if value == 256 {
println!("End of block encountered");
DEFLATEResult::EndOfBlock
} else if value <= 285 {
// The value is between [257, 285] inclusive on both ends
// This means it's a back reference so we have to copy
// from the buffer of written bytes some distance away
// and for some amount of repetition
let repeat_length = DynamicBlock::read_repeat_length(value, bit_stream);
let distance = DynamicBlock::read_repeat_distance(&self.distance_huffman, bit_stream);
self.last_repeat_distance = distance;
self.repeats_remaining = repeat_length - 1;
DEFLATEResult::Repeat(distance)
} else {
panic!("Unsupported value: {}", value);
}
}
}
enum DEFLATEResult {
Literal(u8),
EndOfBlock,
Repeat(usize),
}
/// Keeps track of the state of the deflater. The state is necessary because
/// although we emit one byte at a time, we can generate multiple bytes at a
/// time with the repeat function.
pub struct DEFLATEReader<'a> {
// The following two fields are used to manage input/output
buffered_writer: BufferedWriter,
bit_stream: BitStream<'a>,
// The following two fields control if we read another block
has_seen_final_block: bool,
current_block: Option<DynamicBlock>,
}
const BTYPE_DYNAMIC : u8 = 0b10;
impl <'a> DEFLATEReader<'a> {
pub fn new<T : Source<u8> + 'a>(input: &'a mut T) -> DEFLATEReader<'a> |
}
impl <'a> Source<Option<u8>> for DEFLATEReader<'a> {
fn next(self: &mut DEFLATEReader<'a>) -> Option<u8> {
// We set this field when we have finished with a block or haven't started yet
// this field tells us that we should begin reading a new block which involves
// decoding all the headers and huffman trees
if self.current_block.is_none() {
// We have already fully consumed the final block
if self.has_seen_final_block {
return None
}
// Is this the final block?
let b_final = self.bit_stream.next();
println!("b_final: {}", b_final);
self.has_seen_final_block = b_final;
            // What type of block is this?
let b_type = self.bit_stream.next_bits(2) as u8;
println!("b_type : {:02b}", b_type);
self.current_block = match b_type {
BTYPE_DYNAMIC => Some(DynamicBlock::new(&mut self.bit_stream)),
_ => panic!("Unhandled block type: {:02b}", b_type),
};
}
// The above actions should have established a current block if one was needed
assert!(self.current_block.is_some());
match self.current_block.as_mut() {
Some(block) => match block.next(&mut self.bit_stream) {
DEFLATEResult::Literal(byte) => {
println!("Is Literal..");
self.buffered_writer.write(byte);
Some(byte)
},
DEFLATEResult::EndOfBlock => {
println!("Is EOB..");
self.current_block = None;
let byte = self.next();
println!("Continuing EOB");
// We probably don't need the following because
// the other branches handle writing already
// if byte.is_some() {
// self.buffered_writer.write(byte.unwrap());
// }
byte
},
DEFLATEResult::Repeat(distance) => {
println!("Is Repeat..");
Some(self.buffered_writer.repeat(distance))
},
},
None => panic!("We should have assured not none above"),
}
}
}
| {
DEFLATEReader{
buffered_writer: BufferedWriter::new(),
bit_stream: BitStream::new(input),
has_seen_final_block: false,
current_block: None,
}
} | identifier_body |
deflate.rs | use super::Source;
use super::huffman::Huffman;
use super::bitstream::BitStream;
use super::bufferedwriter::BufferedWriter;
struct DynamicBlock {
// If we encounter a repeat, we should set our state so that we keep producing
// repeats until they run out, instead of reading from the bit stream.
repeats_remaining: usize,
last_repeat_distance: usize,
// Used to determine next byte from next bit(s) in stream
literal_length_huffman: Huffman<usize>,
distance_huffman: Huffman<usize>,
}
impl DynamicBlock {
fn new(bit_stream: &mut BitStream) -> DynamicBlock {
let hlit = bit_stream.next_bits(5) as usize + 257;
let hdist = bit_stream.next_bits(5) as usize + 1;
let hclen = bit_stream.next_bits(4) as usize + 4;
println!("hlit : {}", hlit);
println!("hdist : {}", hdist);
println!("hclen : {}", hclen);
// Read this first, this is next in the format and contains the code length
// compression tree to decode the literal/length huffman and the distance huffman
let code_length_huffman = DynamicBlock::read_code_length_huffman(hclen, bit_stream);
// Use code_length_huffman to build the literal_length and distance huffmans
let literal_length_huffman = DynamicBlock::read_literal_length_huffman(hlit, bit_stream, &code_length_huffman);
let distance_huffman = DynamicBlock::read_distance_huffman(hdist, bit_stream, &code_length_huffman);
DynamicBlock{
repeats_remaining: 0,
last_repeat_distance: 0,
literal_length_huffman,
distance_huffman,
}
}
fn read_repeat_distance(distance_huffman: &Huffman<usize>, input_stream: &mut BitStream) -> usize {
// A code ends up mapping to some base distance plus some
// extra bits to read to add to that base distance
let code = DynamicBlock::get_next_huffman_encoded_value(&distance_huffman, input_stream);
println!("Modulo: {}", code % 2);
let base_distance = match code {
            0..=3 => {
code as u32 + 1
},
_ if code % 2 == 0 => {
println!("Even code");
2u32.pow(code as u32 / 2) + 1
},
_ if code % 2 == 1 => {
println!("Odd code");
println!("{}", 2u32.pow(code as u32 / 2));
println!("{}", 2u32.pow(code as u32 / 2 - 1));
println!("{}", 2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1);
2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1
},
_ => panic!("Logic error handling base distance"),
};
let num_distance_extra_bits = match code {
            0..=3 => 0,
            4..=29 => (code / 2) - 1,
_ => panic!("Distance is undefined for: {}", code),
};
let distance_offset = input_stream.next_bits(num_distance_extra_bits) as u32;
println!("Code: {} Base Distance: {} Offset: {} Bits: {}", code, base_distance, distance_offset, num_distance_extra_bits);
let distance = base_distance + distance_offset;
distance as usize
}
fn read_repeat_length(value: usize, input_stream: &mut BitStream) -> usize {
let num_length_extra_bits = match value {
            257..=264 => 0,
            265..=284 => (value - 265) / 4 + 1,
285 => 0,
_ => panic!("Unsupported value for length: {}", value),
};
let length_offset = input_stream.next_bits(num_length_extra_bits) as usize;
let base_length = match value {
            257..=264 => value - 254,
            265..=268 => 11 + 2 * (value - 265),
            269..=272 => 19 + 4 * (value - 269),
            273..=276 => 35 + 8 * (value - 273),
            277..=280 => 67 + 16 * (value - 277),
            281..=284 => 131 + 32 * (value - 281),
285 => 258,
_ => panic!("Unsupported value for length: {}", value),
};
println!("Base Length: {} Offset: {} Bits: {}", base_length, length_offset, num_length_extra_bits);
let length = base_length + length_offset;
        length
}
fn read_code_length_huffman(length: usize, input_stream: &mut BitStream) -> Huffman<usize> {
        // Read the hclen code lengths (3 bits each) for the code length alphabet
let alphabet = vec![16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
let mut bit_lengths = vec![0; alphabet.len()];
assert_eq!(alphabet.len(), 19);
for index in 0..length {
let length = input_stream.next_bits(3) as usize;
bit_lengths[index] = length
}
println!("Alphabet : {:?}", alphabet);
println!("Bit Lengths : {:?}", bit_lengths);
let h = Huffman::new(&alphabet, &bit_lengths);
println!("Code Length Huffman = {:?}", h);
h
}
fn read_literal_length_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
// Includes 0 and 285, but not 286
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Literal/Length Huffman = {:?}", result);
result
}
fn read_distance_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Distance Huffman = {:?}", result);
result
}
fn read_code_lengths(count: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Vec<usize> {
let mut lengths = Vec::new();
while lengths.len() < count {
let length = DynamicBlock::get_next_huffman_encoded_value(code_length_huffman, input_stream);
println!("Found this length: {}", length);
// Literal value
if length <= 15 {
lengths.push(length);
continue
}
// Otherwise, it's a repeater of a previous value or zero
let (repeat_value, count) = match length {
16 => {
let value = (*lengths.last().expect("Cannot repeat at start of stream")).clone();
let count = input_stream.next_bits(2) + 3;
(value, count)
},
17 => (0, input_stream.next_bits(3) + 3),
18 => (0, input_stream.next_bits(7) + 11),
_ => panic!("Unsupported code length {}", length)
};
for _ in 0..count {
lengths.push(repeat_value)
}
}
// By the end, we should NOT have more or less than we want
// The encoding should generate exactly `count` entries into
// the list of code lengths
assert_eq!(lengths.len(), count);
println!("Lengths by alphabet: {:?}", lengths);
lengths
}
fn get_next_huffman_encoded_value<T : Copy + Eq>(huffman: &Huffman<T>, input_stream: &mut BitStream) -> T {
match huffman {
Huffman::Branch{zero, one} => {
if input_stream.next() {
DynamicBlock::get_next_huffman_encoded_value(one, input_stream)
} else {
DynamicBlock::get_next_huffman_encoded_value(zero, input_stream)
}
},
Huffman::Leaf(value) => *value,
Huffman::DeadEnd => panic!("Reached dead end!"),
}
}
fn next(&mut self, bit_stream: &mut BitStream) -> DEFLATEResult {
if self.repeats_remaining > 0 {
self.repeats_remaining -= 1;
return DEFLATEResult::Repeat(self.last_repeat_distance)
}
let value = DynamicBlock::get_next_huffman_encoded_value(&self.literal_length_huffman, bit_stream);
println!("Found value: {}", value);
if value < 256 {
// This is a literal byte to emit to the output stream
// We know it's a byte because of the check above and
// it's defined that way by the standard
DEFLATEResult::Literal(value as u8)
} else if value == 256 {
println!("End of block encountered");
DEFLATEResult::EndOfBlock
} else if value <= 285 {
// The value is between [257, 285] inclusive on both ends
// This means it's a back reference so we have to copy
// from the buffer of written bytes some distance away
// and for some amount of repetition
let repeat_length = DynamicBlock::read_repeat_length(value, bit_stream);
let distance = DynamicBlock::read_repeat_distance(&self.distance_huffman, bit_stream);
self.last_repeat_distance = distance;
self.repeats_remaining = repeat_length - 1;
DEFLATEResult::Repeat(distance)
} else {
panic!("Unsupported value: {}", value);
}
}
}
enum DEFLATEResult {
Literal(u8),
EndOfBlock,
Repeat(usize),
}
/// Keeps track of the state of the deflater. The state is necessary because
/// although we emit one byte at a time, we can generate multiple bytes at a
/// time with the repeat function.
pub struct | <'a> {
// The following two fields are used to manage input/output
buffered_writer: BufferedWriter,
bit_stream: BitStream<'a>,
// The following two fields control if we read another block
has_seen_final_block: bool,
current_block: Option<DynamicBlock>,
}
const BTYPE_DYNAMIC : u8 = 0b10;
impl <'a> DEFLATEReader<'a> {
pub fn new<T : Source<u8> + 'a>(input: &'a mut T) -> DEFLATEReader<'a> {
DEFLATEReader{
buffered_writer: BufferedWriter::new(),
bit_stream: BitStream::new(input),
has_seen_final_block: false,
current_block: None,
}
}
}
impl <'a> Source<Option<u8>> for DEFLATEReader<'a> {
fn next(self: &mut DEFLATEReader<'a>) -> Option<u8> {
// We set this field when we have finished with a block or haven't started yet
// this field tells us that we should begin reading a new block which involves
// decoding all the headers and huffman trees
if self.current_block.is_none() {
// We have already fully consumed the final block
if self.has_seen_final_block {
return None
}
// Is this the final block?
let b_final = self.bit_stream.next();
println!("b_final: {}", b_final);
self.has_seen_final_block = b_final;
            // What type of block is this?
let b_type = self.bit_stream.next_bits(2) as u8;
println!("b_type : {:02b}", b_type);
self.current_block = match b_type {
BTYPE_DYNAMIC => Some(DynamicBlock::new(&mut self.bit_stream)),
_ => panic!("Unhandled block type: {:02b}", b_type),
};
}
// The above actions should have established a current block if one was needed
assert!(self.current_block.is_some());
match self.current_block.as_mut() {
Some(block) => match block.next(&mut self.bit_stream) {
DEFLATEResult::Literal(byte) => {
println!("Is Literal..");
self.buffered_writer.write(byte);
Some(byte)
},
DEFLATEResult::EndOfBlock => {
println!("Is EOB..");
self.current_block = None;
let byte = self.next();
println!("Continuing EOB");
// We probably don't need the following because
// the other branches handle writing already
// if byte.is_some() {
// self.buffered_writer.write(byte.unwrap());
// }
byte
},
DEFLATEResult::Repeat(distance) => {
println!("Is Repeat..");
Some(self.buffered_writer.repeat(distance))
},
},
None => panic!("We should have assured not none above"),
}
}
}
| DEFLATEReader | identifier_name |
deflate.rs | use super::Source;
use super::huffman::Huffman;
use super::bitstream::BitStream;
use super::bufferedwriter::BufferedWriter;
struct DynamicBlock {
// If we encounter a repeat, we should set our state so that we keep producing
// repeats until they run out, instead of reading from the bit stream.
repeats_remaining: usize,
last_repeat_distance: usize,
// Used to determine next byte from next bit(s) in stream
literal_length_huffman: Huffman<usize>,
distance_huffman: Huffman<usize>,
}
impl DynamicBlock {
fn new(bit_stream: &mut BitStream) -> DynamicBlock {
let hlit = bit_stream.next_bits(5) as usize + 257;
let hdist = bit_stream.next_bits(5) as usize + 1;
let hclen = bit_stream.next_bits(4) as usize + 4;
println!("hlit : {}", hlit);
println!("hdist : {}", hdist);
println!("hclen : {}", hclen);
// Read this first, this is next in the format and contains the code length
// compression tree to decode the literal/length huffman and the distance huffman
let code_length_huffman = DynamicBlock::read_code_length_huffman(hclen, bit_stream);
// Use code_length_huffman to build the literal_length and distance huffmans
let literal_length_huffman = DynamicBlock::read_literal_length_huffman(hlit, bit_stream, &code_length_huffman);
let distance_huffman = DynamicBlock::read_distance_huffman(hdist, bit_stream, &code_length_huffman);
DynamicBlock{
repeats_remaining: 0,
last_repeat_distance: 0,
literal_length_huffman,
distance_huffman,
}
}
fn read_repeat_distance(distance_huffman: &Huffman<usize>, input_stream: &mut BitStream) -> usize {
// A code ends up mapping to some base distance plus some
// extra bits to read to add to that base distance
let code = DynamicBlock::get_next_huffman_encoded_value(&distance_huffman, input_stream);
println!("Modulo: {}", code % 2);
let base_distance = match code {
            0..=3 => {
code as u32 + 1
},
_ if code % 2 == 0 => {
println!("Even code");
2u32.pow(code as u32 / 2) + 1
},
_ if code % 2 == 1 => {
println!("Odd code");
println!("{}", 2u32.pow(code as u32 / 2));
println!("{}", 2u32.pow(code as u32 / 2 - 1));
println!("{}", 2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1);
2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1
},
_ => panic!("Logic error handling base distance"),
};
let num_distance_extra_bits = match code {
            0..=3 => 0,
            4..=29 => (code / 2) - 1,
_ => panic!("Distance is undefined for: {}", code),
};
let distance_offset = input_stream.next_bits(num_distance_extra_bits) as u32;
println!("Code: {} Base Distance: {} Offset: {} Bits: {}", code, base_distance, distance_offset, num_distance_extra_bits);
let distance = base_distance + distance_offset;
distance as usize
}
fn read_repeat_length(value: usize, input_stream: &mut BitStream) -> usize {
let num_length_extra_bits = match value {
            257..=264 => 0,
            265..=284 => (value - 265) / 4 + 1,
285 => 0,
_ => panic!("Unsupported value for length: {}", value),
};
let length_offset = input_stream.next_bits(num_length_extra_bits) as usize;
let base_length = match value {
            257..=264 => value - 254,
            265..=268 => 11 + 2 * (value - 265),
            269..=272 => 19 + 4 * (value - 269),
            273..=276 => 35 + 8 * (value - 273),
            277..=280 => 67 + 16 * (value - 277),
            281..=284 => 131 + 32 * (value - 281),
285 => 258,
_ => panic!("Unsupported value for length: {}", value),
};
println!("Base Length: {} Offset: {} Bits: {}", base_length, length_offset, num_length_extra_bits);
let length = base_length + length_offset;
        length
}
fn read_code_length_huffman(length: usize, input_stream: &mut BitStream) -> Huffman<usize> {
        // Read the hclen code lengths (3 bits each) for the code length alphabet
let alphabet = vec![16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
let mut bit_lengths = vec![0; alphabet.len()];
assert_eq!(alphabet.len(), 19);
for index in 0..length {
let length = input_stream.next_bits(3) as usize;
bit_lengths[index] = length
}
println!("Alphabet : {:?}", alphabet);
println!("Bit Lengths : {:?}", bit_lengths);
let h = Huffman::new(&alphabet, &bit_lengths);
println!("Code Length Huffman = {:?}", h);
h
}
fn read_literal_length_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
// Includes 0 and 285, but not 286
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman); | println!("Literal/Length Huffman = {:?}", result);
result
}
fn read_distance_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Distance Huffman = {:?}", result);
result
}
fn read_code_lengths(count: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Vec<usize> {
let mut lengths = Vec::new();
while lengths.len() < count {
let length = DynamicBlock::get_next_huffman_encoded_value(code_length_huffman, input_stream);
println!("Found this length: {}", length);
// Literal value
if length <= 15 {
lengths.push(length);
continue
}
// Otherwise, it's a repeater of a previous value or zero
let (repeat_value, count) = match length {
16 => {
let value = (*lengths.last().expect("Cannot repeat at start of stream")).clone();
let count = input_stream.next_bits(2) + 3;
(value, count)
},
17 => (0, input_stream.next_bits(3) + 3),
18 => (0, input_stream.next_bits(7) + 11),
_ => panic!("Unsupported code length {}", length)
};
for _ in 0..count {
lengths.push(repeat_value)
}
}
// By the end, we should NOT have more or less than we want
// The encoding should generate exactly `count` entries into
// the list of code lengths
assert_eq!(lengths.len(), count);
println!("Lengths by alphabet: {:?}", lengths);
lengths
}
fn get_next_huffman_encoded_value<T : Copy + Eq>(huffman: &Huffman<T>, input_stream: &mut BitStream) -> T {
match huffman {
Huffman::Branch{zero, one} => {
if input_stream.next() {
DynamicBlock::get_next_huffman_encoded_value(one, input_stream)
} else {
DynamicBlock::get_next_huffman_encoded_value(zero, input_stream)
}
},
Huffman::Leaf(value) => *value,
Huffman::DeadEnd => panic!("Reached dead end!"),
}
}
fn next(&mut self, bit_stream: &mut BitStream) -> DEFLATEResult {
if self.repeats_remaining > 0 {
self.repeats_remaining -= 1;
return DEFLATEResult::Repeat(self.last_repeat_distance)
}
let value = DynamicBlock::get_next_huffman_encoded_value(&self.literal_length_huffman, bit_stream);
println!("Found value: {}", value);
if value < 256 {
// This is a literal byte to emit to the output stream
// We know it's a byte because of the check above and
// it's defined that way by the standard
DEFLATEResult::Literal(value as u8)
} else if value == 256 {
println!("End of block encountered");
DEFLATEResult::EndOfBlock
} else if value <= 285 {
// The value is between [257, 285] inclusive on both ends
// This means it's a back reference so we have to copy
// from the buffer of written bytes some distance away
// and for some amount of repetition
let repeat_length = DynamicBlock::read_repeat_length(value, bit_stream);
let distance = DynamicBlock::read_repeat_distance(&self.distance_huffman, bit_stream);
self.last_repeat_distance = distance;
self.repeats_remaining = repeat_length - 1;
DEFLATEResult::Repeat(distance)
} else {
panic!("Unsupported value: {}", value);
}
}
}
enum DEFLATEResult {
Literal(u8),
EndOfBlock,
Repeat(usize),
}
/// Keeps track of the state of the deflater. The state is necessary because
/// although we emit one byte at a time, we can generate multiple bytes at a
/// time with the repeat function.
pub struct DEFLATEReader<'a> {
// The following two fields are used to manage input/output
buffered_writer: BufferedWriter,
bit_stream: BitStream<'a>,
// The following two fields control if we read another block
has_seen_final_block: bool,
current_block: Option<DynamicBlock>,
}
const BTYPE_DYNAMIC : u8 = 0b10;
impl <'a> DEFLATEReader<'a> {
pub fn new<T : Source<u8> + 'a>(input: &'a mut T) -> DEFLATEReader<'a> {
DEFLATEReader{
buffered_writer: BufferedWriter::new(),
bit_stream: BitStream::new(input),
has_seen_final_block: false,
current_block: None,
}
}
}
impl <'a> Source<Option<u8>> for DEFLATEReader<'a> {
fn next(self: &mut DEFLATEReader<'a>) -> Option<u8> {
// We set this field when we have finished with a block or haven't started yet
// this field tells us that we should begin reading a new block which involves
// decoding all the headers and huffman trees
if self.current_block.is_none() {
// We have already fully consumed the final block
if self.has_seen_final_block {
return None
}
// Is this the final block?
let b_final = self.bit_stream.next();
println!("b_final: {}", b_final);
self.has_seen_final_block = b_final;
            // What type of block is this?
let b_type = self.bit_stream.next_bits(2) as u8;
println!("b_type : {:02b}", b_type);
self.current_block = match b_type {
BTYPE_DYNAMIC => Some(DynamicBlock::new(&mut self.bit_stream)),
_ => panic!("Unhandled block type: {:02b}", b_type),
};
}
// The above actions should have established a current block if one was needed
assert!(self.current_block.is_some());
match self.current_block.as_mut() {
Some(block) => match block.next(&mut self.bit_stream) {
DEFLATEResult::Literal(byte) => {
println!("Is Literal..");
self.buffered_writer.write(byte);
Some(byte)
},
DEFLATEResult::EndOfBlock => {
println!("Is EOB..");
self.current_block = None;
let byte = self.next();
println!("Continuing EOB");
// We probably don't need the following because
// the other branches handle writing already
// if byte.is_some() {
// self.buffered_writer.write(byte.unwrap());
// }
byte
},
DEFLATEResult::Repeat(distance) => {
println!("Is Repeat..");
Some(self.buffered_writer.repeat(distance))
},
},
None => panic!("We should have assured not none above"),
}
}
} | let result = Huffman::new(&alphabet, &lengths); | random_line_split |
deflate.rs | use super::Source;
use super::huffman::Huffman;
use super::bitstream::BitStream;
use super::bufferedwriter::BufferedWriter;
struct DynamicBlock {
// If we encounter a repeat, we should set our state so that we keep producing
// repeats until they run out, instead of reading from the bit stream.
repeats_remaining: usize,
last_repeat_distance: usize,
// Used to determine next byte from next bit(s) in stream
literal_length_huffman: Huffman<usize>,
distance_huffman: Huffman<usize>,
}
impl DynamicBlock {
fn new(bit_stream: &mut BitStream) -> DynamicBlock {
let hlit = bit_stream.next_bits(5) as usize + 257;
let hdist = bit_stream.next_bits(5) as usize + 1;
let hclen = bit_stream.next_bits(4) as usize + 4;
println!("hlit : {}", hlit);
println!("hdist : {}", hdist);
println!("hclen : {}", hclen);
// Read this first, this is next in the format and contains the code length
// compression tree to decode the literal/length huffman and the distance huffman
let code_length_huffman = DynamicBlock::read_code_length_huffman(hclen, bit_stream);
// Use code_length_huffman to build the literal_length and distance huffmans
let literal_length_huffman = DynamicBlock::read_literal_length_huffman(hlit, bit_stream, &code_length_huffman);
let distance_huffman = DynamicBlock::read_distance_huffman(hdist, bit_stream, &code_length_huffman);
DynamicBlock{
repeats_remaining: 0,
last_repeat_distance: 0,
literal_length_huffman,
distance_huffman,
}
}
fn read_repeat_distance(distance_huffman: &Huffman<usize>, input_stream: &mut BitStream) -> usize {
// A code ends up mapping to some base distance plus some
// extra bits to read to add to that base distance
let code = DynamicBlock::get_next_huffman_encoded_value(&distance_huffman, input_stream);
println!("Modulo: {}", code % 2);
let base_distance = match code {
            0..=3 => {
code as u32 + 1
},
_ if code % 2 == 0 => {
println!("Even code");
2u32.pow(code as u32 / 2) + 1
},
_ if code % 2 == 1 => {
println!("Odd code");
println!("{}", 2u32.pow(code as u32 / 2));
println!("{}", 2u32.pow(code as u32 / 2 - 1));
println!("{}", 2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1);
2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1
},
_ => panic!("Logic error handling base distance"),
};
let num_distance_extra_bits = match code {
            0..=3 => 0,
            4..=29 => (code / 2) - 1,
_ => panic!("Distance is undefined for: {}", code),
};
let distance_offset = input_stream.next_bits(num_distance_extra_bits) as u32;
println!("Code: {} Base Distance: {} Offset: {} Bits: {}", code, base_distance, distance_offset, num_distance_extra_bits);
let distance = base_distance + distance_offset;
distance as usize
}
fn read_repeat_length(value: usize, input_stream: &mut BitStream) -> usize {
let num_length_extra_bits = match value {
            257..=264 => 0,
            265..=284 => (value - 265) / 4 + 1,
285 => 0,
_ => panic!("Unsupported value for length: {}", value),
};
let length_offset = input_stream.next_bits(num_length_extra_bits) as usize;
let base_length = match value {
            257..=264 => value - 254,
            265..=268 => 11 + 2 * (value - 265),
            269..=272 => 19 + 4 * (value - 269),
            273..=276 => 35 + 8 * (value - 273),
            277..=280 => 67 + 16 * (value - 277),
            281..=284 => 131 + 32 * (value - 281),
285 => 258,
_ => panic!("Unsupported value for length: {}", value),
};
println!("Base Length: {} Offset: {} Bits: {}", base_length, length_offset, num_length_extra_bits);
let length = base_length + length_offset;
        length
}
fn read_code_length_huffman(length: usize, input_stream: &mut BitStream) -> Huffman<usize> {
        // Read the hclen code lengths (3 bits each) for the code length alphabet
let alphabet = vec![16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
let mut bit_lengths = vec![0; alphabet.len()];
assert_eq!(alphabet.len(), 19);
for index in 0..length {
let length = input_stream.next_bits(3) as usize;
bit_lengths[index] = length
}
println!("Alphabet : {:?}", alphabet);
println!("Bit Lengths : {:?}", bit_lengths);
let h = Huffman::new(&alphabet, &bit_lengths);
println!("Code Length Huffman = {:?}", h);
h
}
fn read_literal_length_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
// Includes 0 and 285, but not 286
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Literal/Length Huffman = {:?}", result);
result
}
fn read_distance_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Distance Huffman = {:?}", result);
result
}
fn read_code_lengths(count: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Vec<usize> {
let mut lengths = Vec::new();
while lengths.len() < count {
let length = DynamicBlock::get_next_huffman_encoded_value(code_length_huffman, input_stream);
println!("Found this length: {}", length);
// Literal value
if length <= 15 {
lengths.push(length);
continue
}
// Otherwise, it's a repeater of a previous value or zero
let (repeat_value, count) = match length {
16 => {
let value = (*lengths.last().expect("Cannot repeat at start of stream")).clone();
let count = input_stream.next_bits(2) + 3;
(value, count)
},
17 => (0, input_stream.next_bits(3) + 3),
18 => (0, input_stream.next_bits(7) + 11),
_ => panic!("Unsupported code length {}", length)
};
for _ in 0..count {
lengths.push(repeat_value)
}
}
// By the end, we should NOT have more or less than we want
// The encoding should generate exactly `count` entries into
// the list of code lengths
assert_eq!(lengths.len(), count);
println!("Lengths by alphabet: {:?}", lengths);
lengths
}
fn get_next_huffman_encoded_value<T : Copy + Eq>(huffman: &Huffman<T>, input_stream: &mut BitStream) -> T {
match huffman {
Huffman::Branch{zero, one} => {
if input_stream.next() {
DynamicBlock::get_next_huffman_encoded_value(one, input_stream)
} else |
},
Huffman::Leaf(value) => *value,
Huffman::DeadEnd => panic!("Reached dead end!"),
}
}
fn next(&mut self, bit_stream: &mut BitStream) -> DEFLATEResult {
if self.repeats_remaining > 0 {
self.repeats_remaining -= 1;
return DEFLATEResult::Repeat(self.last_repeat_distance)
}
let value = DynamicBlock::get_next_huffman_encoded_value(&self.literal_length_huffman, bit_stream);
println!("Found value: {}", value);
if value < 256 {
// This is a literal byte to emit to the output stream
// We know it's a byte because of the check above and
// it's defined that way by the standard
DEFLATEResult::Literal(value as u8)
} else if value == 256 {
println!("End of block encountered");
DEFLATEResult::EndOfBlock
} else if value <= 285 {
// The value is between [257, 285] inclusive on both ends
// This means it's a back reference so we have to copy
// from the buffer of written bytes some distance away
// and for some amount of repetition
let repeat_length = DynamicBlock::read_repeat_length(value, bit_stream);
let distance = DynamicBlock::read_repeat_distance(&self.distance_huffman, bit_stream);
self.last_repeat_distance = distance;
self.repeats_remaining = repeat_length - 1;
DEFLATEResult::Repeat(distance)
} else {
panic!("Unsupported value: {}", value);
}
}
}
enum DEFLATEResult {
Literal(u8),
EndOfBlock,
Repeat(usize),
}
/// Keeps track of the state of the deflater. The state is necessary because
/// although we emit one byte at a time, we can generate multiple bytes at a
/// time with the repeat function.
pub struct DEFLATEReader<'a> {
// The following two fields are used to manage input/output
buffered_writer: BufferedWriter,
bit_stream: BitStream<'a>,
// The following two fields control if we read another block
has_seen_final_block: bool,
current_block: Option<DynamicBlock>,
}
const BTYPE_DYNAMIC : u8 = 0b10;
impl <'a> DEFLATEReader<'a> {
pub fn new<T : Source<u8> + 'a>(input: &'a mut T) -> DEFLATEReader<'a> {
DEFLATEReader{
buffered_writer: BufferedWriter::new(),
bit_stream: BitStream::new(input),
has_seen_final_block: false,
current_block: None,
}
}
}
impl <'a> Source<Option<u8>> for DEFLATEReader<'a> {
fn next(self: &mut DEFLATEReader<'a>) -> Option<u8> {
// We set this field when we have finished with a block or haven't started yet
// this field tells us that we should begin reading a new block which involves
// decoding all the headers and huffman trees
if self.current_block.is_none() {
// We have already fully consumed the final block
if self.has_seen_final_block {
return None
}
// Is this the final block?
let b_final = self.bit_stream.next();
println!("b_final: {}", b_final);
self.has_seen_final_block = b_final;
            // What type of block is this?
let b_type = self.bit_stream.next_bits(2) as u8;
println!("b_type : {:02b}", b_type);
self.current_block = match b_type {
BTYPE_DYNAMIC => Some(DynamicBlock::new(&mut self.bit_stream)),
_ => panic!("Unhandled block type: {:02b}", b_type),
};
}
// The above actions should have established a current block if one was needed
assert!(self.current_block.is_some());
match self.current_block.as_mut() {
Some(block) => match block.next(&mut self.bit_stream) {
DEFLATEResult::Literal(byte) => {
println!("Is Literal..");
self.buffered_writer.write(byte);
Some(byte)
},
DEFLATEResult::EndOfBlock => {
println!("Is EOB..");
self.current_block = None;
let byte = self.next();
println!("Continuing EOB");
// We probably don't need the following because
// the other branches handle writing already
// if byte.is_some() {
// self.buffered_writer.write(byte.unwrap());
// }
byte
},
DEFLATEResult::Repeat(distance) => {
println!("Is Repeat..");
Some(self.buffered_writer.repeat(distance))
},
},
None => panic!("We should have assured not none above"),
}
}
}
| {
DynamicBlock::get_next_huffman_encoded_value(zero, input_stream)
} | conditional_block |
main.rs | // #! global
// #![warn()] enable
// #![allow()] disable
#![allow(non_snake_case)]
#![allow(dead_code)]
use hmac::Hmac;
use http::method;
use openssl::{hash, pkcs5::pbkdf2_hmac};
use reqwest::Client;
use std::{borrow::Borrow, collections::{HashMap, VecDeque}, default, ops::Index};
use std::{fs::File, usize};
// use sha::{Sha1, sha1};
use std::str::from_utf8;
// use hmac::{Hmac, Mac, NewMac};
use chrono::Utc;
use crypto::{digest::Digest, hmac, mac::Mac, md5, sha1};
use hyper::{Request, header::Keys, http};
use log::info;
#[derive(Default,Debug)]
struct UpYunConfig {
Bucket: String,
Operator: String,
Password: String,
Secret: String,
Hosts: HashMap<String, String>,
UserAgent: String,
}
#[derive(Default,Debug)]
struct UpYun {
UpYunConfig: UpYunConfig,
httpc: String,
deprecated: bool,
}
#[derive(Default)]
struct RestReqConfig {
method: String,
uri: String,
query: String,
headers: HashMap<String, String>,
closeBody: bool,
httpBody: String,
useMD5: bool,
}
#[derive(Default)]
struct RestAuthConfig {
method: String,
uri: String,
DateStr: String,
LengthStr: String,
}
#[derive(Default,Debug)]
struct PutObjectConfig {
path: String,
local_path: String,
reader: Vec<u8>,
headers: HashMap<String,String>,
use_md5: bool,
user_resume_upload: bool,
resume_partsize: i64,
max_resume_put_tries: i32,
}
impl PutObjectConfig {
fn new() -> Self{
PutObjectConfig{
..Default::default()
}
}
fn build(self) -> Self{
self
}
fn set_path(mut self, path: String) -> Self{
self.path = path;
self
}
fn set_local_path(mut self, local_path: String) -> Self{
self.local_path = local_path;
self
}
}
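// Builder-style construction sketch (illustrative paths only):
//
//     let cfg = PutObjectConfig::new()
//         .set_path("/demo/hello.txt".to_string())
//         .set_local_path("./hello.txt".to_string())
//         .build();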
impl RestReqConfig {
fn new() -> Self {
RestReqConfig {
..Default::default()
}
}
fn set_method(mut self, method: String) -> Self {
self.method = method;
self
}
fn set_uri(mut self, uri: String) -> Self {
self.uri = uri;
self
}
fn set_query(mut self, query: String) -> Self {
self.query = query;
self
}
fn set_headers(mut self, headers: HashMap<String, String>) -> Self {
self.headers = headers;
self
}
fn set_close(mut self, close: bool) -> Self {
self.closeBody = close;
self
}
fn set_usemd5(mut self, usemd5: bool) -> Self {
self.useMD5 = usemd5;
self
}
fn build(self) -> Self {
self
}
}
impl UpYunConfig {
fn new(Bucket: String, Operator: String, Password: String) -> Self{
UpYunConfig{
Bucket:Bucket,
Operator:Operator,
Password: Password,
..Default::default()
}
}
    fn build(self) -> Self {
self
}
}
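// Construction sketch (illustrative credentials; `Hosts`, `Secret` and
// `UserAgent` keep their `Default` values here):
//
//     let up = UpYun::new(UpYunConfig::new(
//         "my-bucket".to_string(),
//         "operator".to_string(),
//         "password".to_string(),
//     ))
//     .build();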
impl UpYun {
fn new(config: UpYunConfig) -> Self{
// init upyunconfig
UpYun{
UpYunConfig: config,
..Default::default()
}
}
fn set_httpc(mut self) -> Self{
self.httpc = "".to_string();
self
}
fn set_deprecated(mut self) -> Self{
self.deprecated = true;
self
}
fn build(self) -> Self{
self
}
// func (up *UpYun) Put(config *PutObjectConfig) (err error) {
    // if config.LocalPath != "" {
    // var fd *os.File
    // if fd, err = os.Open(config.LocalPath); err != nil {
// return errorOperation("open file", err)
// }
// defer fd.Close()
// config.Reader = fd
// }
    // if config.UseResumeUpload { // is resumable upload in use?
// logrus.Info("up.resumePut")
// return up.resumePut(config)
// } else {
// logrus.Info("up.put") // 正常上传
// return up.put(config)
// }
// }
    fn Put(&mut self, config: PutObjectConfig) {
        if config.local_path != "" {
            // open the local file and use it as the reader
        }
        if config.user_resume_upload {
            // resumable upload
            info!("resumable upload is not implemented yet")
        } else {
            info!("normal upload")
        }
}
fn put_file(
&mut self,
file_path: String,
filepath: String,
) -> Result<(), Box<dyn std::error::Error>> {
        if file_path != "" {
match File::open(filepath) {
Ok(file) => {
println!("{:?}", file)
}
Err(e) => {
println!("open file error{:?}", e)
}
}
}
Ok(())
}
fn doGetEndpoint(&mut self, host: String) -> String {
        match self.UpYunConfig.Hosts.get(&host) {
            Some(value) => value.to_string(),
            None => host,
        }
}
/// FIXME
    fn doHTTPRequest(&mut self, method: hyper::Method, url: String, headers: HashMap<String, String>, body: Vec<u8>) {
        match hyper::Request::builder().method(method).uri(url).body(body) {
            Ok(req) => {
                for (key, value) in headers {
                    if key.to_lowercase() == "host" {
// req.
} else {
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
    fn MakeRESTAuth(&mut self, config: RestAuthConfig) -> String {
let sign = vec![config.method, config.uri, config.DateStr, config.LengthStr, self.UpYunConfig.Password.clone()];
let mut tt = vec![];
tt.push(String::from("Upyun"));
tt.push(self.UpYunConfig.Operator.clone());
tt.push(":".to_string());
tt.push(md5str(sign.join("&")));
tt.concat()
}
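    // The value built above has the shape "Upyun <operator>:<signature>", where
    // the signature is md5 over "method&uri&date&length&password" exactly as
    // joined by `sign.join("&")`.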
fn doRESTRequest(&mut self, config: &RestReqConfig) -> Result<(), Box<dyn std::error::Error>> {
        // The incoming URI gets percent-encoded here: UTF-8 bytes become ASCII escapes, e.g. /sdk-test/xx/%E4%B8%AD%E6%96%87.log
// escUri := path.Join("/", up.Bucket, escapeUri(config.uri))
let mut escUri =
String::from("/") + &self.UpYunConfig.Bucket + &escapeUri(config.uri.clone());
if config.uri.chars().last().unwrap() == '/' {
escUri += '/'.to_string().as_str()
}
if config.query!= "" {
escUri += ("?".to_owned() + &config.query).as_str()
}
let mut headers: HashMap<String, String> = HashMap::new();
let mut has_md5: bool = false;
let old_header: HashMap<String, String> = HashMap::new();
for (k, v) in old_header {
if k.to_lowercase() == "content-md5" && v!= "" {
has_md5 = true
}
headers.insert(k, v).expect("header set error ");
}
headers.insert("Date".to_string(), makeRFC1123Date());
headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // 为什么这个是固定的
if!has_md5 && config.useMD5 {
// config.httpBody.
// 这里需要判断下httpBody的类型
//// FIXME: depend on config.httpBody.type
headers.insert("Content".to_string(), "xx".to_string());
}
if self.deprecated {
if let Some(value) = headers.get("Conetnt-Length") {
let size = 0;
} | }
fn md5str(s: String) -> String {
let mut hasher = md5::Md5::new();
hasher.input_str(&s);
hasher.result_str()
}
fn escapeUri(s: String) -> String {
// let s = String::from("/xx/中文.log");
if s == "" {
let _s = String::from("中文");
}
let escape: [u32; 8] = [
0xffffffff, 0xfc001fff, 0x78000001, 0xb8000001, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff,
];
let hexMap = "0123456789ABCDEF".as_bytes();
let mut size = 0;
let ss = s.as_bytes();
for i in 0..ss.len() {
let c = ss[i];
if escape.get((c >> 5) as usize).unwrap() & (1 << (c & 0x1f)) > 0 {
size += 3
} else {
size += 1
}
}
    // let ret = [0u8;size]; // a fixed-size array needs a const length -> compile error
    let mut ret = vec![0u8; size]; // a dynamically sized Vec works
let mut j = 0;
for i in 0..ss.len() {
let c = ss[i];
if escape[(c >> 5) as usize] & (1 << (c & 0x1f)) > 0 {
ret[j] = "%".as_bytes()[0];
// ret[j] = "%".parse::<u8>().unwrap();
ret[j + 1] = hexMap[(c >> 4) as usize];
ret[j + 2] = hexMap[(c & 0xf) as usize];
j += 3
} else {
ret[j] = c;
j += 1
}
}
from_utf8(&ret).unwrap().to_string()
}
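// Worked example (editor's sketch of the bitmap logic above): for a byte c,
// `escape[c >> 5]` selects one of eight 32-bit words and `1 << (c & 0x1f)` tests
// one bit of it. '/' is 0x2F -> word 1 (0xfc001fff), bit 15 is clear, so it is
// copied through; every UTF-8 byte of "中文" is >= 0x80 and lands in words 4..7,
// which are all ones, so each byte becomes %XX. Hence:
//   escapeUri("/xx/中文.log".to_string()) == "/xx/%E4%B8%AD%E6%96%87.log"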
fn unescapeUri(s: String) -> String {
    println!("============");
    // locate each '%' and convert the two hex digits that follow into one byte
    // let xx = "%";
    // let xxx = xx.as_bytes();
    // println!("change % to byte is ==> {:?}",xxx);
    // turn the incoming string into a char array, then walk it matching '%'
    // if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
    //     // if not correct, return original string
    //     return s
    // }
    // i += 3
    // first pass: count the decoded length (and validate the escapes)
    let mut n: i32 = 0;
    let s_vec: Vec<char> = s.chars().collect();
    // NOTE: the original used `for mut _i in ..` and bumped `_i` by hand, but a
    // `for` loop reassigns its variable on every iteration, so the skip never
    // happened; `while` loops match the Go source this was ported from
    let mut _i = 0;
    while _i < s_vec.len() {
        if s_vec[_i] == '%' {
            if _i + 2 >= s_vec.len() || !ishex(s_vec[_i + 1] as u8) || !ishex(s_vec[_i + 2] as u8) {
                return s;
            }
            _i += 3
        } else {
            _i += 1
        }
        n += 1
    }
    // second pass: write the decoded bytes; the Vec must be pre-sized, since
    // indexing an empty Vec would panic
    let mut t_vec: Vec<u8> = vec![0u8; n as usize];
    let mut j = 0;
    let mut _i = 0;
    while _i < s_vec.len() {
        if s_vec[_i] == '%' {
            t_vec[j] = unhex(s_vec[_i + 1] as u8) << 4 | unhex(s_vec[_i + 2] as u8);
            _i += 3
        } else {
            t_vec[j] = s_vec[_i] as u8;
            _i += 1
        }
        j += 1
    }
    from_utf8(&t_vec).unwrap().to_string()
}
// hex digit to decimal value
fn unhex(c: u8) -> u8 {
if '0' as u8 <= c && c <= '9' as u8 {
c - '0' as u8
} else if 'a' as u8 <= c && c <= 'f' as u8 {
c - 'a' as u8 + 10
} else if 'A' as u8 <= c && c <= 'F' as u8 {
c - 'A' as u8 + 10
} else {
0
}
}
// check whether a byte is a hex digit
fn ishex(c: u8) -> bool {
if '0' as u8 <= c && c <= '9' as u8 {
true
} else if 'a' as u8 <= c && c <= 'f' as u8 {
true
} else if 'A' as u8 <= c && c <= 'F' as u8 {
true
} else {
false
}
}
// HMAC the content using SHA-1
// func hmacSha1(key string, data []byte) []byte {
// hm := hmac.New(sha1.New, []byte(key))
// hm.Write(data)
// return hm.Sum(nil)
// }
fn hmacSha1(key: &[u8], value: &[u8]) -> String {
    // // first hash the key (similar to md5, just a different algorithm)
    // let mut hasher = crypto::sha1::Sha1::new();
    // hasher.input_str(&key);
    // let result = hasher.result_str().as_bytes();
    // let rr = vec![0u8;20];
    // rr.copy_from_slice(&result);
    // then hmac the hashed key together with the value
    // let h_mac = NewMac::new(&result)
let mut mac = hmac::Hmac::new(crypto::sha1::Sha1::new(), key);
mac.input(value);
let result = mac.result();
let code = result.code();
// The correct hash is returned, it's just not in the representation you expected. The hash is returned as raw bytes, not as bytes converted to ASCII hexadecimal digits.
// If we print the hash code array as hex, like this
// println!("{:02x?}", code);
let code_vec = code
.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<String>>();
code_vec.concat()
}
// func makeRFC1123Date(d time.Time) string {
// utc := d.UTC().Format(time.RFC1123)
// return strings.ReplaceAll(utc, "UTC", "GMT")
// }
fn makeRFC1123Date() -> String {
let time = Utc::now();
let time_utc = time.to_rfc2822();
let new_time_utf = time_utc.replace("+0000", "GMT");
new_time_utf
}
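// NOTE (editor's assumption): to_rfc2822() is close to, but not exactly, RFC 1123
// (e.g. the day of month may be unpadded), and the "+0000" -> "GMT" replace only
// works because the timestamp is always Utc::now() here; a dedicated RFC 1123
// formatter would be more robust.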
// base64 to string
// base64::decode_block(src)
#[cfg(test)]
mod tests {
use chrono::{Date, DateTime, Utc};
use hyper::http;
use std::{collections::HashMap, io::Read};
use crate::escapeUri;
use crate::hmacSha1;
use crate::makeRFC1123Date;
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
#[test]
fn parse_uri() {
let bucket = String::from("sdk-test");
let config = "/xx/中文.log/".to_string();
let query = "xxx";
let mut escUri = String::from("/") + &bucket + &escapeUri("/xx/中文.log".to_string());
if config.chars().last().unwrap() == '/' {
escUri += '/'.to_string().as_str()
}
        if query != "" {
            escUri += ("?".to_owned() + query).as_str()
        }
        // header set
        // hasmd5 set
        let mut headers: HashMap<String, String> = HashMap::new();
        let mut has_md5: bool = false;
        let old_header: HashMap<String, String> = HashMap::new();
        for (k, v) in old_header {
            if k.to_lowercase() == "content-md5" && v != "" {
                has_md5 = true
            }
            // insert returns the previous value (None for a new key), so the
            // original `.expect(...)` would panic on every fresh insert
            headers.insert(k, v);
        }
        headers.insert("Date".to_string(), makeRFC1123Date());
        headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // FIXME: why is this host fixed?
        // headers["Date"] = makeRFC1123Date(time.Now());
        // headers["Host"] = "v0.api.upyun.com"
        if !has_md5 {
            // check the body type here
}
let deprecated = "";
// if deprecated {}
if let Some(value) = headers.get("Content-Length") {
let size = 0;
}
}
// use crate::base64ToStr;
#[test]
fn make_unified_auth() {
let sign: Vec<&'static str> = vec!["method", "uri", "DateStr", "Policy", "ContentMD5"];
let mut sign_no_empty: Vec<String> = Vec::new();
for item in sign {
            if item != "" {
sign_no_empty.push(item.to_string());
}
}
let sign_bytes = sign_no_empty.join("&");
let password = "xx".as_bytes();
let sign_str =
openssl::base64::encode_block(hmacSha1(password, sign_bytes.as_bytes()).as_bytes());
let back_vec: Vec<String> = vec![
"Upyun".to_string(),
"Operator".to_string(),
":".to_string(),
sign_str,
];
let _back_str = back_vec.concat();
}
#[test]
fn hmac_test() {
let value = "xx".as_bytes();
let key = "yy".as_bytes();
assert_eq!(
"3124cf1daef6d713c312065988652d8b7fca587e".to_string(),
hmacSha1(key, value)
)
}
#[test]
fn makeRFC1123Date_test() {
let time = Utc::now();
let time_utc = time.to_rfc2822();
let new_time_utf = time_utc.replace("+0000", "GMT");
println!("{:?}", new_time_utf);
}
use crypto::{digest::Digest, md5::Md5};
#[test]
fn md5str() {
        // a note on md5:
        // the raw digest is long, so it is conventionally rendered as hex,
        // each character encoding 4 bits
let s = "xx".to_string();
// create
let mut hasher = Md5::new();
hasher.input_str(&s);
let xx = hasher.result_str();
println!("{:?}", xx);
}
// using hyper
#[test]
fn hyper_test(){
let method = http::Method::GET;
let url = "http://www.baidu.com";
let headers: HashMap<String, String> = HashMap::new();
let body = [0u8;32];
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
println!("{:?}{:?}",&key, &value);
if key.to_lowercase() == "host"{
// req.
} else {
println!("{:?}", req.headers())
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
}
fn main() {
let BUCKET = "sdk-test".to_string();
let CNAME = "sdk-test.b0-aicdn.com".to_string();
let USER = "sdk".to_string();
let PASSWORD = "IwwEqRmUgs29IdNOzDT3ePFz7Q9iMT5m".to_string();
//
let up = UpYun::new(UpYunConfig::new(BUCKET, USER, PASSWORD).build()).build();
let put_object_config = PutObjectConfig::new().set_local_path("".to_string()).set_path("/xx/中文.log".to_string()).build();
println!("{:?}",up);
// let rqc = RestReqConfig::new()
// .set_uri("/xx/中文.log".to_string())
// .build();
} |
}
Ok(())
} | random_line_split |
main.rs | // #! global
// #![warn()] enable
// #![allow()] disable
#![allow(non_snake_case)]
#![allow(dead_code)]
use hmac::Hmac;
use http::method;
use openssl::{hash, pkcs5::pbkdf2_hmac};
use reqwest::Client;
use std::{borrow::Borrow, collections::{HashMap, VecDeque}, default, ops::Index};
use std::{fs::File, usize};
// use sha::{Sha1, sha1};
use std::str::from_utf8;
// use hmac::{Hmac, Mac, NewMac};
use chrono::Utc;
use crypto::{digest::Digest, hmac, mac::Mac, md5, sha1};
use hyper::{Request, header::Keys, http};
use log::info;
#[derive(Default,Debug)]
struct UpYunConfig {
Bucket: String,
Operator: String,
Password: String,
Secret: String,
Hosts: HashMap<String, String>,
UserAgent: String,
}
#[derive(Default,Debug)]
struct UpYun {
UpYunConfig: UpYunConfig,
httpc: String,
deprecated: bool,
}
#[derive(Default)]
struct RestReqConfig {
method: String,
uri: String,
query: String,
headers: HashMap<String, String>,
closeBody: bool,
httpBody: String,
useMD5: bool,
}
#[derive(Default)]
struct RestAuthConfig {
method: String,
uri: String,
DateStr: String,
LengthStr: String,
}
#[derive(Default,Debug)]
struct PutObjectConfig {
path: String,
local_path: String,
reader: Vec<u8>,
headers: HashMap<String,String>,
use_md5: bool,
user_resume_upload: bool,
resume_partsize: i64,
max_resume_put_tries: i32,
}
impl PutObjectConfig {
fn new() -> Self{
PutObjectConfig{
..Default::default()
}
}
fn build(self) -> Self{
self
}
fn set_path(mut self, path: String) -> Self{
self.path = path;
self
}
fn set_local_path(mut self, local_path: String) -> Self{
self.local_path = local_path;
self
}
}
impl RestReqConfig {
fn new() -> Self {
RestReqConfig {
..Default::default()
}
}
fn set_method(mut self, method: String) -> Self {
self.method = method;
self
}
fn set_uri(mut self, uri: String) -> Self {
self.uri = uri;
self
}
fn set_query(mut self, query: String) -> Self {
self.query = query;
self
}
fn set_headers(mut self, headers: HashMap<String, String>) -> Self {
self.headers = headers;
self
}
fn set_close(mut self, close: bool) -> Self {
self.closeBody = close;
self
}
fn set_usemd5(mut self, usemd5: bool) -> Self {
self.useMD5 = usemd5;
self
}
fn build(self) -> Self {
self
}
}
impl UpYunConfig {
fn new(Bucket: String, Operator: String, Password: String) -> Self{
UpYunConfig{
Bucket:Bucket,
Operator:Operator,
Password: Password,
..Default::default()
}
}
fn build(mut self) -> Self {
self
}
}
impl UpYun {
fn new(config: UpYunConfig) -> Self{
// init upyunconfig
UpYun{
UpYunConfig: config,
..Default::default()
}
}
fn set_httpc(mut self) -> Self{
self.httpc = "".to_string();
self
}
fn set_deprecated(mut self) -> Self{
self.deprecated = true;
self
}
fn build(self) -> Self{
self
}
// func (up *UpYun) Put(config *PutObjectConfig) (err error) {
// if config.LocalPath!= "" {
// var fd *os.File
// if fd, err = os.Open(config.LocalPath); err!= nil {
// return errorOperation("open file", err)
// }
// defer fd.Close()
// config.Reader = fd
// }
    // if config.UseResumeUpload { // whether to use resumable upload
    // logrus.Info("up.resumePut")
    // return up.resumePut(config)
    // } else {
    // logrus.Info("up.put") // normal upload
// return up.put(config)
// }
// }
    fn Put(&mut self, config: PutObjectConfig) {
        if config.local_path != "" {
            // open the local file here (see put_file below)
        }
        if config.user_resume_upload {
            // resumable upload is not implemented yet
            info!("resumable upload not yet implemented")
        } else {
            info!("normal upload")
        }
    }
fn put_file(
&mut self,
file_path: String,
filepath: String,
) -> Result<(), Box<dyn std::error::Error>> {
        if file_path != "" {
match File::open(filepath) {
Ok(file) => {
println!("{:?}", file)
}
Err(e) => {
println!("open file error{:?}", e)
}
}
}
Ok(())
}
fn doGetEndpoint(&mut self, host: String) -> String {
match self.UpYunConfig.Hosts.get(&host){
| yper::Method, url:String, headers: HashMap<String,String>, body: Vec<u8>){
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
if key.to_lowercase() == "host"{
// req.
} else {
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
fn MakeRESTAuth(&mut self,config: RestAuthConfig) -> String{
let sign = vec![config.method, config.uri, config.DateStr, config.LengthStr, self.UpYunConfig.Password.clone()];
let mut tt = vec![];
tt.push(String::from("Upyun"));
tt.push(self.UpYunConfig.Operator.clone());
tt.push(":".to_string());
tt.push(md5str(sign.join("&")));
tt.concat()
}
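    // NOTE (editor's assumption, not confirmed by this source): UpYun's REST
    // Authorization header is usually "UpYun <operator>:<signature>" with a
    // space after "UpYun"; the concat above produces "Upyun<operator>:<signature>",
    // so a separating space may still be missing here.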
fn doRESTRequest(&mut self, config: &RestReqConfig) -> Result<(), Box<dyn std::error::Error>> {
        // the incoming uri gets percent-encoded here (utf-8 bytes become ascii
        // escapes), e.g. /sdk-test/xx/%E4%B8%AD%E6%96%87.log
        // escUri := path.Join("/", up.Bucket, escapeUri(config.uri))
        let mut escUri =
            String::from("/") + &self.UpYunConfig.Bucket + &escapeUri(config.uri.clone());
        if config.uri.chars().last().unwrap() == '/' {
            escUri += '/'.to_string().as_str()
        }
        if config.query != "" {
            escUri += ("?".to_owned() + &config.query).as_str()
        }
        let mut headers: HashMap<String, String> = HashMap::new();
        let mut has_md5: bool = false;
        let old_header: HashMap<String, String> = HashMap::new();
        for (k, v) in old_header {
            if k.to_lowercase() == "content-md5" && v != "" {
                has_md5 = true
            }
            // HashMap::insert returns the previous value (None for a new key),
            // so the original `.expect(...)` would panic on every fresh insert
            headers.insert(k, v);
        }
        headers.insert("Date".to_string(), makeRFC1123Date());
        headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // FIXME: why is this host fixed?
        if !has_md5 && config.useMD5 {
            // config.httpBody.
            // the type of httpBody needs to be checked here
            //// FIXME: depend on config.httpBody.type
            headers.insert("Content".to_string(), "xx".to_string());
        }
        if self.deprecated {
            if let Some(value) = headers.get("Content-Length") {
let size = 0;
}
}
Ok(())
}
}
fn md5str(s: String) -> String {
let mut hasher = md5::Md5::new();
hasher.input_str(&s);
hasher.result_str()
}
fn escapeUri(s: String) -> String {
// let s = String::from("/xx/中文.log");
if s == "" {
let _s = String::from("中文");
}
let escape: [u32; 8] = [
0xffffffff, 0xfc001fff, 0x78000001, 0xb8000001, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff,
];
let hexMap = "0123456789ABCDEF".as_bytes();
let mut size = 0;
let ss = s.as_bytes();
for i in 0..ss.len() {
let c = ss[i];
if escape.get((c >> 5) as usize).unwrap() & (1 << (c & 0x1f)) > 0 {
size += 3
} else {
size += 1
}
}
    // let ret = [0u8;size]; // a fixed-size array needs a const length -> compile error
    let mut ret = vec![0u8; size]; // a dynamically sized Vec works
let mut j = 0;
for i in 0..ss.len() {
let c = ss[i];
if escape[(c >> 5) as usize] & (1 << (c & 0x1f)) > 0 {
ret[j] = "%".as_bytes()[0];
// ret[j] = "%".parse::<u8>().unwrap();
ret[j + 1] = hexMap[(c >> 4) as usize];
ret[j + 2] = hexMap[(c & 0xf) as usize];
j += 3
} else {
ret[j] = c;
j += 1
}
}
from_utf8(&ret).unwrap().to_string()
}
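// Worked example (editor's sketch of the bitmap logic above): for a byte c,
// `escape[c >> 5]` selects one of eight 32-bit words and `1 << (c & 0x1f)` tests
// one bit of it. '/' is 0x2F -> word 1 (0xfc001fff), bit 15 is clear, so it is
// copied through; every UTF-8 byte of "中文" is >= 0x80 and lands in words 4..7,
// which are all ones, so each byte becomes %XX. Hence:
//   escapeUri("/xx/中文.log".to_string()) == "/xx/%E4%B8%AD%E6%96%87.log"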
fn unescapeUri(s: String) -> String {
    println!("============");
    // locate each '%' and convert the two hex digits that follow into one byte
    // let xx = "%";
    // let xxx = xx.as_bytes();
    // println!("change % to byte is ==> {:?}",xxx);
    // turn the incoming string into a char array, then walk it matching '%'
    // if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
    //     // if not correct, return original string
    //     return s
    // }
    // i += 3
    // first pass: count the decoded length (and validate the escapes)
    let mut n: i32 = 0;
    let s_vec: Vec<char> = s.chars().collect();
    // NOTE: the original used `for mut _i in ..` and bumped `_i` by hand, but a
    // `for` loop reassigns its variable on every iteration, so the skip never
    // happened; `while` loops match the Go source this was ported from
    let mut _i = 0;
    while _i < s_vec.len() {
        if s_vec[_i] == '%' {
            if _i + 2 >= s_vec.len() || !ishex(s_vec[_i + 1] as u8) || !ishex(s_vec[_i + 2] as u8) {
                return s;
            }
            _i += 3
        } else {
            _i += 1
        }
        n += 1
    }
    // second pass: write the decoded bytes; the Vec must be pre-sized, since
    // indexing an empty Vec would panic
    let mut t_vec: Vec<u8> = vec![0u8; n as usize];
    let mut j = 0;
    let mut _i = 0;
    while _i < s_vec.len() {
        if s_vec[_i] == '%' {
            t_vec[j] = unhex(s_vec[_i + 1] as u8) << 4 | unhex(s_vec[_i + 2] as u8);
            _i += 3
        } else {
            t_vec[j] = s_vec[_i] as u8;
            _i += 1
        }
        j += 1
    }
    from_utf8(&t_vec).unwrap().to_string()
}
// hex digit to decimal value
fn unhex(c: u8) -> u8 {
if '0' as u8 <= c && c <= '9' as u8 {
c - '0' as u8
} else if 'a' as u8 <= c && c <= 'f' as u8 {
c - 'a' as u8 + 10
} else if 'A' as u8 <= c && c <= 'F' as u8 {
c - 'A' as u8 + 10
} else {
0
}
}
// check whether a byte is a hex digit
fn ishex(c: u8) -> bool {
if '0' as u8 <= c && c <= '9' as u8 {
true
} else if 'a' as u8 <= c && c <= 'f' as u8 {
true
} else if 'A' as u8 <= c && c <= 'F' as u8 {
true
} else {
false
}
}
// HMAC the content using SHA-1
// func hmacSha1(key string, data []byte) []byte {
// hm := hmac.New(sha1.New, []byte(key))
// hm.Write(data)
// return hm.Sum(nil)
// }
fn hmacSha1(key: &[u8], value: &[u8]) -> String {
    // // first hash the key (similar to md5, just a different algorithm)
    // let mut hasher = crypto::sha1::Sha1::new();
    // hasher.input_str(&key);
    // let result = hasher.result_str().as_bytes();
    // let rr = vec![0u8;20];
    // rr.copy_from_slice(&result);
    // then hmac the hashed key together with the value
    // let h_mac = NewMac::new(&result)
let mut mac = hmac::Hmac::new(crypto::sha1::Sha1::new(), key);
mac.input(value);
let result = mac.result();
let code = result.code();
// The correct hash is returned, it's just not in the representation you expected. The hash is returned as raw bytes, not as bytes converted to ASCII hexadecimal digits.
// If we print the hash code array as hex, like this
// println!("{:02x?}", code);
let code_vec = code
.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<String>>();
code_vec.concat()
}
// func makeRFC1123Date(d time.Time) string {
// utc := d.UTC().Format(time.RFC1123)
// return strings.ReplaceAll(utc, "UTC", "GMT")
// }
fn makeRFC1123Date() -> String {
let time = Utc::now();
let time_utc = time.to_rfc2822();
let new_time_utf = time_utc.replace("+0000", "GMT");
new_time_utf
}
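// NOTE (editor's assumption): to_rfc2822() is close to, but not exactly, RFC 1123
// (e.g. the day of month may be unpadded), and the "+0000" -> "GMT" replace only
// works because the timestamp is always Utc::now() here; a dedicated RFC 1123
// formatter would be more robust.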
// base64 to string
// base64::decode_block(src)
#[cfg(test)]
mod tests {
use chrono::{Date, DateTime, Utc};
use hyper::http;
use std::{collections::HashMap, io::Read};
use crate::escapeUri;
use crate::hmacSha1;
use crate::makeRFC1123Date;
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
#[test]
fn parse_uri() {
let bucket = String::from("sdk-test");
let config = "/xx/中文.log/".to_string();
let query = "xxx";
let mut escUri = String::from("/") + &bucket + &escapeUri("/xx/中文.log".to_string());
if config.chars().last().unwrap() == '/' {
escUri += '/'.to_string().as_str()
}
        if query != "" {
            escUri += ("?".to_owned() + query).as_str()
        }
        // header set
        // hasmd5 set
        let mut headers: HashMap<String, String> = HashMap::new();
        let mut has_md5: bool = false;
        let old_header: HashMap<String, String> = HashMap::new();
        for (k, v) in old_header {
            if k.to_lowercase() == "content-md5" && v != "" {
                has_md5 = true
            }
            // insert returns the previous value (None for a new key), so the
            // original `.expect(...)` would panic on every fresh insert
            headers.insert(k, v);
        }
        headers.insert("Date".to_string(), makeRFC1123Date());
        headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // FIXME: why is this host fixed?
        // headers["Date"] = makeRFC1123Date(time.Now());
        // headers["Host"] = "v0.api.upyun.com"
        if !has_md5 {
            // check the body type here
}
let deprecated = "";
// if deprecated {}
if let Some(value) = headers.get("Content-Length") {
let size = 0;
}
}
// use crate::base64ToStr;
#[test]
fn make_unified_auth() {
let sign: Vec<&'static str> = vec!["method", "uri", "DateStr", "Policy", "ContentMD5"];
let mut sign_no_empty: Vec<String> = Vec::new();
for item in sign {
            if item != "" {
sign_no_empty.push(item.to_string());
}
}
let sign_bytes = sign_no_empty.join("&");
let password = "xx".as_bytes();
let sign_str =
openssl::base64::encode_block(hmacSha1(password, sign_bytes.as_bytes()).as_bytes());
let back_vec: Vec<String> = vec![
"Upyun".to_string(),
"Operator".to_string(),
":".to_string(),
sign_str,
];
let _back_str = back_vec.concat();
}
#[test]
fn hmac_test() {
let value = "xx".as_bytes();
let key = "yy".as_bytes();
assert_eq!(
"3124cf1daef6d713c312065988652d8b7fca587e".to_string(),
hmacSha1(key, value)
)
}
#[test]
fn makeRFC1123Date_test() {
let time = Utc::now();
let time_utc = time.to_rfc2822();
let new_time_utf = time_utc.replace("+0000", "GMT");
println!("{:?}", new_time_utf);
}
use crypto::{digest::Digest, md5::Md5};
#[test]
fn md5str() {
        // a note on md5:
        // the raw digest is long, so it is conventionally rendered as hex,
        // each character encoding 4 bits
let s = "xx".to_string();
// create
let mut hasher = Md5::new();
hasher.input_str(&s);
let xx = hasher.result_str();
println!("{:?}", xx);
}
// using hyper
#[test]
fn hyper_test(){
let method = http::Method::GET;
let url = "http://www.baidu.com";
let headers: HashMap<String, String> = HashMap::new();
let body = [0u8;32];
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
println!("{:?}{:?}",&key, &value);
if key.to_lowercase() == "host"{
// req.
} else {
println!("{:?}", req.headers())
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
}
fn main() {
let BUCKET = "sdk-test".to_string();
let CNAME = "sdk-test.b0-aicdn.com".to_string();
let USER = "sdk".to_string();
let PASSWORD = "IwwEqRmUgs29IdNOzDT3ePFz7Q9iMT5m".to_string();
//
let up = UpYun::new(UpYunConfig::new(BUCKET, USER, PASSWORD).build()).build();
let put_object_config = PutObjectConfig::new().set_local_path("".to_string()).set_path("/xx/中文.log".to_string()).build();
println!("{:?}",up);
// let rqc = RestReqConfig::new()
// .set_uri("/xx/中文.log".to_string())
// .build();
}
| Some(Value ) => Value.to_string(),
None => host
}
}
/// FIXME
fn doHTTPRequest(&mut self,method: h | identifier_body |
main.rs | // #! global
// #![warn()] enable
// #![allow()] disable
#![allow(non_snake_case)]
#![allow(dead_code)]
use hmac::Hmac;
use http::method;
use openssl::{hash, pkcs5::pbkdf2_hmac};
use reqwest::Client;
use std::{borrow::Borrow, collections::{HashMap, VecDeque}, default, ops::Index};
use std::{fs::File, usize};
// use sha::{Sha1, sha1};
use std::str::from_utf8;
// use hmac::{Hmac, Mac, NewMac};
use chrono::Utc;
use crypto::{digest::Digest, hmac, mac::Mac, md5, sha1};
use hyper::{Request, header::Keys, http};
use log::info;
#[derive(Default,Debug)]
struct UpYunConfig {
Bucket: String,
Operator: String,
Password: String,
Secret: String,
Hosts: HashMap<String, String>,
UserAgent: String,
}
#[derive(Default,Debug)]
struct UpYun {
UpYunConfig: UpYunConfig,
httpc: String,
deprecated: bool,
}
#[derive(Default)]
struct RestReqConfig {
method: String,
uri: String,
query: String,
headers: HashMap<String, String>,
closeBody: bool,
httpBody: String,
useMD5: bool,
}
#[derive(Default)]
struct RestAuthConfig {
method: String,
uri: String,
DateStr: String,
LengthStr: String,
}
#[derive(Default,Debug)]
struct PutObjectConfig {
path: String,
local_path: String,
reader: Vec<u8>,
headers: HashMap<String,String>,
use_md5: bool,
user_resume_upload: bool,
resume_partsize: i64,
max_resume_put_tries: i32,
}
impl PutObjectConfig {
fn new() -> Self{
PutObjectConfig{
..Default::default()
}
}
fn build(self) -> Self{
self
}
fn set_path(mut self, path: String) -> Self{
self.path = path;
self
}
fn set_local_path(mut self, local_path: String) -> Self{
self.local_path = local_path;
self
}
}
impl RestReqConfig {
fn new() -> Self {
RestReqConfig {
..Default::default()
}
}
fn set_method(mut self, method: String) -> Self {
self.method = method;
self
}
fn set_uri(mut self, uri: String) -> Self {
self.uri = uri;
self
}
fn set_query(mut self, query: String) -> Self {
self.query = query;
self
}
fn set_headers(mut self, headers: HashMap<String, String>) -> Self {
self.headers = headers;
self
}
fn set_close(mut self, close: bool) -> Self {
self.closeBody = close;
self
}
fn set_usemd5(mut self, usemd5: bool) -> Self {
self.useMD5 = usemd5;
self
}
fn | (self) -> Self {
self
}
}
impl UpYunConfig {
fn new(Bucket: String, Operator: String, Password: String) -> Self{
UpYunConfig{
Bucket:Bucket,
Operator:Operator,
Password: Password,
..Default::default()
}
}
fn build(mut self) -> Self {
self
}
}
impl UpYun {
fn new(config: UpYunConfig) -> Self{
// init upyunconfig
UpYun{
UpYunConfig: config,
..Default::default()
}
}
fn set_httpc(mut self) -> Self{
self.httpc = "".to_string();
self
}
fn set_deprecated(mut self) -> Self{
self.deprecated = true;
self
}
fn build(self) -> Self{
self
}
// func (up *UpYun) Put(config *PutObjectConfig) (err error) {
// if config.LocalPath!= "" {
// var fd *os.File
// if fd, err = os.Open(config.LocalPath); err!= nil {
// return errorOperation("open file", err)
// }
// defer fd.Close()
// config.Reader = fd
// }
    // if config.UseResumeUpload { // whether to use resumable upload
    // logrus.Info("up.resumePut")
    // return up.resumePut(config)
    // } else {
    // logrus.Info("up.put") // normal upload
// return up.put(config)
// }
// }
    fn Put(&mut self, config: PutObjectConfig) {
        if config.local_path != "" {
            // open the local file here (see put_file below)
        }
        if config.user_resume_upload {
            // resumable upload is not implemented yet
            info!("resumable upload not yet implemented")
        } else {
            info!("normal upload")
        }
    }
fn put_file(
&mut self,
file_path: String,
filepath: String,
) -> Result<(), Box<dyn std::error::Error>> {
        if file_path != "" {
match File::open(filepath) {
Ok(file) => {
println!("{:?}", file)
}
Err(e) => {
println!("open file error{:?}", e)
}
}
}
Ok(())
}
fn doGetEndpoint(&mut self, host: String) -> String {
match self.UpYunConfig.Hosts.get(&host){
            Some(value) => value.to_string(),
None => host
}
}
/// FIXME
fn doHTTPRequest(&mut self,method: hyper::Method, url:String, headers: HashMap<String,String>, body: Vec<u8>){
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
if key.to_lowercase() == "host"{
// req.
} else {
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
fn MakeRESTAuth(&mut self,config: RestAuthConfig) -> String{
let sign = vec![config.method, config.uri, config.DateStr, config.LengthStr, self.UpYunConfig.Password.clone()];
let mut tt = vec![];
tt.push(String::from("Upyun"));
tt.push(self.UpYunConfig.Operator.clone());
tt.push(":".to_string());
tt.push(md5str(sign.join("&")));
tt.concat()
}
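    // NOTE (editor's assumption, not confirmed by this source): UpYun's REST
    // Authorization header is usually "UpYun <operator>:<signature>" with a
    // space after "UpYun"; the concat above produces "Upyun<operator>:<signature>",
    // so a separating space may still be missing here.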
fn doRESTRequest(&mut self, config: &RestReqConfig) -> Result<(), Box<dyn std::error::Error>> {
        // the incoming uri gets percent-encoded here (utf-8 bytes become ascii
        // escapes), e.g. /sdk-test/xx/%E4%B8%AD%E6%96%87.log
        // escUri := path.Join("/", up.Bucket, escapeUri(config.uri))
        let mut escUri =
            String::from("/") + &self.UpYunConfig.Bucket + &escapeUri(config.uri.clone());
        if config.uri.chars().last().unwrap() == '/' {
            escUri += '/'.to_string().as_str()
        }
        if config.query != "" {
            escUri += ("?".to_owned() + &config.query).as_str()
        }
        let mut headers: HashMap<String, String> = HashMap::new();
        let mut has_md5: bool = false;
        let old_header: HashMap<String, String> = HashMap::new();
        for (k, v) in old_header {
            if k.to_lowercase() == "content-md5" && v != "" {
                has_md5 = true
            }
            // HashMap::insert returns the previous value (None for a new key),
            // so the original `.expect(...)` would panic on every fresh insert
            headers.insert(k, v);
        }
        headers.insert("Date".to_string(), makeRFC1123Date());
        headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // FIXME: why is this host fixed?
        if !has_md5 && config.useMD5 {
            // config.httpBody.
            // the type of httpBody needs to be checked here
            //// FIXME: depend on config.httpBody.type
            headers.insert("Content".to_string(), "xx".to_string());
        }
        if self.deprecated {
            if let Some(value) = headers.get("Content-Length") {
let size = 0;
}
}
Ok(())
}
}
fn md5str(s: String) -> String {
let mut hasher = md5::Md5::new();
hasher.input_str(&s);
hasher.result_str()
}
fn escapeUri(s: String) -> String {
// let s = String::from("/xx/中文.log");
if s == "" {
let _s = String::from("中文");
}
let escape: [u32; 8] = [
0xffffffff, 0xfc001fff, 0x78000001, 0xb8000001, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff,
];
let hexMap = "0123456789ABCDEF".as_bytes();
let mut size = 0;
let ss = s.as_bytes();
for i in 0..ss.len() {
let c = ss[i];
if escape.get((c >> 5) as usize).unwrap() & (1 << (c & 0x1f)) > 0 {
size += 3
} else {
size += 1
}
}
    // let ret = [0u8;size]; // a fixed-size array needs a const length -> compile error
    let mut ret = vec![0u8; size]; // a dynamically sized Vec works
let mut j = 0;
for i in 0..ss.len() {
let c = ss[i];
if escape[(c >> 5) as usize] & (1 << (c & 0x1f)) > 0 {
ret[j] = "%".as_bytes()[0];
// ret[j] = "%".parse::<u8>().unwrap();
ret[j + 1] = hexMap[(c >> 4) as usize];
ret[j + 2] = hexMap[(c & 0xf) as usize];
j += 3
} else {
ret[j] = c;
j += 1
}
}
from_utf8(&ret).unwrap().to_string()
}
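// Worked example (editor's sketch of the bitmap logic above): for a byte c,
// `escape[c >> 5]` selects one of eight 32-bit words and `1 << (c & 0x1f)` tests
// one bit of it. '/' is 0x2F -> word 1 (0xfc001fff), bit 15 is clear, so it is
// copied through; every UTF-8 byte of "中文" is >= 0x80 and lands in words 4..7,
// which are all ones, so each byte becomes %XX. Hence:
//   escapeUri("/xx/中文.log".to_string()) == "/xx/%E4%B8%AD%E6%96%87.log"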
fn unescapeUri(s: String) -> String {
    println!("============");
    // locate each '%' and convert the two hex digits that follow into one byte
    // let xx = "%";
    // let xxx = xx.as_bytes();
    // println!("change % to byte is ==> {:?}",xxx);
    // turn the incoming string into a char array, then walk it matching '%'
    // if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
    //     // if not correct, return original string
    //     return s
    // }
    // i += 3
    // first pass: count the decoded length (and validate the escapes)
    let mut n: i32 = 0;
    let s_vec: Vec<char> = s.chars().collect();
    // NOTE: the original used `for mut _i in ..` and bumped `_i` by hand, but a
    // `for` loop reassigns its variable on every iteration, so the skip never
    // happened; `while` loops match the Go source this was ported from
    let mut _i = 0;
    while _i < s_vec.len() {
        if s_vec[_i] == '%' {
            if _i + 2 >= s_vec.len() || !ishex(s_vec[_i + 1] as u8) || !ishex(s_vec[_i + 2] as u8) {
                return s;
            }
            _i += 3
        } else {
            _i += 1
        }
        n += 1
    }
    // second pass: write the decoded bytes; the Vec must be pre-sized, since
    // indexing an empty Vec would panic
    let mut t_vec: Vec<u8> = vec![0u8; n as usize];
    let mut j = 0;
    let mut _i = 0;
    while _i < s_vec.len() {
        if s_vec[_i] == '%' {
            t_vec[j] = unhex(s_vec[_i + 1] as u8) << 4 | unhex(s_vec[_i + 2] as u8);
            _i += 3
        } else {
            t_vec[j] = s_vec[_i] as u8;
            _i += 1
        }
        j += 1
    }
    from_utf8(&t_vec).unwrap().to_string()
}
// hex digit to decimal value
fn unhex(c: u8) -> u8 {
if '0' as u8 <= c && c <= '9' as u8 {
c - '0' as u8
} else if 'a' as u8 <= c && c <= 'f' as u8 {
c - 'a' as u8 + 10
} else if 'A' as u8 <= c && c <= 'F' as u8 {
c - 'A' as u8 + 10
} else {
0
}
}
// check whether a byte is a hex digit
fn ishex(c: u8) -> bool {
if '0' as u8 <= c && c <= '9' as u8 {
true
} else if 'a' as u8 <= c && c <= 'f' as u8 {
true
} else if 'A' as u8 <= c && c <= 'F' as u8 {
true
} else {
false
}
}
// HMAC the content using SHA-1
// func hmacSha1(key string, data []byte) []byte {
// hm := hmac.New(sha1.New, []byte(key))
// hm.Write(data)
// return hm.Sum(nil)
// }
fn hmacSha1(key: &[u8], value: &[u8]) -> String {
    // // first hash the key (similar to md5, just a different algorithm)
    // let mut hasher = crypto::sha1::Sha1::new();
    // hasher.input_str(&key);
    // let result = hasher.result_str().as_bytes();
    // let rr = vec![0u8;20];
    // rr.copy_from_slice(&result);
    // then hmac the hashed key together with the value
    // let h_mac = NewMac::new(&result)
let mut mac = hmac::Hmac::new(crypto::sha1::Sha1::new(), key);
mac.input(value);
let result = mac.result();
let code = result.code();
// The correct hash is returned, it's just not in the representation you expected. The hash is returned as raw bytes, not as bytes converted to ASCII hexadecimal digits.
// If we print the hash code array as hex, like this
// println!("{:02x?}", code);
let code_vec = code
.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<String>>();
code_vec.concat()
}
// func makeRFC1123Date(d time.Time) string {
// utc := d.UTC().Format(time.RFC1123)
// return strings.ReplaceAll(utc, "UTC", "GMT")
// }
fn makeRFC1123Date() -> String {
let time = Utc::now();
let time_utc = time.to_rfc2822();
let new_time_utf = time_utc.replace("+0000", "GMT");
new_time_utf
}
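// NOTE (editor's assumption): to_rfc2822() is close to, but not exactly, RFC 1123
// (e.g. the day of month may be unpadded), and the "+0000" -> "GMT" replace only
// works because the timestamp is always Utc::now() here; a dedicated RFC 1123
// formatter would be more robust.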
// base64 to string
// base64::decode_block(src)
#[cfg(test)]
mod tests {
use chrono::{Date, DateTime, Utc};
use hyper::http;
use std::{collections::HashMap, io::Read};
use crate::escapeUri;
use crate::hmacSha1;
use crate::makeRFC1123Date;
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
#[test]
fn parse_uri() {
let bucket = String::from("sdk-test");
let config = "/xx/中文.log/".to_string();
let query = "xxx";
let mut escUri = String::from("/") + &bucket + &escapeUri("/xx/中文.log".to_string());
if config.chars().last().unwrap() == '/' {
escUri += '/'.to_string().as_str()
}
        if query != "" {
            escUri += ("?".to_owned() + query).as_str()
        }
        // header set
        // hasmd5 set
        let mut headers: HashMap<String, String> = HashMap::new();
        let mut has_md5: bool = false;
        let old_header: HashMap<String, String> = HashMap::new();
        for (k, v) in old_header {
            if k.to_lowercase() == "content-md5" && v != "" {
                has_md5 = true
            }
            // insert returns the previous value (None for a new key), so the
            // original `.expect(...)` would panic on every fresh insert
            headers.insert(k, v);
        }
        headers.insert("Date".to_string(), makeRFC1123Date());
        headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // FIXME: why is this host fixed?
        // headers["Date"] = makeRFC1123Date(time.Now());
        // headers["Host"] = "v0.api.upyun.com"
        if !has_md5 {
            // check the body type here
}
let deprecated = "";
// if deprecated {}
if let Some(value) = headers.get("Content-Length") {
let size = 0;
}
}
// use crate::base64ToStr;
#[test]
fn make_unified_auth() {
let sign: Vec<&'static str> = vec!["method", "uri", "DateStr", "Policy", "ContentMD5"];
let mut sign_no_empty: Vec<String> = Vec::new();
for item in sign {
            if item != "" {
sign_no_empty.push(item.to_string());
}
}
let sign_bytes = sign_no_empty.join("&");
let password = "xx".as_bytes();
let sign_str =
openssl::base64::encode_block(hmacSha1(password, sign_bytes.as_bytes()).as_bytes());
let back_vec: Vec<String> = vec![
"Upyun".to_string(),
"Operator".to_string(),
":".to_string(),
sign_str,
];
let _back_str = back_vec.concat();
}
#[test]
fn hmac_test() {
let value = "xx".as_bytes();
let key = "yy".as_bytes();
assert_eq!(
"3124cf1daef6d713c312065988652d8b7fca587e".to_string(),
hmacSha1(key, value)
)
}
#[test]
fn makeRFC1123Date_test() {
let time = Utc::now();
let time_utc = time.to_rfc2822();
let new_time_utf = time_utc.replace("+0000", "GMT");
println!("{:?}", new_time_utf);
}
use crypto::{digest::Digest, md5::Md5};
#[test]
fn md5str() {
        // a note on md5:
        // the raw digest is long, so it is conventionally rendered as hex,
        // each character encoding 4 bits
let s = "xx".to_string();
// create
let mut hasher = Md5::new();
hasher.input_str(&s);
let xx = hasher.result_str();
println!("{:?}", xx);
}
// using hyper
#[test]
fn hyper_test(){
let method = http::Method::GET;
let url = "http://www.baidu.com";
let headers: HashMap<String, String> = HashMap::new();
let body = [0u8;32];
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
println!("{:?}{:?}",&key, &value);
if key.to_lowercase() == "host"{
// req.
} else {
println!("{:?}", req.headers())
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
}
fn main() {
let BUCKET = "sdk-test".to_string();
let CNAME = "sdk-test.b0-aicdn.com".to_string();
let USER = "sdk".to_string();
let PASSWORD = "IwwEqRmUgs29IdNOzDT3ePFz7Q9iMT5m".to_string();
//
let up = UpYun::new(UpYunConfig::new(BUCKET, USER, PASSWORD).build()).build();
let put_object_config = PutObjectConfig::new().set_local_path("".to_string()).set_path("/xx/中文.log".to_string()).build();
println!("{:?}",up);
// let rqc = RestReqConfig::new()
// .set_uri("/xx/中文.log".to_string())
// .build();
}
| build | identifier_name |
main.rs | // #! global
// #![warn()] enable
// #![allow()] disable
#![allow(non_snake_case)]
#![allow(dead_code)]
use hmac::Hmac;
use http::method;
use openssl::{hash, pkcs5::pbkdf2_hmac};
use reqwest::Client;
use std::{borrow::Borrow, collections::{HashMap, VecDeque}, default, ops::Index};
use std::{fs::File, usize};
// use sha::{Sha1, sha1};
use std::str::from_utf8;
// use hmac::{Hmac, Mac, NewMac};
use chrono::Utc;
use crypto::{digest::Digest, hmac, mac::Mac, md5, sha1};
use hyper::{Request, header::Keys, http};
use log::info;
#[derive(Default,Debug)]
struct UpYunConfig {
Bucket: String,
Operator: String,
Password: String,
Secret: String,
Hosts: HashMap<String, String>,
UserAgent: String,
}
#[derive(Default,Debug)]
struct UpYun {
UpYunConfig: UpYunConfig,
httpc: String,
deprecated: bool,
}
#[derive(Default)]
struct RestReqConfig {
method: String,
uri: String,
query: String,
headers: HashMap<String, String>,
closeBody: bool,
httpBody: String,
useMD5: bool,
}
#[derive(Default)]
struct RestAuthConfig {
method: String,
uri: String,
DateStr: String,
LengthStr: String,
}
#[derive(Default,Debug)]
struct PutObjectConfig {
path: String,
local_path: String,
reader: Vec<u8>,
headers: HashMap<String,String>,
use_md5: bool,
user_resume_upload: bool,
resume_partsize: i64,
max_resume_put_tries: i32,
}
impl PutObjectConfig {
fn new() -> Self{
PutObjectConfig{
..Default::default()
}
}
fn build(self) -> Self{
self
}
fn set_path(mut self, path: String) -> Self{
self.path = path;
self
}
fn set_local_path(mut self, local_path: String) -> Self{
self.local_path = local_path;
self
}
}
impl RestReqConfig {
fn new() -> Self {
RestReqConfig {
..Default::default()
}
}
fn set_method(mut self, method: String) -> Self {
self.method = method;
self
}
fn set_uri(mut self, uri: String) -> Self {
self.uri = uri;
self
}
fn set_query(mut self, query: String) -> Self {
self.query = query;
self
}
fn set_headers(mut self, headers: HashMap<String, String>) -> Self {
self.headers = headers;
self
}
fn set_close(mut self, close: bool) -> Self {
self.closeBody = close;
self
}
fn set_usemd5(mut self, usemd5: bool) -> Self {
self.useMD5 = usemd5;
self
}
fn build(self) -> Self {
self
}
}
impl UpYunConfig {
fn new(Bucket: String, Operator: String, Password: String) -> Self{
UpYunConfig{
Bucket:Bucket,
Operator:Operator,
Password: Password,
..Default::default()
}
}
fn build(mut self) -> Self {
self
}
}
impl UpYun {
fn new(config: UpYunConfig) -> Self{
// init upyunconfig
UpYun{
UpYunConfig: config,
..Default::default()
}
}
fn set_httpc(mut self) -> Self{
self.httpc = "".to_string();
self
}
fn set_deprecated(mut self) -> Self{
self.deprecated = true;
self
}
fn build(self) -> Self{
self
}
// func (up *UpYun) Put(config *PutObjectConfig) (err error) {
// if config.LocalPath!= "" {
// var fd *os.File
// if fd, err = os.Open(config.LocalPath); err!= nil {
// return errorOperation("open file", err)
// }
// defer fd.Close()
// config.Reader = fd
// }
    // if config.UseResumeUpload { // whether to use resumable upload
    // logrus.Info("up.resumePut")
    // return up.resumePut(config)
    // } else {
    // logrus.Info("up.put") // normal upload
// return up.put(config)
// }
// }
    fn Put(&mut self, config: PutObjectConfig) {
        if config.local_path != "" {
            // open the local file here (see put_file below)
        }
        if config.user_resume_upload {
            // resumable upload is not implemented yet
            info!("resumable upload not yet implemented")
        } else {
            info!("normal upload")
        }
    }
fn put_file(
&mut self,
file_path: String,
filepath: String,
) -> Result<(), Box<dyn std::error::Error>> {
        if file_path != "" {
match File::open(filepath) {
Ok(file) => {
println!("{:?}", file)
}
Err(e) => {
println!("open file error{:?}", e)
}
}
}
Ok(())
}
fn doGetEndpoint(&mut self, host: String) -> String {
match self.UpYunConfig.Hosts.get(&host){
            Some(value) => value.to_string(),
None => host
}
}
/// FIXME
fn doHTTPRequest(&mut self,method: hyper::Method, url:String, headers: HashMap<String,String>, body: Vec<u8>){
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
if key.to_lowercase() == "host"{
// req.
} else {
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
fn MakeRESTAuth(&mut self,config: RestAuthConfig) -> String{
let sign = vec![config.method, config.uri, config.DateStr, config.LengthStr, self.UpYunConfig.Password.clone()];
let mut tt = vec![];
tt.push(String::from("Upyun"));
tt.push(self.UpYunConfig.Operator.clone());
tt.push(":".to_string());
tt.push(md5str(sign.join("&")));
tt.concat()
}
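    // NOTE (editor's assumption, not confirmed by this source): UpYun's REST
    // Authorization header is usually "UpYun <operator>:<signature>" with a
    // space after "UpYun"; the concat above produces "Upyun<operator>:<signature>",
    // so a separating space may still be missing here.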
fn doRESTRequest(&mut self, config: &RestReqConfig) -> Result<(), Box<dyn std::error::Error>> {
        // the incoming uri gets percent-encoded here (utf-8 bytes become ascii
        // escapes), e.g. /sdk-test/xx/%E4%B8%AD%E6%96%87.log
        // escUri := path.Join("/", up.Bucket, escapeUri(config.uri))
        let mut escUri =
            String::from("/") + &self.UpYunConfig.Bucket + &escapeUri(config.uri.clone());
        if config.uri.chars().last().unwrap() == '/' {
            escUri += '/'.to_string().as_str()
        }
        if config.query != "" {
            escUri += ("?".to_owned() + &config.query).as_str()
        }
        let mut headers: HashMap<String, String> = HashMap::new();
        let mut has_md5: bool = false;
        let old_header: HashMap<String, String> = HashMap::new();
        for (k, v) in old_header {
            if k.to_lowercase() == "content-md5" && v != "" {
                has_md5 = true
            }
            // HashMap::insert returns the previous value (None for a new key),
            // so the original `.expect(...)` would panic on every fresh insert
            headers.insert(k, v);
        }
        headers.insert("Date".to_string(), makeRFC1123Date());
        headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // FIXME: why is this host fixed?
        if !has_md5 && config.useMD5 {
            // config.httpBody.
            // the type of httpBody needs to be checked here
            //// FIXME: depend on config.httpBody.type
            headers.insert("Content".to_string(), "xx".to_string());
        }
        if self.deprecated {
            if let Some(value) = headers.get("Content-Length") {
let size = 0;
}
}
Ok(())
}
}
fn md5str(s: String) -> String {
let mut hasher = md5::Md5::new();
hasher.input_str(&s);
hasher.result_str()
}
fn escapeUri(s: String) -> String {
// let s = String::from("/xx/中文.log");
if s == "" {
let _s = String::from("中文");
}
let escape: [u32; 8] = [
0xffffffff, 0xfc001fff, 0x78000001, 0xb8000001, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff,
];
let hexMap = "0123456789ABCDEF".as_bytes();
let mut size = 0;
let ss = s.as_bytes();
for i in 0..ss.len() {
let c = ss[i];
if escape.get((c >> 5) as usize).unwrap() & (1 << (c & 0x1f)) > 0 {
size += 3
} else {
size += 1
}
}
    // let ret = [0u8;size]; // a fixed-size array needs a const length -> compile error
    let mut ret = vec![0u8; size]; // a dynamically sized Vec works
let mut j = 0;
for i in 0..ss.len() {
let c = ss[i];
if escape[(c >> 5) as usize] & (1 << (c & 0x1f)) > 0 {
ret[j] = "%".as_bytes()[0];
// ret[j] = "%".parse::<u8>().unwrap();
ret[j + 1] = hexMap[(c >> 4) as usize];
ret[j + 2] = hexMap[(c & 0xf) as usize];
j += 3
} else {
ret[j] = c;
j += 1
}
}
from_utf8(&ret).unwrap().to_string()
}
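// Worked example (editor's sketch of the bitmap logic above): for a byte c,
// `escape[c >> 5]` selects one of eight 32-bit words and `1 << (c & 0x1f)` tests
// one bit of it. '/' is 0x2F -> word 1 (0xfc001fff), bit 15 is clear, so it is
// copied through; every UTF-8 byte of "中文" is >= 0x80 and lands in words 4..7,
// which are all ones, so each byte becomes %XX. Hence:
//   escapeUri("/xx/中文.log".to_string()) == "/xx/%E4%B8%AD%E6%96%87.log"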
fn unescapeUri(s: String) -> String {
    println!("============");
    // locate each '%' and convert the two hex digits that follow into one byte
    // let xx = "%";
    // let xxx = xx.as_bytes();
    // println!("change % to byte is ==> {:?}",xxx);
    // turn the incoming string into a char array, then walk it matching '%'
    // if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
    //     // if not correct, return original string
    //     return s
    // }
    // i += 3
    // first pass: count the decoded length (and validate the escapes)
    let mut n: i32 = 0;
    let s_vec: Vec<char> = s.chars().collect();
    // NOTE: the original used `for mut _i in ..` and bumped `_i` by hand, but a
    // `for` loop reassigns its variable on every iteration, so the skip never
    // happened; `while` loops match the Go source this was ported from
    let mut _i = 0;
    while _i < s_vec.len() {
        if s_vec[_i] == '%' {
            if _i + 2 >= s_vec.len() || !ishex(s_vec[_i + 1] as u8) || !ishex(s_vec[_i + 2] as u8) {
                return s;
            }
            _i += 3
        } else {
            _i += 1
        }
        n += 1
    }
    // second pass: write the decoded bytes; the Vec must be pre-sized, since
    // indexing an empty Vec would panic
    let mut t_vec: Vec<u8> = vec![0u8; n as usize];
    let mut j = 0;
    let mut _i = 0;
    while _i < s_vec.len() {
        if s_vec[_i] == '%' {
            t_vec[j] = unhex(s_vec[_i + 1] as u8) << 4 | unhex(s_vec[_i + 2] as u8);
            _i += 3
        } else {
            t_vec[j] = s_vec[_i] as u8;
            _i += 1
        }
        j += 1
    }
    from_utf8(&t_vec).unwrap().to_string()
}
// hex digit to decimal value
fn unhex(c: u8) -> u8 {
if '0' as u8 <= c && c <= '9' as u8 {
c - '0' as u8
} else if 'a' as u8 <= c && c <= 'f' as u8 {
c - 'a' as u8 + 10
} else if 'A' as u8 <= c && c <= 'F' as u8 {
c - 'A' as u8 + 10
} else {
0
}
}
// check whether a byte is a hex digit
fn ishex(c: u8) -> bool {
if '0' as u8 <= c && c <= '9' as u8 {
true
} else if 'a' as u8 <= c && c <= 'f' as u8 {
true
} else if 'A' as u8 <= c && c <= 'F' as u8 {
true
} else {
false
}
}
// HMAC the content using SHA-1
// func hmacSha1(key string, data []byte) []byte {
// hm := hmac.New(sha1.New, []byte(key))
// hm.Write(data)
// return hm.Sum(nil)
// }
fn hmacSha1(key: &[u8], value: &[u8]) -> String {
    // // first hash the key (similar to md5, just a different algorithm)
    // let mut hasher = crypto::sha1::Sha1::new();
    // hasher.input_str(&key);
    // let result = hasher.result_str().as_bytes();
    // let rr = vec![0u8;20];
    // rr.copy_from_slice(&result);
    // then hmac the hashed key together with the value
    // let h_mac = NewMac::new(&result)
let mut mac = hmac::Hmac::new(crypto::sha1::Sha1::new(), key);
mac.input(value);
let result = mac.result();
let code = result.code();
// The correct hash is returned, it's just not in the representation you expected. The hash is returned as raw bytes, not as bytes converted to ASCII hexadecimal digits.
// If we print the hash code array as hex, like this
// println!("{:02x?}", code);
let code_vec = code
.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<String>>();
code_vec.concat()
}
// func makeRFC1123Date(d time.Time) string {
// utc := d.UTC().Format(time.RFC1123)
// return strings.ReplaceAll(utc, "UTC", "GMT")
// }
fn makeRFC1123Date() -> String {
let time = Utc::now();
let time_utc = time.to_rfc2822();
let new_time_utf = time_utc.replace("+0000", "GMT");
new_time_utf
}
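// NOTE (editor's assumption): to_rfc2822() is close to, but not exactly, RFC 1123
// (e.g. the day of month may be unpadded), and the "+0000" -> "GMT" replace only
// works because the timestamp is always Utc::now() here; a dedicated RFC 1123
// formatter would be more robust.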
// base64 to string
// base64::decode_block(src)
#[cfg(test)]
mod tests {
use chrono::{Date, DateTime, Utc};
use hyper::http;
use std::{collections::HashMap, io::Read};
use crate::escapeUri;
use crate::hmacSha1;
use crate::makeRFC1123Date;
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
#[test]
fn parse_uri() {
let bucket = String::from("sdk-test");
let config = "/xx/中文.log/".to_string();
let query = "xxx";
let mut escUri = String::from("/") + &bucket + &escapeUri("/xx/中文.log".to_string());
if config.chars().last().unwrap() == '/' {
escUri += '/'.to_string().as_str()
}
        if query != "" {
            escUri += ("?".to_owned() + query).as_str()
        }
        // header set
        // hasmd5 set
        let mut headers: HashMap<String, String> = HashMap::new();
        let mut has_md5: bool = false;
        let old_header: HashMap<String, String> = HashMap::new();
        for (k, v) in old_header {
            if k.to_lowercase() == "content-md5" && v != "" {
                has_md5 = true
            }
            // insert returns the previous value (None for a new key), so the
            // original `.expect(...)` would panic on every fresh insert
            headers.insert(k, v);
        }
        headers.insert("Date".to_string(), makeRFC1123Date());
        headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // FIXME: why is this host fixed?
        // headers["Date"] = makeRFC1123Date(time.Now());
        // headers["Host"] = "v0.api.upyun.com"
        if !has_md5 {
            // check the body type here
}
let deprecated = "";
// if deprecated {}
if let Some(value) = headers.get("Content-Length") {
let size = 0;
}
}
// use crate::base64ToStr;
#[test]
fn make_unified_auth() {
let sign: Vec<&'static str> = vec!["method", "uri", "DateStr", "Policy", "ContentMD5"];
let mut sign_no_empty: Vec<String> = Vec::new();
for item in sign {
            if item != "" {
sign_no_empty.push(item.to_string());
}
}
let sign_bytes = sign_no_empty.join("&");
let password = "xx".as_bytes();
let sign_str =
openssl::base64::encode_block(hmacSha1(password, sign_bytes.as_bytes()).as_bytes());
let back_vec: Vec<St | .to_string(),
":".to_string(),
sign_str,
];
let _back_str = back_vec.concat();
}
#[test]
fn hmac_test() {
let value = "xx".as_bytes();
let key = "yy".as_bytes();
assert_eq!(
"3124cf1daef6d713c312065988652d8b7fca587e".to_string(),
hmacSha1(key, value)
)
}
#[test]
fn makeRFC1123Date_test() {
let time = Utc::now();
let time_utc = time.to_rfc2822();
let new_time_utf = time_utc.replace("+0000", "GMT");
println!("{:?}", new_time_utf);
}
use crypto::{digest::Digest, md5::Md5};
#[test]
fn md5str() {
        // a note on md5:
        // the raw digest is long, so it is conventionally rendered as hex,
        // each character encoding 4 bits
let s = "xx".to_string();
// create
let mut hasher = Md5::new();
hasher.input_str(&s);
let xx = hasher.result_str();
println!("{:?}", xx);
}
// using hyper
#[test]
fn hyper_test(){
let method = http::Method::GET;
let url = "http://www.baidu.com";
let headers: HashMap<String, String> = HashMap::new();
let body = [0u8;32];
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
println!("{:?}{:?}",&key, &value);
if key.to_lowercase() == "host"{
// req.
} else {
println!("{:?}", req.headers())
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
}
fn main() {
let BUCKET = "sdk-test".to_string();
let CNAME = "sdk-test.b0-aicdn.com".to_string();
let USER = "sdk".to_string();
let PASSWORD = "IwwEqRmUgs29IdNOzDT3ePFz7Q9iMT5m".to_string();
//
let up = UpYun::new(UpYunConfig::new(BUCKET, USER, PASSWORD).build()).build();
let put_object_config = PutObjectConfig::new().set_local_path("".to_string()).set_path("/xx/中文.log".to_string()).build();
println!("{:?}",up);
// let rqc = RestReqConfig::new()
// .set_uri("/xx/中文.log".to_string())
// .build();
}
| ring> = vec![
"Upyun".to_string(),
"Operator" | conditional_block |
fs.rs | use core_collections::borrow::ToOwned;
use io::{self, BufRead, BufReader, Read, Error, Result, Write, Seek, SeekFrom};
use os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use mem;
use path::{PathBuf, Path};
use string::String;
use sys_common::AsInner;
use vec::Vec;
use syscall::{open, dup, close, fpath, fstat, ftruncate, read,
write, lseek, fsync, mkdir, rmdir, unlink};
use syscall::{O_RDWR, O_RDONLY, O_WRONLY, O_APPEND, O_CREAT, O_TRUNC, MODE_DIR, MODE_FILE, MODE_PERM, SEEK_SET, SEEK_CUR, SEEK_END, Stat};
/// A Unix-style file
#[derive(Debug)]
pub struct File {
/// The id for the file
fd: usize,
}
impl File {
/// Open a new file using a path
pub fn open<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_RDONLY).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Create a new file using a path
pub fn create<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_CREAT | O_RDWR | O_TRUNC | 0o664).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Duplicate the file
pub fn dup(&self, buf: &[u8]) -> Result<File> {
dup(self.fd, buf).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Get information about a file
pub fn metadata(&self) -> Result<Metadata> {
let mut stat = Stat::default();
try!(fstat(self.fd, &mut stat).map_err(|x| Error::from_sys(x)));
Ok(Metadata {
stat: stat
})
}
/// Get the canonical path of the file
pub fn path(&self) -> Result<PathBuf> {
let mut buf: [u8; 4096] = [0; 4096];
match fpath(self.fd, &mut buf) {
Ok(count) => Ok(PathBuf::from(unsafe { String::from_utf8_unchecked(Vec::from(&buf[0..count])) })),
Err(err) => Err(Error::from_sys(err)),
}
}
/// Flush the file data and metadata
pub fn sync_all(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Flush the file data
pub fn sync_data(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Truncates the file
pub fn set_len(&self, size: u64) -> Result<()> {
ftruncate(self.fd, size as usize).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl AsRawFd for File {
fn as_raw_fd(&self) -> RawFd {
self.fd
}
}
impl FromRawFd for File {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
File {
fd: fd
}
}
}
impl IntoRawFd for File {
fn into_raw_fd(self) -> RawFd {
let fd = self.fd;
mem::forget(self);
fd
}
}
impl Read for File {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
read(self.fd, buf).map_err(|x| Error::from_sys(x))
}
}
impl Write for File {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
write(self.fd, buf).map_err(|x| Error::from_sys(x))
}
fn flush(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl Seek for File {
/// Seek a given position
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
let (whence, offset) = match pos {
SeekFrom::Start(offset) => (SEEK_SET, offset as isize),
SeekFrom::Current(offset) => (SEEK_CUR, offset as isize),
SeekFrom::End(offset) => (SEEK_END, offset as isize),
};
lseek(self.fd, offset, whence).map(|position| position as u64).map_err(|x| Error::from_sys(x))
}
}
impl Drop for File {
fn drop(&mut self) {
let _ = close(self.fd);
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct FileType {
dir: bool,
file: bool,
}
impl FileType {
pub fn is_dir(&self) -> bool {
self.dir
}
pub fn is_file(&self) -> bool {
self.file
}
pub fn is_symlink(&self) -> bool {
false
}
}
impl ::os::unix::fs::FileTypeExt for FileType {
fn is_block_device(&self) -> bool { false }
fn is_char_device(&self) -> bool { false }
fn is_fifo(&self) -> bool { false }
fn is_socket(&self) -> bool { false }
}
pub struct OpenOptions {
read: bool,
write: bool,
append: bool,
create: bool,
truncate: bool,
mode: u16,
}
impl OpenOptions {
pub fn new() -> OpenOptions {
OpenOptions {
read: false,
write: false,
append: false,
create: false,
truncate: false,
mode: 0,
}
}
pub fn read(&mut self, read: bool) -> &mut OpenOptions {
self.read = read;
self
}
pub fn write(&mut self, write: bool) -> &mut OpenOptions {
self.write = write;
self
}
pub fn append(&mut self, append: bool) -> &mut OpenOptions {
self.append = append;
self
}
pub fn create(&mut self, create: bool) -> &mut OpenOptions {
self.create = create;
self
}
pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions {
self.truncate = truncate;
self
}
pub fn open<P: AsRef<Path>>(&self, path: P) -> Result<File> {
let mut flags = 0;
if self.read && self.write {
flags |= O_RDWR;
} else if self.read {
flags |= O_RDONLY;
} else if self.write {
flags |= O_WRONLY;
}
if self.append {
flags |= O_APPEND;
}
if self.create {
flags |= O_CREAT;
}
if self.truncate {
flags |= O_TRUNC;
}
flags |= (self.mode & MODE_PERM) as usize;
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, flags).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
}
impl ::os::unix::fs::OpenOptionsExt for OpenOptions {
fn mode(&mut self, mode: u32) -> &mut Self {
self.mode = mode as u16;
self
}
}
pub struct Metadata {
stat: Stat
}
impl Metadata {
pub fn file_type(&self) -> FileType {
FileType {
dir: self.stat.st_mode & MODE_DIR == MODE_DIR,
file: self.stat.st_mode & MODE_FILE == MODE_FILE
}
}
pub fn is_dir(&self) -> bool {
self.stat.st_mode & MODE_DIR == MODE_DIR
}
pub fn is_file(&self) -> bool {
self.stat.st_mode & MODE_FILE == MODE_FILE
}
pub fn len(&self) -> u64 {
self.stat.st_size
}
pub fn permissions(&self) -> Permissions {
Permissions {
mode: self.stat.st_mode & MODE_PERM
}
}
}
impl ::os::unix::fs::MetadataExt for Metadata {
fn mode(&self) -> u32 {
self.stat.st_mode as u32
}
fn uid(&self) -> u32 {
self.stat.st_uid
}
fn gid(&self) -> u32 {
self.stat.st_gid
}
fn size(&self) -> u64 {
self.stat.st_size
}
}
pub struct Permissions {
mode: u16
}
impl Permissions {
pub fn readonly(&self) -> bool {
self.mode & 0o222 == 0
}
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
            self.mode &= !0o222;
} else {
self.mode |= 0o222;
}
}
}
impl ::os::unix::fs::PermissionsExt for Permissions {
fn mode(&self) -> u32 {
self.mode as u32
}
fn set_mode(&mut self, mode: u32) {
self.mode = mode as u16;
}
fn from_mode(mode: u32) -> Self {
Permissions {
mode: mode as u16
}
}
}
pub struct DirEntry {
path: PathBuf,
}
impl DirEntry {
pub fn file_name(&self) -> &Path {
unsafe { mem::transmute(self.path.file_name().unwrap().to_str().unwrap()) }
}
pub fn file_type(&self) -> Result<FileType> {
self.metadata().map(|metadata| metadata.file_type())
}
pub fn metadata(&self) -> Result<Metadata> {
metadata(&self.path)
}
pub fn path(&self) -> PathBuf {
self.path.clone()
}
}
pub struct ReadDir {
path: PathBuf,
file: BufReader<File>,
}
impl Iterator for ReadDir {
type Item = Result<DirEntry>;
fn next(&mut self) -> Option<Result<DirEntry>> {
let mut name = String::new();
match self.file.read_line(&mut name) {
Ok(0) => None,
Ok(_) => {
if name.ends_with('\n') {
name.pop();
}
let mut path = self.path.clone();
path.push(name);
Some(Ok(DirEntry {
path: path
}))
},
Err(err) => Some(Err(err))
}
}
}
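// NOTE (editor's reading of the code above): on Redox a directory is opened like an
// ordinary file and read back as newline-separated entry names, which is why ReadDir
// is built on BufRead::read_line rather than a dedicated readdir syscall.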
/// Find the canonical path of a file
pub fn canonicalize<P: AsRef<Path>>(path: P) -> Result<PathBuf> {
match File::open(path) {
Ok(file) => {
match file.path() {
Ok(realpath) => Ok(realpath),
Err(err) => Err(err)
}
},
Err(err) => Err(err)
}
}
/// Get information about a file
pub fn metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
try!(File::open(path)).metadata()
}
/// Get information about a file without following symlinks
/// Warning: Redox does not currently support symlinks
pub fn symlink_metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
metadata(path)
}
/// Create a new directory, using a path
/// The default mode of the directory is 775
pub fn create_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
mkdir(path_str, 0o775).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Recursively create a directory and all of its parent components if they are missing.
pub fn create_dir_all<P: AsRef<Path>>(path: P) -> Result<()> {
if let Some(parent) = path.as_ref().parent() {
try!(create_dir_all(&parent));
}
if let Err(_err) = metadata(&path) {
try!(create_dir(&path));
}
Ok(())
}
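// Example (editor's sketch): create_dir_all("a/b/c") first recurses through "a" and
// "a/b", creating each component only when the metadata() probe fails, and then
// creates "a/b/c" itself.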
/// Copy the contents of one file to another
pub fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<u64> {
let mut infile = try!(File::open(from));
let mut outfile = try!(File::create(to));
io::copy(&mut infile, &mut outfile)
}
/// Rename a file or directory to a new name
pub fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<()> {
try!(copy(Path::new(from.as_ref()), to));
remove_file(from)
}
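// NOTE: rename is emulated as copy-then-remove, so it is not atomic; a failure after
// the copy can leave both the source and the destination in place.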
/// Return an iterator over the entries within a directory
pub fn read_dir<P: AsRef<Path>>(path: P) -> Result<ReadDir> {
let path_buf = path.as_ref().to_owned();
File::open(&path_buf).map(|file| ReadDir { path: path_buf, file: BufReader::new(file) })
}
/// Removes an existing, empty directory
pub fn remove_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
rmdir(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Removes a directory at this path, after removing all its contents. Use carefully!
pub fn remove_dir_all<P: AsRef<Path>>(path: P) -> Result<()> {
for child in try!(read_dir(&path)) {
let child = try!(child);
if try!(child.file_type()).is_dir() {
try!(remove_dir_all(&child.path()));
} else |
}
remove_dir(path)
}
/// Removes a file from the filesystem
pub fn remove_file<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
unlink(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
}
| {
try!(remove_file(&child.path()));
} | conditional_block |
localization.rs | // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Localization handling.
//!
//! Localization is backed by [Fluent], via [fluent-rs].
//!
//! In Druid, the main way you will deal with localization is via the
//! [`LocalizedString`] struct.
//!
//! You construct a [`LocalizedString`] with a key, which identifies a 'message'
//! in your `.flt` files. If your string requires arguments, you supply it with
//! closures that can extract those arguments from the current [`Env`] and
//! [`Data`].
//!
//! At runtime, you resolve your [`LocalizedString`] into an actual string,
//! passing it the current [`Env`] and [`Data`].
//!
//!
//! [Fluent]: https://projectfluent.org
//! [fluent-rs]: https://github.com/projectfluent/fluent-rs
//! [`LocalizedString`]: struct.LocalizedString.html
//! [`Env`]: struct.Env.html
//! [`Data`]: trait.Data.html
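//!
//! A minimal sketch of that flow (illustrative: it assumes a `Counter(u32)`
//! data type implementing [`Data`], a `"count"` message key, and that
//! `ArgValue` can be built from a `u32`; none of these are fixed by this
//! module):
//!
//! ```ignore
//! let mut label = LocalizedString::new("count")
//!     .with_arg("count", |data: &Counter, _env| ArgValue::from(data.0));
//! // Re-resolve whenever the data or environment may have changed:
//! if label.resolve(&counter, &env) {
//!     let text = label.localized_str();
//! }
//! ```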
/* ////
use std::collections::HashMap;
use std::sync::Arc;
use std::{fs, io};
use log::{debug, error, warn};
*/ ////
use crate::data::Data;
use crate::env::Env;
use crate::{ArgValue, ArgValues}; ////
/* ////
use fluent_bundle::{
FluentArgs, FluentBundle, FluentError, FluentMessage, FluentResource, FluentValue,
};
use fluent_locale::{negotiate_languages, NegotiationStrategy};
use fluent_syntax::ast::Pattern as FluentPattern;
use unic_langid::LanguageIdentifier;
// Localization looks for string files in druid/resources, but this path is hardcoded;
// it will only work if you're running an example from the druid/ directory.
// At some point we will need to bundle strings with applications, and choose
// the path dynamically.
static FALLBACK_STRINGS: &str = include_str!("../resources/i18n/en-US/builtin.ftl");
/// Provides access to the localization strings for the current locale.
#[allow(dead_code)]
pub(crate) struct L10nManager {
// these two are not currently used; will be used when we let the user
// add additional localization files.
res_mgr: ResourceManager,
resources: Vec<String>,
current_bundle: BundleStack,
current_locale: LanguageIdentifier,
}
/// Manages a collection of localization files.
struct ResourceManager {
resources: HashMap<String, Arc<FluentResource>>,
locales: Vec<LanguageIdentifier>,
default_locale: LanguageIdentifier,
path_scheme: String,
}
*/ ////
type MaxLocalizedString = heapless::consts::U20; //// Max length of localized strings
type String = heapless::String::<MaxLocalizedString>; ////
type MaxLocalizedArg = heapless::consts::U2; //// Max number of localized args
type Vec<T> = heapless::Vec::<T, MaxLocalizedArg>; ///
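//// Note: with `heapless`, both the resolved string and the argument list
//// have fixed capacities (20 bytes / 2 args here), so the `push` and
//// `insert` calls below return `Result` and are unwrapped with `expect`.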
//////NOTE: instead of a closure, at some point we can use something like a lens for this.
//////TODO: this is an Arc so that it can be clone, which is a bound on things like `Menu`.
/////// A closure that generates a localization value.
type ArgClosure<T> = fn(&T, &Env) -> ArgValue; ////
////type ArgClosure<T> = Arc<dyn Fn(&T, &Env) -> FluentValue<'static> +'static>;
/// Wraps a closure that generates an argument for localization.
#[derive(Clone)]
struct ArgSource<T>(ArgClosure<T>);
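//// For example, an `ArgClosure<T>` might read a field out of the app data:
//// `|data: &AppState, _env| ArgValue::from(data.count)` (names illustrative).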
/// A string that can be localized based on the current locale.
///
/// At its simplest, a `LocalizedString` is a key that can be resolved
/// against a map of localized strings for a given locale.
#[derive(Clone)]////
////#[derive(Debug, Clone)]
pub struct LocalizedString<T> {
pub(crate) key: &'static str,
placeholder: Option<String>,
args: Option<Vec<(&'static str, ArgSource<T>)>>,
resolved: Option<String>,
////resolved_lang: Option<LanguageIdentifier>,
}
/* ////
/// A stack of localization resources, used for fallback.
struct BundleStack(Vec<FluentBundle<Arc<FluentResource>>>);
impl BundleStack {
fn get_message(&self, id: &str) -> Option<FluentMessage> {
self.0.iter().flat_map(|b| b.get_message(id)).next()
}
fn format_pattern(
&self,
id: &str,
pattern: &FluentPattern,
args: Option<&FluentArgs>,
errors: &mut Vec<FluentError>,
) -> String {
for bundle in self.0.iter() {
if bundle.has_message(id) {
return bundle.format_pattern(pattern, args, errors).to_string();
}
}
format!("localization failed for key '{}'", id)
}
}
//NOTE: much of this is adapted from https://github.com/projectfluent/fluent-rs/blob/master/fluent-resmgr/src/resource_manager.rs
impl ResourceManager {
/// Loads a new localization resource from disk, as needed.
fn get_resource(&mut self, res_id: &str, locale: &str) -> Arc<FluentResource> {
let path = self
.path_scheme
.replace("{locale}", locale)
.replace("{res_id}", res_id);
if let Some(res) = self.resources.get(&path) {
res.clone()
} else {
let string = fs::read_to_string(&path).unwrap_or_else(|_| {
if (res_id, locale) == ("builtin.ftl", "en-US") {
FALLBACK_STRINGS.to_string()
} else {
error!("missing resource {}/{}", locale, res_id);
String::new()
}
});
let res = match FluentResource::try_new(string) {
Ok(res) => Arc::new(res),
Err((res, _err)) => Arc::new(res),
};
self.resources.insert(path, res.clone());
res
}
}
/// Return the best localization bundle for the provided `LanguageIdentifier`.
fn get_bundle(&mut self, locale: &LanguageIdentifier, resource_ids: &[String]) -> BundleStack {
let resolved_locales = self.resolve_locales(locale.clone());
debug!("resolved: {}", PrintLocales(resolved_locales.as_slice()));
let mut stack = Vec::new();
for locale in &resolved_locales {
let mut bundle = FluentBundle::new(&resolved_locales);
for res_id in resource_ids {
let res = self.get_resource(&res_id, &locale.to_string());
bundle.add_resource(res).unwrap();
}
stack.push(bundle);
}
BundleStack(stack)
}
/// Given a locale, returns the best set of available locales.
pub(crate) fn resolve_locales(&self, locale: LanguageIdentifier) -> Vec<LanguageIdentifier> {
negotiate_languages(
&[locale],
&self.locales,
Some(&self.default_locale),
NegotiationStrategy::Filtering,
)
.into_iter()
.map(|l| l.to_owned())
.collect()
}
}
impl L10nManager {
/// Create a new localization manager.
///
/// `resources` is a list of file names that contain strings. `base_dir`
/// is a path to a directory that includes per-locale subdirectories.
///
/// This directory should be of the structure `base_dir/{locale}/{resource}`,
/// where '{locale}' is a valid BCP47 language tag, and {resource} is a `.ftl`
/// included in `resources`.
pub fn new(resources: Vec<String>, base_dir: &str) -> Self {
fn get_available_locales(base_dir: &str) -> Result<Vec<LanguageIdentifier>, io::Error> {
let mut locales = vec![];
let res_dir = fs::read_dir(base_dir)?;
for entry in res_dir {
if let Ok(entry) = entry {
let path = entry.path();
if path.is_dir() {
if let Some(name) = path.file_name() {
if let Some(name) = name.to_str() {
let langid: LanguageIdentifier =
name.parse().expect("Parsing failed.");
locales.push(langid);
}
}
}
}
}
Ok(locales)
}
let default_locale: LanguageIdentifier =
"en-US".parse().expect("failed to parse default locale");
let current_locale = Application::get_locale()
.parse()
.unwrap_or_else(|_| default_locale.clone());
let locales = get_available_locales(base_dir).unwrap_or_default();
debug!(
"available locales {}, current {}",
PrintLocales(&locales),
current_locale,
);
let mut path_scheme = base_dir.to_string();
path_scheme.push_str("/{locale}/{res_id}");
let mut res_mgr = ResourceManager {
resources: HashMap::new(),
path_scheme,
default_locale,
locales,
};
let current_bundle = res_mgr.get_bundle(¤t_locale, &resources);
L10nManager {
res_mgr,
current_bundle,
resources,
current_locale,
}
}
/// Fetch a localized string from the current bundle by key.
///
/// In general, this should not be used directly; [`LocalizedString`]
/// should be used for localization, and you should call
/// [`LocalizedString::resolve`] to update the string as required.
///
///[`LocalizedString`]: struct.LocalizedString.html
///[`LocalizedString::resolve`]: struct.LocalizedString.html#method.resolve
pub fn localize<'args>(
&'args self,
key: &str,
args: impl Into<Option<&'args FluentArgs<'args>>>,
) -> Option<String> {
let args = args.into();
let value = match self
.current_bundle
.get_message(key)
.and_then(|msg| msg.value)
{
Some(v) => v,
None => return None,
};
let mut errs = Vec::new();
let result = self
.current_bundle
.format_pattern(key, value, args, &mut errs);
for err in errs {
warn!("localization error {:?}", err);
}
// fluent inserts bidi controls when interpolating, and they can
// cause rendering issues; for now we just strip them.
// https://www.w3.org/International/questions/qa-bidi-unicode-controls#basedirection
const START_ISOLATE: char = '\u{2068}';
const END_ISOLATE: char = '\u{2069}';
if args.is_some() && result.chars().any(|c| c == START_ISOLATE) {
Some(
result
.chars()
.filter(|c| c != &START_ISOLATE && c != &END_ISOLATE)
.collect(),
)
} else {
Some(result)
}
}
//TODO: handle locale change
}
*/ ////
impl<T> LocalizedString<T> {
/// Create a new `LocalizedString` with the given key.
pub const fn new(key: &'static str) -> Self {
LocalizedString {
key,
args: None,
placeholder: None,
resolved: None,
////resolved_lang: None,
}
}
/* ////
/// Add a placeholder value. This will be used if localization fails.
///
/// This is intended for use during prototyping.
pub fn with_placeholder(mut self, placeholder: String) -> Self {
self.placeholder = Some(placeholder);
self
}
*/ ////
/// Return the localized value for this string, or the placeholder, if
/// the localization is missing, or the key if there is no placeholder.
pub fn localized_str(&self) -> &str {
//cortex_m::asm::bkpt(); ////
self.resolved ////
.as_ref()
.expect("not resolved")
.as_str()
/* ////
self.resolved
.as_ref()
.map(|s| s.as_str())
.or_else(|| self.placeholder.as_ref().map(String::as_ref))
.unwrap_or(self.key)
*/ ////
}
}
impl<T: Data> LocalizedString<T> {
/// Add a named argument and a corresponding [`ArgClosure`]. This closure
/// is a function that will return a value for the given key from the current
/// environment and data.
///
/// [`ArgClosure`]: type.ArgClosure.html
pub fn with_arg(
mut self,
key: &'static str,
f: fn(&T, &Env) -> ArgValue, ////
////f: impl Fn(&T, &Env) -> FluentValue<'static> +'static,
) -> Self {
self.args
.get_or_insert(Vec::new())
.push((key, ArgSource(f)))
.expect("with arg failed"); ////
////.push((key, ArgSource(Arc::new(f))));
self
}
/// Lazily compute the localized value for this string based on the provided
/// environment and data.
///
/// Returns `true` if the current value of the string has changed.
pub fn resolve<'a>(&'a mut self, data: &T, env: &Env) -> bool {
//TODO: this recomputes the string if either the language has changed,
//or *anytime* we have arguments. Ideally we would be using a lens
//to only recompute when our actual data has changed.
if self.args.is_some()
////|| self.resolved_lang.as_ref()!= Some(&env.localization_manager().current_locale)
{
//// Resolve all args
let mut args = ArgValues::new();
for arg in self.args.as_ref().expect("resolve fail") {
let (k, v) = arg;
let argvalue = (v.0)(data, env);
args.insert(k, argvalue.clone())
.expect("resolve fail");
//// Convert the first arg to text and exit
self.resolved = Some(argvalue.to_string());
//cortex_m::asm::bkpt(); ////
return true;
}
//// No args to resolve
false
/* ////
let args: Option<FluentArgs> = self
.args
.as_ref()
.map(|a| a.iter().map(|(k, v)| (*k, (v.0)(data, env))).collect());
self.resolved_lang = Some(env.localization_manager().current_locale.clone());
let next = env.localization_manager().localize(self.key, args.as_ref());
let result = next != self.resolved;
self.resolved = next;
result
*/ ////
} else {
false
}
}
}
/* ////
impl<T> std::fmt::Debug for ArgSource<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Arg Resolver {:p}", self.0)
}
}
/// Helper to impl display for slices of displayable things.
struct PrintLocales<'a, T>(&'a [T]);
impl<'a, T: std::fmt::Display> std::fmt::Display for PrintLocales<'a, T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[")?;
let mut prev = false;
for l in self.0 {
if prev {
write!(f, ", ")?;
}
prev = true;
write!(f, "{}", l)?;
}
write!(f, "]")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn resolve() {
let en_us: LanguageIdentifier = "en-US".parse().unwrap();
let en_ca: LanguageIdentifier = "en-CA".parse().unwrap();
let en_gb: LanguageIdentifier = "en-GB".parse().unwrap();
let fr_fr: LanguageIdentifier = "fr-FR".parse().unwrap();
let pt_pt: LanguageIdentifier = "pt-PT".parse().unwrap();
let resmgr = ResourceManager {
resources: HashMap::new(),
locales: vec![en_us.clone(), en_ca.clone(), en_gb.clone(), fr_fr.clone()],
default_locale: en_us.clone(),
path_scheme: String::new(),
};
let en_za: LanguageIdentifier = "en-GB".parse().unwrap();
let cn_hk: LanguageIdentifier = "cn-HK".parse().unwrap();
let fr_ca: LanguageIdentifier = "fr-CA".parse().unwrap();
assert_eq!(
resmgr.resolve_locales(en_ca.clone()),
vec![en_ca.clone(), en_us.clone(), en_gb.clone()]
);
assert_eq!(
resmgr.resolve_locales(en_za.clone()),
vec![en_gb.clone(), en_us.clone(), en_ca.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_ca.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_fr.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(resmgr.resolve_locales(cn_hk), vec![en_us.clone()]);
assert_eq!(resmgr.resolve_locales(pt_pt), vec![en_us.clone()]);
}
}
*/ ////
/// Implement formatted output for ArgSource
impl
localization.rs | // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Localization handling.
//!
//! Localization is backed by [Fluent], via [fluent-rs].
//!
//! In Druid, the main way you will deal with localization is via the
//! [`LocalizedString`] struct.
//!
//! You construct a [`LocalizedString`] with a key, which identifies a 'message'
//! in your `.flt` files. If your string requires arguments, you supply it with
//! closures that can extract those arguments from the current [`Env`] and
//! [`Data`].
//!
//! At runtime, you resolve your [`LocalizedString`] into an actual string,
//! passing it the current [`Env`] and [`Data`].
//!
//!
//! [Fluent]: https://projectfluent.org
//! [fluent-rs]: https://github.com/projectfluent/fluent-rs
//! [`LocalizedString`]: struct.LocalizedString.html
//! [`Env`]: struct.Env.html
//! [`Data`]: trait.Data.html
/* ////
use std::collections::HashMap;
use std::sync::Arc;
use std::{fs, io};
use log::{debug, error, warn};
*/ ////
use crate::data::Data;
use crate::env::Env;
use crate::{ArgValue, ArgValues}; ////
/* ////
use fluent_bundle::{
FluentArgs, FluentBundle, FluentError, FluentMessage, FluentResource, FluentValue,
};
use fluent_locale::{negotiate_languages, NegotiationStrategy};
use fluent_syntax::ast::Pattern as FluentPattern;
use unic_langid::LanguageIdentifier;
// Localization looks for string files in druid/resources, but this path is hardcoded;
// it will only work if you're running an example from the druid/ directory.
// At some point we will need to bundle strings with applications, and choose
// the path dynamically.
static FALLBACK_STRINGS: &str = include_str!("../resources/i18n/en-US/builtin.ftl");
/// Provides access to the localization strings for the current locale.
#[allow(dead_code)]
pub(crate) struct L10nManager {
// these two are not currently used; will be used when we let the user
// add additional localization files.
res_mgr: ResourceManager,
resources: Vec<String>,
current_bundle: BundleStack,
current_locale: LanguageIdentifier,
}
/// Manages a collection of localization files.
struct ResourceManager {
resources: HashMap<String, Arc<FluentResource>>,
locales: Vec<LanguageIdentifier>,
default_locale: LanguageIdentifier,
path_scheme: String,
}
*/ ////
type MaxLocalizedString = heapless::consts::U20; //// Max length of localized strings
type String = heapless::String::<MaxLocalizedString>; ////
type MaxLocalizedArg = heapless::consts::U2; //// Max number of localized args
type Vec<T> = heapless::Vec::<T, MaxLocalizedArg>; ////
//////NOTE: instead of a closure, at some point we can use something like a lens for this.
//////TODO: this is an Arc so that it can be clone, which is a bound on things like `Menu`.
/////// A closure that generates a localization value.
type ArgClosure<T> = fn(&T, &Env) -> ArgValue; ////
////type ArgClosure<T> = Arc<dyn Fn(&T, &Env) -> FluentValue<'static> + 'static>;
/// Wraps a closure that generates an argument for localization.
#[derive(Clone)]
struct ArgSource<T>(ArgClosure<T>);
/// A string that can be localized based on the current locale.
///
/// At its simplest, a `LocalizedString` is a key that can be resolved
/// against a map of localized strings for a given locale.
#[derive(Clone)]////
////#[derive(Debug, Clone)]
pub struct LocalizedString<T> {
pub(crate) key: &'static str,
placeholder: Option<String>,
args: Option<Vec<(&'static str, ArgSource<T>)>>,
resolved: Option<String>,
////resolved_lang: Option<LanguageIdentifier>,
}
/* ////
/// A stack of localization resources, used for fallback.
struct BundleStack(Vec<FluentBundle<Arc<FluentResource>>>);
impl BundleStack {
fn get_message(&self, id: &str) -> Option<FluentMessage> {
self.0.iter().flat_map(|b| b.get_message(id)).next()
}
fn format_pattern(
&self,
id: &str,
pattern: &FluentPattern,
args: Option<&FluentArgs>,
errors: &mut Vec<FluentError>,
) -> String {
for bundle in self.0.iter() {
if bundle.has_message(id) {
return bundle.format_pattern(pattern, args, errors).to_string();
}
}
format!("localization failed for key '{}'", id)
}
}
//NOTE: much of this is adapted from https://github.com/projectfluent/fluent-rs/blob/master/fluent-resmgr/src/resource_manager.rs
impl ResourceManager {
/// Loads a new localization resource from disk, as needed.
fn get_resource(&mut self, res_id: &str, locale: &str) -> Arc<FluentResource> {
let path = self
.path_scheme
.replace("{locale}", locale)
.replace("{res_id}", res_id);
if let Some(res) = self.resources.get(&path) {
res.clone()
} else {
let string = fs::read_to_string(&path).unwrap_or_else(|_| {
if (res_id, locale) == ("builtin.ftl", "en-US") {
FALLBACK_STRINGS.to_string()
} else {
error!("missing resouce {}/{}", locale, res_id);
String::new()
}
});
let res = match FluentResource::try_new(string) {
Ok(res) => Arc::new(res),
Err((res, _err)) => Arc::new(res),
};
self.resources.insert(path, res.clone());
res
}
}
/// Return the best localization bundle for the provided `LanguageIdentifier`.
fn get_bundle(&mut self, locale: &LanguageIdentifier, resource_ids: &[String]) -> BundleStack {
let resolved_locales = self.resolve_locales(locale.clone());
debug!("resolved: {}", PrintLocales(resolved_locales.as_slice()));
let mut stack = Vec::new();
for locale in &resolved_locales {
let mut bundle = FluentBundle::new(&resolved_locales);
for res_id in resource_ids {
let res = self.get_resource(&res_id, &locale.to_string());
bundle.add_resource(res).unwrap();
}
stack.push(bundle);
}
BundleStack(stack)
}
/// Given a locale, returns the best set of available locales.
pub(crate) fn resolve_locales(&self, locale: LanguageIdentifier) -> Vec<LanguageIdentifier> {
negotiate_languages(
&[locale],
&self.locales,
Some(&self.default_locale),
NegotiationStrategy::Filtering,
)
.into_iter()
.map(|l| l.to_owned())
.collect()
}
}
impl L10nManager {
/// Create a new localization manager.
///
/// `resources` is a list of file names that contain strings. `base_dir`
/// is a path to a directory that includes per-locale subdirectories.
///
/// This directory should be of the structure `base_dir/{locale}/{resource}`,
/// where '{locale}' is a valid BCP47 language tag, and {resource} is a `.ftl` file
/// included in `resources`.
pub fn new(resources: Vec<String>, base_dir: &str) -> Self {
fn get_available_locales(base_dir: &str) -> Result<Vec<LanguageIdentifier>, io::Error> {
let mut locales = vec![];
let res_dir = fs::read_dir(base_dir)?;
for entry in res_dir {
if let Ok(entry) = entry {
let path = entry.path();
if path.is_dir() {
if let Some(name) = path.file_name() {
if let Some(name) = name.to_str() {
let langid: LanguageIdentifier =
name.parse().expect("Parsing failed.");
locales.push(langid);
}
}
}
}
}
Ok(locales)
}
let default_locale: LanguageIdentifier =
"en-US".parse().expect("failed to parse default locale");
let current_locale = Application::get_locale()
.parse()
.unwrap_or_else(|_| default_locale.clone());
let locales = get_available_locales(base_dir).unwrap_or_default();
debug!(
"available locales {}, current {}",
PrintLocales(&locales),
current_locale,
);
let mut path_scheme = base_dir.to_string();
path_scheme.push_str("/{locale}/{res_id}");
let mut res_mgr = ResourceManager {
resources: HashMap::new(),
path_scheme,
default_locale,
locales,
};
let current_bundle = res_mgr.get_bundle(&current_locale, &resources);
L10nManager {
res_mgr,
current_bundle,
resources,
current_locale,
}
}
/// Fetch a localized string from the current bundle by key.
///
/// In general, this should not be used directly; [`LocalizedString`]
/// should be used for localization, and you should call
/// [`LocalizedString::resolve`] to update the string as required.
///
///[`LocalizedString`]: struct.LocalizedString.html
///[`LocalizedString::resolve`]: struct.LocalizedString.html#method.resolve
pub fn localize<'args>(
&'args self,
key: &str,
args: impl Into<Option<&'args FluentArgs<'args>>>,
) -> Option<String> {
let args = args.into();
let value = match self
.current_bundle
.get_message(key)
.and_then(|msg| msg.value)
{
Some(v) => v,
None => return None,
};
let mut errs = Vec::new();
let result = self
.current_bundle
.format_pattern(key, value, args, &mut errs);
for err in errs {
warn!("localization error {:?}", err);
}
// fluent inserts bidi controls when interpolating, and they can
// cause rendering issues; for now we just strip them.
// https://www.w3.org/International/questions/qa-bidi-unicode-controls#basedirection
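// For example, "Hello, {name}" typically comes back as
// "Hello, \u{2068}Alice\u{2069}" after interpolation (illustrative;
// the exact output depends on the bundle).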
const START_ISOLATE: char = '\u{2068}';
const END_ISOLATE: char = '\u{2069}';
if args.is_some() && result.chars().any(|c| c == START_ISOLATE) {
Some(
result
.chars()
.filter(|c| c != &START_ISOLATE && c != &END_ISOLATE)
.collect(),
)
} else {
Some(result)
}
}
//TODO: handle locale change
}
*/ ////
impl<T> LocalizedString<T> {
/// Create a new `LocalizedString` with the given key.
pub const fn new(key: &'static str) -> Self {
LocalizedString {
key,
args: None,
placeholder: None,
resolved: None,
////resolved_lang: None,
}
}
/* ////
/// Add a placeholder value. This will be used if localization fails.
///
/// This is intended for use during prototyping.
pub fn with_placeholder(mut self, placeholder: String) -> Self {
self.placeholder = Some(placeholder);
self
}
*/ ////
/// Return the localized value for this string, or the placeholder, if
/// the localization is missing, or the key if there is no placeholder.
pub fn localized_str(&self) -> &str {
//cortex_m::asm::bkpt(); ////
self.resolved ////
.as_ref()
.expect("not resolved")
.as_str()
/* ////
self.resolved
.as_ref()
.map(|s| s.as_str())
.or_else(|| self.placeholder.as_ref().map(String::as_ref))
.unwrap_or(self.key) | }
}
impl<T: Data> LocalizedString<T> {
/// Add a named argument and a corresponding [`ArgClosure`]. This closure
/// is a function that will return a value for the given key from the current
/// environment and data.
///
/// [`ArgClosure`]: type.ArgClosure.html
pub fn with_arg(
mut self,
key: &'static str,
f: fn(&T, &Env) -> ArgValue, ////
////f: impl Fn(&T, &Env) -> FluentValue<'static> + 'static,
) -> Self {
self.args
.get_or_insert(Vec::new())
.push((key, ArgSource(f)))
.expect("with arg failed"); ////
////.push((key, ArgSource(Arc::new(f))));
self
}
/// Lazily compute the localized value for this string based on the provided
/// environment and data.
///
/// Returns `true` if the current value of the string has changed.
pub fn resolve<'a>(&'a mut self, data: &T, env: &Env) -> bool {
//TODO: this recomputes the string if either the language has changed,
//or *anytime* we have arguments. Ideally we would be using a lens
//to only recompute when our actual data has changed.
if self.args.is_some()
////|| self.resolved_lang.as_ref()!= Some(&env.localization_manager().current_locale)
{
//// Resolve all args
let mut args = ArgValues::new();
for arg in self.args.as_ref().expect("resolve fail") {
let (k, v) = arg;
let argvalue = (v.0)(data, env);
args.insert(k, argvalue.clone())
.expect("resolve fail");
//// Convert the first arg to text and exit
self.resolved = Some(argvalue.to_string());
//cortex_m::asm::bkpt(); ////
return true;
}
//// No args to resolve
false
/* ////
let args: Option<FluentArgs> = self
.args
.as_ref()
.map(|a| a.iter().map(|(k, v)| (*k, (v.0)(data, env))).collect());
self.resolved_lang = Some(env.localization_manager().current_locale.clone());
let next = env.localization_manager().localize(self.key, args.as_ref());
let result = next != self.resolved;
self.resolved = next;
result
*/ ////
} else {
false
}
}
}
/* ////
impl<T> std::fmt::Debug for ArgSource<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Arg Resolver {:p}", self.0)
}
}
/// Helper to impl display for slices of displayable things.
struct PrintLocales<'a, T>(&'a [T]);
impl<'a, T: std::fmt::Display> std::fmt::Display for PrintLocales<'a, T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[")?;
let mut prev = false;
for l in self.0 {
if prev {
write!(f, ", ")?;
}
prev = true;
write!(f, "{}", l)?;
}
write!(f, "]")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn resolve() {
let en_us: LanguageIdentifier = "en-US".parse().unwrap();
let en_ca: LanguageIdentifier = "en-CA".parse().unwrap();
let en_gb: LanguageIdentifier = "en-GB".parse().unwrap();
let fr_fr: LanguageIdentifier = "fr-FR".parse().unwrap();
let pt_pt: LanguageIdentifier = "pt-PT".parse().unwrap();
let resmgr = ResourceManager {
resources: HashMap::new(),
locales: vec![en_us.clone(), en_ca.clone(), en_gb.clone(), fr_fr.clone()],
default_locale: en_us.clone(),
path_scheme: String::new(),
};
let en_za: LanguageIdentifier = "en-GB".parse().unwrap();
let cn_hk: LanguageIdentifier = "cn-HK".parse().unwrap();
let fr_ca: LanguageIdentifier = "fr-CA".parse().unwrap();
assert_eq!(
resmgr.resolve_locales(en_ca.clone()),
vec![en_ca.clone(), en_us.clone(), en_gb.clone()]
);
assert_eq!(
resmgr.resolve_locales(en_za.clone()),
vec![en_gb.clone(), en_us.clone(), en_ca.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_ca.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_fr.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(resmgr.resolve_locales(cn_hk), vec![en_us.clone()]);
assert_eq!(resmgr.resolve_locales(pt_pt), vec![en_us.clone()]);
}
}
*/ ////
/// Implement formatted output for ArgSource
impl<T | */ //// | random_line_split |
localization.rs | an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Localization handling.
//!
//! Localization is backed by [Fluent], via [fluent-rs].
//!
//! In Druid, the main way you will deal with localization is via the
//! [`LocalizedString`] struct.
//!
//! You construct a [`LocalizedString`] with a key, which identifies a 'message'
//! in your `.ftl` files. If your string requires arguments, you supply it with
//! closures that can extract those arguments from the current [`Env`] and
//! [`Data`].
//!
//! At runtime, you resolve your [`LocalizedString`] into an actual string,
//! passing it the current [`Env`] and [`Data`].
//!
//!
//! [Fluent]: https://projectfluent.org
//! [fluent-rs]: https://github.com/projectfluent/fluent-rs
//! [`LocalizedString`]: struct.LocalizedString.html
//! [`Env`]: struct.Env.html
//! [`Data`]: trait.Data.html
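//!
//! # Example
//!
//! An illustrative sketch; the `app-title` key, the `user` argument, and the
//! `ArgValue` conversion are assumptions, not part of this crate:
//!
//! ```ignore
//! let mut title = LocalizedString::new("app-title")
//!     .with_arg("user", |data: &String, _env| ArgValue::from(data.clone()));
//! // Given `data: String` and `env: Env` in scope:
//! if title.resolve(&data, &env) {
//!     // localized_str() panics if resolve() has not run yet.
//!     let text = title.localized_str();
//! }
//! ```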
/* ////
use std::collections::HashMap;
use std::sync::Arc;
use std::{fs, io};
use log::{debug, error, warn};
*/ ////
use crate::data::Data;
use crate::env::Env;
use crate::{ArgValue, ArgValues}; ////
/* ////
use fluent_bundle::{
FluentArgs, FluentBundle, FluentError, FluentMessage, FluentResource, FluentValue,
};
use fluent_locale::{negotiate_languages, NegotiationStrategy};
use fluent_syntax::ast::Pattern as FluentPattern;
use unic_langid::LanguageIdentifier;
// Localization looks for string files in druid/resources, but this path is hardcoded;
// it will only work if you're running an example from the druid/ directory.
// At some point we will need to bundle strings with applications, and choose
// the path dynamically.
static FALLBACK_STRINGS: &str = include_str!("../resources/i18n/en-US/builtin.ftl");
/// Provides access to the localization strings for the current locale.
#[allow(dead_code)]
pub(crate) struct L10nManager {
// these two are not currently used; will be used when we let the user
// add additional localization files.
res_mgr: ResourceManager,
resources: Vec<String>,
current_bundle: BundleStack,
current_locale: LanguageIdentifier,
}
/// Manages a collection of localization files.
struct ResourceManager {
resources: HashMap<String, Arc<FluentResource>>,
locales: Vec<LanguageIdentifier>,
default_locale: LanguageIdentifier,
path_scheme: String,
}
*/ ////
type MaxLocalizedString = heapless::consts::U20; //// Max length of localized strings
type String = heapless::String::<MaxLocalizedString>; ////
type MaxLocalizedArg = heapless::consts::U2; //// Max number of localized args
type Vec<T> = heapless::Vec::<T, MaxLocalizedArg>; ////
//////NOTE: instead of a closure, at some point we can use something like a lens for this.
//////TODO: this is an Arc so that it can be clone, which is a bound on things like `Menu`.
/////// A closure that generates a localization value.
type ArgClosure<T> = fn(&T, &Env) -> ArgValue; ////
////type ArgClosure<T> = Arc<dyn Fn(&T, &Env) -> FluentValue<'static> + 'static>;
/// Wraps a closure that generates an argument for localization.
#[derive(Clone)]
struct ArgSource<T>(ArgClosure<T>);
/// A string that can be localized based on the current locale.
///
/// At its simplest, a `LocalizedString` is a key that can be resolved
/// against a map of localized strings for a given locale.
#[derive(Clone)]////
////#[derive(Debug, Clone)]
pub struct LocalizedString<T> {
pub(crate) key: &'static str,
placeholder: Option<String>,
args: Option<Vec<(&'static str, ArgSource<T>)>>,
resolved: Option<String>,
////resolved_lang: Option<LanguageIdentifier>,
}
/* ////
/// A stack of localization resources, used for fallback.
struct BundleStack(Vec<FluentBundle<Arc<FluentResource>>>);
impl BundleStack {
fn get_message(&self, id: &str) -> Option<FluentMessage> {
self.0.iter().flat_map(|b| b.get_message(id)).next()
}
fn format_pattern(
&self,
id: &str,
pattern: &FluentPattern,
args: Option<&FluentArgs>,
errors: &mut Vec<FluentError>,
) -> String {
for bundle in self.0.iter() {
if bundle.has_message(id) {
return bundle.format_pattern(pattern, args, errors).to_string();
}
}
format!("localization failed for key '{}'", id)
}
}
//NOTE: much of this is adapted from https://github.com/projectfluent/fluent-rs/blob/master/fluent-resmgr/src/resource_manager.rs
impl ResourceManager {
/// Loads a new localization resource from disk, as needed.
fn get_resource(&mut self, res_id: &str, locale: &str) -> Arc<FluentResource> {
let path = self
.path_scheme
.replace("{locale}", locale)
.replace("{res_id}", res_id);
if let Some(res) = self.resources.get(&path) {
res.clone()
} else {
let string = fs::read_to_string(&path).unwrap_or_else(|_| {
if (res_id, locale) == ("builtin.ftl", "en-US") {
FALLBACK_STRINGS.to_string()
} else {
error!("missing resouce {}/{}", locale, res_id);
String::new()
}
});
let res = match FluentResource::try_new(string) {
Ok(res) => Arc::new(res),
Err((res, _err)) => Arc::new(res),
};
self.resources.insert(path, res.clone());
res
}
}
/// Return the best localization bundle for the provided `LanguageIdentifier`.
fn get_bundle(&mut self, locale: &LanguageIdentifier, resource_ids: &[String]) -> BundleStack {
let resolved_locales = self.resolve_locales(locale.clone());
debug!("resolved: {}", PrintLocales(resolved_locales.as_slice()));
let mut stack = Vec::new();
for locale in &resolved_locales {
let mut bundle = FluentBundle::new(&resolved_locales);
for res_id in resource_ids {
let res = self.get_resource(&res_id, &locale.to_string());
bundle.add_resource(res).unwrap();
}
stack.push(bundle);
}
BundleStack(stack)
}
/// Given a locale, returns the best set of available locales.
pub(crate) fn resolve_locales(&self, locale: LanguageIdentifier) -> Vec<LanguageIdentifier> {
negotiate_languages(
&[locale],
&self.locales,
Some(&self.default_locale),
NegotiationStrategy::Filtering,
)
.into_iter()
.map(|l| l.to_owned())
.collect()
}
}
impl L10nManager {
/// Create a new localization manager.
///
/// `resources` is a list of file names that contain strings. `base_dir`
/// is a path to a directory that includes per-locale subdirectories.
///
/// This directory should be of the structure `base_dir/{locale}/{resource}`,
/// where '{locale}' is a valid BCP47 language tag, and {resource} is a `.ftl` file
/// included in `resources`.
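/// For example, with `base_dir = "resources/i18n"` and a resource named
/// `"builtin.ftl"`, the en-US strings are loaded from
/// `resources/i18n/en-US/builtin.ftl`.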
pub fn new(resources: Vec<String>, base_dir: &str) -> Self {
fn get_available_locales(base_dir: &str) -> Result<Vec<LanguageIdentifier>, io::Error> {
let mut locales = vec![];
let res_dir = fs::read_dir(base_dir)?;
for entry in res_dir {
if let Ok(entry) = entry {
let path = entry.path();
if path.is_dir() {
if let Some(name) = path.file_name() {
if let Some(name) = name.to_str() {
let langid: LanguageIdentifier =
name.parse().expect("Parsing failed.");
locales.push(langid);
}
}
}
}
}
Ok(locales)
}
let default_locale: LanguageIdentifier =
"en-US".parse().expect("failed to parse default locale");
let current_locale = Application::get_locale()
.parse()
.unwrap_or_else(|_| default_locale.clone());
let locales = get_available_locales(base_dir).unwrap_or_default();
debug!(
"available locales {}, current {}",
PrintLocales(&locales),
current_locale,
);
let mut path_scheme = base_dir.to_string();
path_scheme.push_str("/{locale}/{res_id}");
let mut res_mgr = ResourceManager {
resources: HashMap::new(),
path_scheme,
default_locale,
locales,
};
let current_bundle = res_mgr.get_bundle(&current_locale, &resources);
L10nManager {
res_mgr,
current_bundle,
resources,
current_locale,
}
}
/// Fetch a localized string from the current bundle by key.
///
/// In general, this should not be used directly; [`LocalizedString`]
/// should be used for localization, and you should call
/// [`LocalizedString::resolve`] to update the string as required.
///
///[`LocalizedString`]: struct.LocalizedString.html
///[`LocalizedString::resolve`]: struct.LocalizedString.html#method.resolve
pub fn localize<'args>(
&'args self,
key: &str,
args: impl Into<Option<&'args FluentArgs<'args>>>,
) -> Option<String> {
let args = args.into();
let value = match self
.current_bundle
.get_message(key)
.and_then(|msg| msg.value)
{
Some(v) => v,
None => return None,
};
let mut errs = Vec::new();
let result = self
.current_bundle
.format_pattern(key, value, args, &mut errs);
for err in errs {
warn!("localization error {:?}", err);
}
// fluent inserts bidi controls when interpolating, and they can
// cause rendering issues; for now we just strip them.
// https://www.w3.org/International/questions/qa-bidi-unicode-controls#basedirection
const START_ISOLATE: char = '\u{2068}';
const END_ISOLATE: char = '\u{2069}';
if args.is_some() && result.chars().any(|c| c == START_ISOLATE) {
Some(
result
.chars()
.filter(|c| c != &START_ISOLATE && c != &END_ISOLATE)
.collect(),
)
} else {
Some(result)
}
}
//TODO: handle locale change
}
*/ ////
impl<T> LocalizedString<T> {
/// Create a new `LocalizedString` with the given key.
pub const fn new(key: &'static str) -> Self {
LocalizedString {
key,
args: None,
placeholder: None,
resolved: None,
////resolved_lang: None,
}
}
/* ////
/// Add a placeholder value. This will be used if localization fails.
///
/// This is intended for use during prototyping.
pub fn with_placeholder(mut self, placeholder: String) -> Self {
self.placeholder = Some(placeholder);
self
}
*/ ////
/// Return the localized value for this string, or the placeholder, if
/// the localization is missing, or the key if there is no placeholder.
pub fn localized_str(&self) -> &str {
//cortex_m::asm::bkpt(); ////
self.resolved ////
.as_ref()
.expect("not resolved")
.as_str()
/* ////
self.resolved
.as_ref()
.map(|s| s.as_str())
.or_else(|| self.placeholder.as_ref().map(String::as_ref))
.unwrap_or(self.key)
*/ ////
}
}
impl<T: Data> LocalizedString<T> {
/// Add a named argument and a corresponding [`ArgClosure`]. This closure
/// is a function that will return a value for the given key from the current
/// environment and data.
///
/// [`ArgClosure`]: type.ArgClosure.html
pub fn with_arg(
mut self,
key: &'static str,
f: fn(&T, &Env) -> ArgValue, ////
////f: impl Fn(&T, &Env) -> FluentValue<'static> + 'static,
) -> Self {
self.args
.get_or_insert(Vec::new())
.push((key, ArgSource(f)))
.expect("with arg failed"); ////
////.push((key, ArgSource(Arc::new(f))));
self
}
/// Lazily compute the localized value for this string based on the provided
/// environment and data.
///
/// Returns `true` if the current value of the string has changed.
pub fn resolve<'a>(&'a mut self, data: &T, env: &Env) -> bool {
//TODO: this recomputes the string if either the language has changed,
//or *anytime* we have arguments. Ideally we would be using a lens
//to only recompute when our actual data has changed.
if self.args.is_some()
////|| self.resolved_lang.as_ref()!= Some(&env.localization_manager().current_locale)
{
//// Resolve all args
let mut args = ArgValues::new();
for arg in self.args.as_ref().expect("resolve fail") {
let (k, v) = arg;
let argvalue = (v.0)(data, env);
args.insert(k, argvalue.clone())
.expect("resolve fail");
//// Convert the first arg to text and exit
self.resolved = Some(argvalue.to_string());
//cortex_m::asm::bkpt(); ////
return true;
}
//// No args to resolve
false
/* ////
let args: Option<FluentArgs> = self
.args
.as_ref()
.map(|a| a.iter().map(|(k, v)| (*k, (v.0)(data, env))).collect());
self.resolved_lang = Some(env.localization_manager().current_locale.clone());
let next = env.localization_manager().localize(self.key, args.as_ref());
let result = next != self.resolved;
self.resolved = next;
result
*/ ////
} else {
false
}
}
}
/* ////
impl<T> std::fmt::Debug for ArgSource<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Arg Resolver {:p}", self.0)
}
}
/// Helper to impl display for slices of displayable things.
struct PrintLocales<'a, T>(&'a [T]);
impl<'a, T: std::fmt::Display> std::fmt::Display for PrintLocales<'a, T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[")?;
let mut prev = false;
for l in self.0 {
if prev {
write!(f, ", ")?;
}
prev = true;
write!(f, "{}", l)?;
}
write!(f, "]")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn resolve() {
let en_us: LanguageIdentifier = "en-US".parse().unwrap();
let en_ca: LanguageIdentifier = "en-CA".parse().unwrap();
let en_gb: LanguageIdentifier = "en-GB".parse().unwrap();
let fr_fr: LanguageIdentifier = "fr-FR".parse().unwrap();
let pt_pt: LanguageIdentifier = "pt-PT".parse().unwrap();
let resmgr = ResourceManager {
resources: HashMap::new(),
locales: vec![en_us.clone(), en_ca.clone(), en_gb.clone(), fr_fr.clone()],
default_locale: en_us.clone(),
path_scheme: String::new(),
};
let en_za: LanguageIdentifier = "en-GB".parse().unwrap();
let cn_hk: LanguageIdentifier = "cn-HK".parse().unwrap();
let fr_ca: LanguageIdentifier = "fr-CA".parse().unwrap();
assert_eq!(
resmgr.resolve_locales(en_ca.clone()),
vec![en_ca.clone(), en_us.clone(), en_gb.clone()]
);
assert_eq!(
resmgr.resolve_locales(en_za.clone()),
vec![en_gb.clone(), en_us.clone(), en_ca.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_ca.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_fr.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(resmgr.resolve_locales(cn_hk), vec![en_us.clone()]);
assert_eq!(resmgr.resolve_locales(pt_pt), vec![en_us.clone()]);
}
}
*/ ////
/// Implement formatted output for ArgSource
impl<T> core::fmt::Debug for ArgSource<T> { ////
fn fmt(&self, _fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result | {
// TODO
Ok(())
} | identifier_body |
|
filesystem.rs | use async_std;
use async_std::io::ReadExt;
use async_std::stream::StreamExt;
use async_std::task::spawn;
use crate::{BoxedError, BoxedErrorResult};
use crate::component_manager::*;
use crate::constants;
use crate::easyhash::{EasyHash, Hex};
use crate::globals;
use crate::heartbeat;
use crate::operation::*;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::fmt;
use std::future::Future;
use std::io::Write;
pub fn get(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: get distributed_filename local_path".into())
}
let distributed_filename = args[0].to_string();
let local_path = args[1].to_string();
async_std::task::block_on(get_distributed_file(distributed_filename, local_path))?;
Ok(())
}
// args[0] = path to local file
// args[1] = distributed filename
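// Example: `put ./report.txt report.txt` hashes the distributed filename,
// picks NUM_OWNERS destination nodes, gossips the new ownership, and then
// streams the file to each destination.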
pub fn put(args: Vec<&str>, sender: &OperationSender) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: put local_path distributed_filename".into())
}
let local_path = args[0];
let distributed_filename = args[1];
// Figure out who I am giving this file to
let dest_ids = gen_file_owners(&distributed_filename)?;
// Gossip who has the file now
sender.send(
SendableOperation::for_successors(Box::new(NewFileOwnersOperation {
distributed_filename: distributed_filename.to_string(),
new_owners: dest_ids
.iter()
.map(|x| x.to_string())
.collect::<HashSet<_>>()
}))
)?;
// Send them the file
async_std::task::block_on(send_file_to_all(local_path.to_string(),
distributed_filename.to_string(),
&dest_ids))?;
Ok(())
}
pub fn ls(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
let invalid_args: BoxedErrorResult<()> = Err("Usage: ls [distributed_filename]".into());
match args.len() {
0 => {
// All
print_file_owners(None, true)?;
Ok(())
},
1 => {
// Just File
let distributed_filename = args[0];
print_file_owners(Some(distributed_filename), false)?;
Ok(())
},
_ => invalid_args
}
}
// TODO: You wrote this very late - maybe fix
fn print_file_owners(maybe_distributed_filename: Option<&str>, full: bool) -> BoxedErrorResult<()> {
let all_file_owners = globals::ALL_FILE_OWNERS.read();
match (maybe_distributed_filename, full) {
(Some(_), true) => {
Err("Cannot set distributed_filename and full to true when printing owners".into())
},
(Some(distributed_filename), false) => {
// Print the files owners
match all_file_owners.get(distributed_filename) {
Some(owners) => {
println!("{:?}", owners);
},
None => {
// A little unoptimal - change if above format changes
println!("{{}}");
}
}
Ok(())
},
(None, true) => {
// Print the whole map
println!("{:?}", *all_file_owners);
Ok(())
},
(None, false) => {
Err("Cannot print owners of nonexistant distributed_filename with full set to false".into())
}
}
}
async fn get_distributed_file(distributed_filename: String, local_path: String) -> BoxedErrorResult<()> {
// TODO: Find owners
let operation = SendableOperation::for_owners(&distributed_filename, Box::new(GetOperation {
distributed_filename: distributed_filename.clone(),
local_path: local_path
}));
let mut streams = operation
.write_all_tcp_async()
.await?;
// TODO: Redo whatever tf going on here
match streams.len() {
0 => Err(format!("No owners found for file {}", distributed_filename).into()),
_ => {
let (result, source) = streams[0]
.try_read_operation()
.await?;
result.execute(source)?;
Ok(())
}
}
}
async fn read_file_to_buf(local_path: &String) -> BoxedErrorResult<Vec<u8>> {
let mut data_buf: Vec<u8> = Vec::new();
let mut file = async_std::fs::File::open(&local_path).await?;
file.read_to_end(&mut data_buf).await?;
Ok(data_buf)
}
async fn send_file_to_all(local_path: String, distributed_filename: String, dest_ids: &Vec<String>) ->
BoxedErrorResult<()> {
let data_buf = read_file_to_buf(&local_path).await?;
let operation = SendableOperation::for_id_list(dest_ids.clone(), Box::new(SendFileOperation {
filename: distributed_filename,
data: data_buf,
is_distributed: true
}));
operation.write_all_tcp_async().await?;
Ok(())
}
pub async fn file_server<'a>(_sender: &'a OperationSender) -> BoxedErrorResult<()> {
let server = globals::SERVER_SOCKET.read();
let mut incoming = server.incoming();
while let Some(stream) = incoming.next().await {
let connection = stream?;
log(format!("Handling connection from {:?}", connection.peer_addr()));
spawn(handle_connection(connection));
}
Ok(())
}
async fn handle_connection(mut connection: async_std::net::TcpStream) -> BoxedErrorResult<()> {
let (operation, source) = connection.try_read_operation().await?;
// TODO: Think about what standard we want with these
let _generated_operations = operation.execute(source)?;
Ok(())
}
// Helpers
fn gen_file_owners(filename: &str) -> BoxedErrorResult<Vec<String>> {
let file_idx = filename.easyhash();
heartbeat::gen_neighbor_list_from(file_idx as i32, 1, constants::NUM_OWNERS, true)
}
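// Illustrative: a file whose name hashes to ring position k is assigned to
// NUM_OWNERS consecutive nodes starting from k (the exact placement comes
// from heartbeat::gen_neighbor_list_from).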
// TODO: This function makes the entire system assume there are always at least two nodes in the system
// and the file must have an owner or else the operation will not work correctly. This is fine for now
// but it is worth improving sooner rather than later (make distinct Error types to differentiate, etc).
fn gen_new_file_owner(filename: &str) -> BoxedErrorResult<String> {
match globals::ALL_FILE_OWNERS.read().get(filename) {
Some(owners) => {
let potential_owners = gen_file_owners(filename)?;
for potential_owner in &potential_owners {
if !owners.contains(potential_owner) {
return Ok(potential_owner.clone());
}
}
Err(format!("No new owners available for file {}", filename).into())
},
None => Err(format!("No owner found for file {}", filename).into())
}
}
fn | (filename: &String) -> String {
format!("{}/{}", constants::DATA_DIR, filename)
}
// Returns messages to be gossiped
pub fn handle_failed_node(failed_id: &String) -> BoxedErrorResult<Vec<SendableOperation>> {
match heartbeat::is_master() {
true => {
let mut generated_operations: Vec<SendableOperation> = Vec::new();
let myself_source = Source::myself();
// Find files they owned
let mut lost_files: HashSet<String> = HashSet::new();
for (distributed_filename, owners) in globals::ALL_FILE_OWNERS.read().iter() {
if owners.contains(failed_id) {
lost_files.insert(distributed_filename.clone());
}
}
log("Found lost files".to_string());
// Send that they no longer own those files
// Separate operation so that this acts like a confirmation to fully forget about the node from
// the master. Can be used with a delay later if you want more error resistance.
let lost_file_operation = LostFilesOperation {
failed_owner: failed_id.clone(),
lost_files: lost_files.clone()
};
generated_operations.append(&mut lost_file_operation.execute(myself_source.clone())?);
log("Executed lost files operation locally".to_string());
// Gen new owners of the file and propagate
let mut new_owners: HashMap<String, HashSet<String>> = HashMap::new();
for lost_file in &lost_files {
let new_owner = gen_new_file_owner(&lost_file)?;
// TODO: Maybe optimize this into one fat packet - probably a new operation?
let new_owner_operation = NewFileOwnersOperation {
distributed_filename: lost_file.clone(),
new_owners: vec![new_owner].iter().map(|x| x.to_string()).collect()
};
generated_operations.append(&mut new_owner_operation.execute(myself_source.clone())?);
}
log("Executed all new_owner operations locally".to_string());
Ok(generated_operations)
},
false => {
Ok(vec![])
}
}
}
// Operations
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetOperation {
pub distributed_filename: String,
pub local_path: String
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NewFileOwnersOperation {
pub distributed_filename: String,
pub new_owners: HashSet<String>
}
#[derive(Serialize, Deserialize, Clone)]
pub struct SendFileOperation {
pub filename: String,
pub data: Vec<u8>,
pub is_distributed: bool
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LostFilesOperation {
pub failed_owner: String,
pub lost_files: HashSet<String>
}
// Trait Impls
impl OperationWriteExecute for GetOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
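// The serialized payload is prefixed with a 4-byte ASCII tag ("GET ",
// "NFO ", "FILE", "LOST") so the receiver can tell operation types apart.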
Ok(create_buf(&self, str_to_vec("GET ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let local_path = distributed_file_path(&self.distributed_filename);
let data_buf = async_std::task::block_on(read_file_to_buf(&local_path))?;
let operation = SendableOperation::for_single_tcp_stream(
TryInto::<async_std::net::TcpStream>::try_into(source)?,
Box::new(SendFileOperation {
filename: self.local_path.clone(),
data: data_buf,
is_distributed: false
}));
async_std::task::block_on(operation.write_all_tcp_async())?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for NewFileOwnersOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("NFO ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Add this file to your map with the new people that have it
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
let mut file_owners = all_file_owners.entry(self.distributed_filename.clone()).or_insert(HashSet::new());
match (&self.new_owners - file_owners).len() {
0 => {
Ok(vec![])
},
_ => {
*file_owners = &self.new_owners | file_owners;
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
}
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for SendFileOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("FILE")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Check if the file exists before overwriting
let filename = match self.is_distributed {
true => format!("{}/{}", constants::DATA_DIR, self.filename),
false => self.filename.clone()
};
let mut file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(filename)?;
file.write_all(&self.data)?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl fmt::Debug for SendFileOperation {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let formatted_data = if self.data.len() > 40 {
format!("{:?}...", &self.data[..40])
} else {
format!("{:?}", &self.data)
};
fmt.debug_struct("SendFileOperation")
.field("filename", &self.filename)
.field("data", &formatted_data)
.field("is_distributed", &self.is_distributed)
.finish()
}
}
impl OperationWriteExecute for LostFilesOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("LOST")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let mut did_remove = false;
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
for lost_file in &self.lost_files {
if let Some(owners) = all_file_owners.get_mut(lost_file) {
did_remove |= owners.remove(&self.failed_owner);
}
}
if did_remove {
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
} else {
Ok(vec![])
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
| distributed_file_path | identifier_name |
filesystem.rs | use async_std;
use async_std::io::ReadExt;
use async_std::stream::StreamExt;
use async_std::task::spawn;
use crate::{BoxedError, BoxedErrorResult};
use crate::component_manager::*;
use crate::constants;
use crate::easyhash::{EasyHash, Hex};
use crate::globals;
use crate::heartbeat;
use crate::operation::*;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::fmt;
use std::future::Future;
use std::io::Write;
pub fn get(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: get distributed_filename local_path".into())
}
let distributed_filename = args[0].to_string();
let local_path = args[1].to_string();
async_std::task::block_on(get_distributed_file(distributed_filename, local_path))?;
Ok(())
}
// args[0] = path to local file
// args[1] = distributed filename
pub fn put(args: Vec<&str>, sender: &OperationSender) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: put local_path distributed_filename".into())
}
let local_path = args[0];
let distributed_filename = args[1];
// Figure out who I am giving this file to
let dest_ids = gen_file_owners(&distributed_filename)?;
// Gossip who has the file now
sender.send(
SendableOperation::for_successors(Box::new(NewFileOwnersOperation {
distributed_filename: distributed_filename.to_string(),
new_owners: dest_ids
.iter()
.map(|x| x.to_string())
.collect::<HashSet<_>>()
}))
)?;
// Send them the file
async_std::task::block_on(send_file_to_all(local_path.to_string(),
distributed_filename.to_string(),
&dest_ids))?;
Ok(())
}
pub fn ls(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
let invalid_args: BoxedErrorResult<()> = Err("Usage: ls [distributed_filename]".into());
match args.len() {
0 => {
// All
print_file_owners(None, true)?;
Ok(())
},
1 => {
// Just File
let distributed_filename = args[0];
print_file_owners(Some(distributed_filename), false)?;
Ok(())
},
_ => invalid_args
}
}
// TODO: You wrote this very late - maybe fix
fn print_file_owners(maybe_distributed_filename: Option<&str>, full: bool) -> BoxedErrorResult<()> {
let all_file_owners = globals::ALL_FILE_OWNERS.read();
match (maybe_distributed_filename, full) {
(Some(_), true) => {
Err("Cannot set distributed_filename and full to true when printing owners".into())
},
(Some(distributed_filename), false) => {
// Print the files owners
match all_file_owners.get(distributed_filename) {
Some(owners) => {
println!("{:?}", owners);
},
None => {
// A little unoptimal - change if above format changes
println!("{{}}");
}
}
Ok(())
},
(None, true) => {
// Print the whole map
println!("{:?}", *all_file_owners);
Ok(())
},
(None, false) => {
Err("Cannot print owners of nonexistant distributed_filename with full set to false".into())
}
}
}
async fn get_distributed_file(distributed_filename: String, local_path: String) -> BoxedErrorResult<()> {
// TODO: Find owners
let operation = SendableOperation::for_owners(&distributed_filename, Box::new(GetOperation {
distributed_filename: distributed_filename.clone(),
local_path: local_path
}));
let mut streams = operation
.write_all_tcp_async()
.await?;
// TODO: Redo whatever tf going on here
match streams.len() {
0 => Err(format!("No owners found for file {}", distributed_filename).into()),
_ => {
let (result, source) = streams[0]
.try_read_operation()
.await?;
result.execute(source)?;
Ok(())
}
}
}
async fn read_file_to_buf(local_path: &String) -> BoxedErrorResult<Vec<u8>> {
let mut data_buf: Vec<u8> = Vec::new();
let mut file = async_std::fs::File::open(&local_path).await?;
file.read_to_end(&mut data_buf).await?;
Ok(data_buf)
}
async fn send_file_to_all(local_path: String, distributed_filename: String, dest_ids: &Vec<String>) ->
BoxedErrorResult<()> {
let data_buf = read_file_to_buf(&local_path).await?;
let operation = SendableOperation::for_id_list(dest_ids.clone(), Box::new(SendFileOperation {
filename: distributed_filename,
data: data_buf,
is_distributed: true
}));
operation.write_all_tcp_async().await?;
Ok(())
}
pub async fn file_server<'a>(_sender: &'a OperationSender) -> BoxedErrorResult<()> {
let server = globals::SERVER_SOCKET.read();
let mut incoming = server.incoming();
while let Some(stream) = incoming.next().await {
let connection = stream?;
log(format!("Handling connection from {:?}", connection.peer_addr()));
spawn(handle_connection(connection));
}
Ok(())
}
async fn handle_connection(mut connection: async_std::net::TcpStream) -> BoxedErrorResult<()> {
let (operation, source) = connection.try_read_operation().await?;
// TODO: Think about what standard we want with these
let _generated_operations = operation.execute(source)?;
Ok(())
}
// Helpers
fn gen_file_owners(filename: &str) -> BoxedErrorResult<Vec<String>> {
let file_idx = filename.easyhash();
heartbeat::gen_neighbor_list_from(file_idx as i32, 1, constants::NUM_OWNERS, true)
}
// TODO: This function makes the entire system assume there are always at least two nodes in the system
// and the file must have an owner or else the operation will not work correctly. This is fine for now
// but it is worth improving sooner rather than later (make distinct Error types to differentiate, etc).
fn gen_new_file_owner(filename: &str) -> BoxedErrorResult<String> {
match globals::ALL_FILE_OWNERS.read().get(filename) {
Some(owners) => {
let potential_owners = gen_file_owners(filename)?;
for potential_owner in &potential_owners {
if !owners.contains(potential_owner) |
}
Err(format!("No new owners available for file {}", filename).into())
},
None => Err(format!("No owner found for file {}", filename).into())
}
}
fn distributed_file_path(filename: &String) -> String {
format!("{}/{}", constants::DATA_DIR, filename)
}
// Returns messages to be gossiped
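// Master-only recovery sketch: when a node fails, every file it owned is
// marked lost, a LostFilesOperation is executed locally and gossiped, and a
// replacement owner is generated per file via gen_new_file_owner.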
pub fn handle_failed_node(failed_id: &String) -> BoxedErrorResult<Vec<SendableOperation>> {
match heartbeat::is_master() {
true => {
let mut generated_operations: Vec<SendableOperation> = Vec::new();
let myself_source = Source::myself();
// Find files they owned
let mut lost_files: HashSet<String> = HashSet::new();
for (distributed_filename, owners) in globals::ALL_FILE_OWNERS.read().iter() {
if owners.contains(failed_id) {
lost_files.insert(distributed_filename.clone());
}
}
log("Found lost files".to_string());
// Send that they no longer own those files
// Separate operation so that this acts like a confirmation to fully forget about the node from
// the master. Can be used with a delay later if you want more error resistance.
let lost_file_operation = LostFilesOperation {
failed_owner: failed_id.clone(),
lost_files: lost_files.clone()
};
generated_operations.append(&mut lost_file_operation.execute(myself_source.clone())?);
log("Executed lost files operation locally".to_string());
// Gen new owners of the file and propagate
let mut new_owners: HashMap<String, HashSet<String>> = HashMap::new();
for lost_file in &lost_files {
let new_owner = gen_new_file_owner(&lost_file)?;
// TODO: Maybe optimize this into one fat packet - probably a new operation?
let new_owner_operation = NewFileOwnersOperation {
distributed_filename: lost_file.clone(),
new_owners: vec![new_owner].iter().map(|x| x.to_string()).collect()
};
generated_operations.append(&mut new_owner_operation.execute(myself_source.clone())?);
}
log("Executed all new_owner operations locally".to_string());
Ok(generated_operations)
},
false => {
Ok(vec![])
}
}
}
// Operations
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetOperation {
pub distributed_filename: String,
pub local_path: String
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NewFileOwnersOperation {
pub distributed_filename: String,
pub new_owners: HashSet<String>
}
#[derive(Serialize, Deserialize, Clone)]
pub struct SendFileOperation {
pub filename: String,
pub data: Vec<u8>,
pub is_distributed: bool
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LostFilesOperation {
pub failed_owner: String,
pub lost_files: HashSet<String>
}
// Trait Impls
impl OperationWriteExecute for GetOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("GET ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let local_path = distributed_file_path(&self.distributed_filename);
let data_buf = async_std::task::block_on(read_file_to_buf(&local_path))?;
let operation = SendableOperation::for_single_tcp_stream(
TryInto::<async_std::net::TcpStream>::try_into(source)?,
Box::new(SendFileOperation {
filename: self.local_path.clone(),
data: data_buf,
is_distributed: false
}));
async_std::task::block_on(operation.write_all_tcp_async())?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for NewFileOwnersOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("NFO ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Add this file to your map with the new people that have it
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
let mut file_owners = all_file_owners.entry(self.distributed_filename.clone()).or_insert(HashSet::new());
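// Gossip onward only when this message adds owners we did not already
// know about; otherwise the flood stops at this node.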
match (&self.new_owners - file_owners).len() {
0 => {
Ok(vec![])
},
_ => {
*file_owners = &self.new_owners | file_owners;
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
}
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for SendFileOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("FILE")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Check if the file exists before overwriting
let filename = match self.is_distributed {
true => format!("{}/{}", constants::DATA_DIR, self.filename),
false => self.filename.clone()
};
let mut file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(filename)?;
file.write_all(&self.data)?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl fmt::Debug for SendFileOperation {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let formatted_data = if self.data.len() > 40 {
format!("{:?}...", &self.data[..40])
} else {
format!("{:?}", &self.data)
};
fmt.debug_struct("SendFileOperation")
.field("filename", &self.filename)
.field("data", &formatted_data)
.field("is_distributed", &self.is_distributed)
.finish()
}
}
impl OperationWriteExecute for LostFilesOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("LOST")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let mut did_remove = false;
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
for lost_file in &self.lost_files {
if let Some(owners) = all_file_owners.get_mut(lost_file) {
did_remove |= owners.remove(&self.failed_owner);
}
}
if did_remove {
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
} else {
Ok(vec![])
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
| {
return Ok(potential_owner.clone());
} | conditional_block |
filesystem.rs | use async_std;
use async_std::io::ReadExt;
use async_std::stream::StreamExt;
use async_std::task::spawn;
use crate::{BoxedError, BoxedErrorResult};
use crate::component_manager::*;
use crate::constants;
use crate::easyhash::{EasyHash, Hex};
use crate::globals;
use crate::heartbeat;
use crate::operation::*;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::fmt;
use std::future::Future;
use std::io::Write;
pub fn get(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: get distributed_filename local_path".into())
}
let distributed_filename = args[0].to_string();
let local_path = args[1].to_string();
async_std::task::block_on(get_distributed_file(distributed_filename, local_path))?;
Ok(())
}
// args[0] = path to local file
// args[1] = distributed filename
pub fn put(args: Vec<&str>, sender: &OperationSender) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: put local_path distributed_filename".into())
}
let local_path = args[0];
let distributed_filename = args[1];
// Figure out who I am giving this file to
let dest_ids = gen_file_owners(&distributed_filename)?;
// Gossip who has the file now
sender.send(
SendableOperation::for_successors(Box::new(NewFileOwnersOperation {
distributed_filename: distributed_filename.to_string(),
new_owners: dest_ids
.iter()
.map(|x| x.to_string())
.collect::<HashSet<_>>()
}))
)?;
// Send them the file
async_std::task::block_on(send_file_to_all(local_path.to_string(),
distributed_filename.to_string(),
&dest_ids))?;
Ok(())
}
pub fn ls(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
let invalid_args: BoxedErrorResult<()> = Err("Usage: ls [distributed_filename]".into());
match args.len() {
0 => {
// All
print_file_owners(None, true)?;
Ok(())
},
1 => {
// Just File
let distributed_filename = args[0];
print_file_owners(Some(distributed_filename), false)?;
Ok(())
},
_ => invalid_args
}
}
// TODO: You wrote this very late - maybe fix
fn print_file_owners(maybe_distributed_filename: Option<&str>, full: bool) -> BoxedErrorResult<()> {
let all_file_owners = globals::ALL_FILE_OWNERS.read();
match (maybe_distributed_filename, full) {
(Some(_), true) => {
Err("Cannot set distributed_filename and full to true when printing owners".into())
},
(Some(distributed_filename), false) => {
// Print the files owners
match all_file_owners.get(distributed_filename) {
Some(owners) => {
println!("{:?}", owners);
},
None => {
// A little unoptimal - change if above format changes
println!("{{}}");
}
}
Ok(())
},
(None, true) => {
// Print the whole map
println!("{:?}", *all_file_owners);
Ok(())
},
(None, false) => {
Err("Cannot print owners of nonexistant distributed_filename with full set to false".into())
}
}
}
async fn get_distributed_file(distributed_filename: String, local_path: String) -> BoxedErrorResult<()> {
// TODO: Find owners
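// Flow: request the file from every known owner over TCP, read a
// SendFileOperation back on the first stream, and execute it, which
// writes the received bytes to local_path.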
let operation = SendableOperation::for_owners(&distributed_filename, Box::new(GetOperation {
distributed_filename: distributed_filename.clone(),
local_path: local_path
}));
let mut streams = operation
.write_all_tcp_async()
.await?;
// TODO: Redo whatever tf going on here
match streams.len() {
0 => Err(format!("No owners found for file {}", distributed_filename).into()),
_ => {
let (result, source) = streams[0]
.try_read_operation()
.await?;
result.execute(source)?;
Ok(())
}
}
}
async fn read_file_to_buf(local_path: &String) -> BoxedErrorResult<Vec<u8>> {
let mut data_buf: Vec<u8> = Vec::new();
let mut file = async_std::fs::File::open(&local_path).await?;
file.read_to_end(&mut data_buf).await?;
Ok(data_buf)
}
async fn send_file_to_all(local_path: String, distributed_filename: String, dest_ids: &Vec<String>) ->
BoxedErrorResult<()> {
let data_buf = read_file_to_buf(&local_path).await?;
let operation = SendableOperation::for_id_list(dest_ids.clone(), Box::new(SendFileOperation {
filename: distributed_filename,
data: data_buf,
is_distributed: true
}));
operation.write_all_tcp_async().await?;
Ok(())
}
pub async fn file_server<'a>(_sender: &'a OperationSender) -> BoxedErrorResult<()> {
let server = globals::SERVER_SOCKET.read();
let mut incoming = server.incoming();
while let Some(stream) = incoming.next().await {
let connection = stream?;
log(format!("Handling connection from {:?}", connection.peer_addr()));
spawn(handle_connection(connection));
}
Ok(())
}
async fn handle_connection(mut connection: async_std::net::TcpStream) -> BoxedErrorResult<()> {
let (operation, source) = connection.try_read_operation().await?;
// TODO: Think about what standard we want with these
let _generated_operations = operation.execute(source)?;
Ok(())
}
// Helpers
fn gen_file_owners(filename: &str) -> BoxedErrorResult<Vec<String>> {
let file_idx = filename.easyhash();
heartbeat::gen_neighbor_list_from(file_idx as i32, 1, constants::NUM_OWNERS, true)
} |
// TODO: This function makes the entire system assume there are always at least two nodes in the system
// and the file must have an owner or else the operation will not work correctly. This is fine for now
// but it is worth improving sooner rather than later (make distinct Error types to differentiate, etc).
fn gen_new_file_owner(filename: &str) -> BoxedErrorResult<String> {
match globals::ALL_FILE_OWNERS.read().get(filename) {
Some(owners) => {
let potential_owners = gen_file_owners(filename)?;
for potential_owner in &potential_owners {
if !owners.contains(potential_owner) {
return Ok(potential_owner.clone());
}
}
Err(format!("No new owners available for file {}", filename).into())
},
None => Err(format!("No owner found for file {}", filename).into())
}
}
fn distributed_file_path(filename: &String) -> String {
format!("{}/{}", constants::DATA_DIR, filename)
}
// Returns messages to be gossiped
pub fn handle_failed_node(failed_id: &String) -> BoxedErrorResult<Vec<SendableOperation>> {
match heartbeat::is_master() {
true => {
let mut generated_operations: Vec<SendableOperation> = Vec::new();
let myself_source = Source::myself();
// Find files they owned
let mut lost_files: HashSet<String> = HashSet::new();
for (distributed_filename, owners) in globals::ALL_FILE_OWNERS.read().iter() {
if owners.contains(failed_id) {
lost_files.insert(distributed_filename.clone());
}
}
log("Found lost files".to_string());
// Send that they no longer own those files
// Separate operation so that this acts like a confirmation to fully forget about the node from
// the master. Can be used with a delay later if you want more error resistance.
let lost_file_operation = LostFilesOperation {
failed_owner: failed_id.clone(),
lost_files: lost_files.clone()
};
generated_operations.append(&mut lost_file_operation.execute(myself_source.clone())?);
log("Executed lost files operation locally".to_string());
// Gen new owners of the file and propagate
let mut new_owners: HashMap<String, HashSet<String>> = HashMap::new();
for lost_file in &lost_files {
let new_owner = gen_new_file_owner(&lost_file)?;
// TODO: Maybe optimize this into one fat packet - probably a new operation?
let new_owner_operation = NewFileOwnersOperation {
distributed_filename: lost_file.clone(),
new_owners: vec![new_owner].iter().map(|x| x.to_string()).collect()
};
generated_operations.append(&mut new_owner_operation.execute(myself_source.clone())?);
}
log("Executed all new_owner operations locally".to_string());
Ok(generated_operations)
},
false => {
Ok(vec![])
}
}
}
// Operations
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetOperation {
pub distributed_filename: String,
pub local_path: String
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NewFileOwnersOperation {
pub distributed_filename: String,
pub new_owners: HashSet<String>
}
#[derive(Serialize, Deserialize, Clone)]
pub struct SendFileOperation {
pub filename: String,
pub data: Vec<u8>,
pub is_distributed: bool
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LostFilesOperation {
pub failed_owner: String,
pub lost_files: HashSet<String>
}
// Trait Impls
impl OperationWriteExecute for GetOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("GET ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let local_path = distributed_file_path(&self.distributed_filename);
let data_buf = async_std::task::block_on(read_file_to_buf(&local_path))?;
let operation = SendableOperation::for_single_tcp_stream(
TryInto::<async_std::net::TcpStream>::try_into(source)?,
Box::new(SendFileOperation {
filename: self.local_path.clone(),
data: data_buf,
is_distributed: false
}));
async_std::task::block_on(operation.write_all_tcp_async());
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for NewFileOwnersOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("NFO ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Add this file to your map with the new people that have it
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
let mut file_owners = all_file_owners.entry(self.distributed_filename.clone()).or_insert(HashSet::new());
match (&self.new_owners - file_owners).len() {
0 => {
Ok(vec![])
},
_ => {
*file_owners = &self.new_owners | file_owners;
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
}
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for SendFileOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("FILE")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Check if the file exists before overwriting
let filename = match self.is_distributed {
true => format!("{}/{}", constants::DATA_DIR, self.filename),
false => self.filename.clone()
};
let mut file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(filename)?;
file.write_all(&self.data);
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl fmt::Debug for SendFileOperation {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let formatted_data = if self.data.len() > 40 {
format!("{:?}...", &self.data[..40])
} else {
format!("{:?}", &self.data)
};
fmt.debug_struct("SendFileOperation")
.field("filename", &self.filename)
.field("data", &formatted_data)
.field("is_distributed", &self.is_distributed)
.finish()
}
}
impl OperationWriteExecute for LostFilesOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("LOST")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let mut did_remove = false;
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
for lost_file in &self.lost_files {
if let Some(owners) = all_file_owners.get_mut(lost_file) {
did_remove |= owners.remove(&self.failed_owner);
}
}
if did_remove {
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
} else {
Ok(vec![])
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
} | random_line_split |
|
filesystem.rs

use async_std;
use async_std::io::ReadExt;
use async_std::stream::StreamExt;
use async_std::task::spawn;
use crate::{BoxedError, BoxedErrorResult};
use crate::component_manager::*;
use crate::constants;
use crate::easyhash::{EasyHash, Hex};
use crate::globals;
use crate::heartbeat;
use crate::operation::*;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::fmt;
use std::future::Future;
use std::io::Write;
pub fn get(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: get distributed_filename local_path".into())
}
let distributed_filename = args[0].to_string();
let local_path = args[1].to_string();
async_std::task::block_on(get_distributed_file(distributed_filename, local_path))?;
Ok(())
}
// args[0] = path to local file
// args[1] = distributed filename
pub fn put(args: Vec<&str>, sender: &OperationSender) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: put local_path distributed_filename".into())
}
let local_path = args[0];
let distributed_filename = args[1];
// Figure out who I am giving this file to
let dest_ids = gen_file_owners(&distributed_filename)?;
// Gossip who has the file now
sender.send(
SendableOperation::for_successors(Box::new(NewFileOwnersOperation {
distributed_filename: distributed_filename.to_string(),
new_owners: dest_ids
.iter()
.map(|x| x.to_string())
.collect::<HashSet<_>>()
}))
)?;
// Send them the file
async_std::task::block_on(send_file_to_all(local_path.to_string(),
distributed_filename.to_string(),
&dest_ids))?;
Ok(())
}
pub fn ls(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
let invalid_args: BoxedErrorResult<()> = Err("Usage: ls [distributed_filename]".into());
match args.len() {
0 => {
// All
print_file_owners(None, true)?;
Ok(())
},
1 => {
// Just File
let distributed_filename = args[0];
print_file_owners(Some(distributed_filename), false)?;
Ok(())
},
_ => invalid_args
}
}
// TODO: You wrote this very late - maybe fix
fn print_file_owners(maybe_distributed_filename: Option<&str>, full: bool) -> BoxedErrorResult<()> {
let all_file_owners = globals::ALL_FILE_OWNERS.read();
match (maybe_distributed_filename, full) {
(Some(_), true) => {
Err("Cannot set distributed_filename and full to true when printing owners".into())
},
(Some(distributed_filename), false) => {
// Print the files owners
match all_file_owners.get(distributed_filename) {
Some(owners) => {
println!("{:?}", owners);
},
None => {
// A little suboptimal - change if the above format changes
println!("{{}}");
}
}
Ok(())
},
(None, true) => {
// Print the whole map
println!("{:?}", *all_file_owners);
Ok(())
},
(None, false) => {
Err("Cannot print owners of nonexistant distributed_filename with full set to false".into())
}
}
}
async fn get_distributed_file(distributed_filename: String, local_path: String) -> BoxedErrorResult<()> {
// TODO: Find owners
let operation = SendableOperation::for_owners(&distributed_filename, Box::new(GetOperation {
distributed_filename: distributed_filename.clone(),
local_path: local_path
}));
let mut streams = operation
.write_all_tcp_async()
.await?;
// TODO: Redo whatever tf going on here
match streams.len() {
0 => Err(format!("No owners found for file {}", distributed_filename).into()),
_ => {
let (result, source) = streams[0]
.try_read_operation()
.await?;
result.execute(source)?;
Ok(())
}
}
}
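// Note: only the first responding owner's stream is consumed above; the reply
// it carries is a SendFileOperation whose execute() writes the fetched bytes
// to `local_path`.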
async fn read_file_to_buf(local_path: &String) -> BoxedErrorResult<Vec<u8>> {
let mut data_buf: Vec<u8> = Vec::new();
let mut file = async_std::fs::File::open(&local_path).await?;
file.read_to_end(&mut data_buf).await?;
Ok(data_buf)
}
async fn send_file_to_all(local_path: String, distributed_filename: String, dest_ids: &Vec<String>) ->
BoxedErrorResult<()> {
let data_buf = read_file_to_buf(&local_path).await?;
let operation = SendableOperation::for_id_list(dest_ids.clone(), Box::new(SendFileOperation {
filename: distributed_filename,
data: data_buf,
is_distributed: true
}));
operation.write_all_tcp_async().await?;
Ok(())
}
pub async fn file_server<'a>(_sender: &'a OperationSender) -> BoxedErrorResult<()> {
    let server = globals::SERVER_SOCKET.read();
    let mut incoming = server.incoming();
    while let Some(stream) = incoming.next().await {
        let connection = stream?;
        log(format!("Handling connection from {:?}", connection.peer_addr()));
        spawn(handle_connection(connection));
    }
    Ok(())
}
async fn handle_connection(mut connection: async_std::net::TcpStream) -> BoxedErrorResult<()> {
let (operation, source) = connection.try_read_operation().await?;
// TODO: Think about what standard we want with these
let _generated_operations = operation.execute(source)?;
Ok(())
}
// Helpers
fn gen_file_owners(filename: &str) -> BoxedErrorResult<Vec<String>> {
let file_idx = filename.easyhash();
heartbeat::gen_neighbor_list_from(file_idx as i32, 1, constants::NUM_OWNERS, true)
}
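// Placement sketch (assuming heartbeat::gen_neighbor_list_from walks the
// membership ring clockwise from the given index): the filename is hashed onto
// the ring and the next NUM_OWNERS nodes become its owners, so every node can
// derive the same owner set with no coordination:
//
//     owners(f) ~= ring[h(f) % n ..][..NUM_OWNERS]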
// TODO: This function makes the entire system assume there are always at least two nodes in the system
// and the file must have an owner or else the operation will not work correctly. This is fine for now
// but it is worth improving sooner rather than later (make distinct Error types to differentiate, etc).
fn gen_new_file_owner(filename: &str) -> BoxedErrorResult<String> {
match globals::ALL_FILE_OWNERS.read().get(filename) {
Some(owners) => {
let potential_owners = gen_file_owners(filename)?;
for potential_owner in &potential_owners {
if !owners.contains(potential_owner) {
return Ok(potential_owner.clone());
}
}
Err(format!("No new owners available for file {}", filename).into())
},
None => Err(format!("No owner found for file {}", filename).into())
}
}
fn distributed_file_path(filename: &String) -> String {
format!("{}/{}", constants::DATA_DIR, filename)
}
// Returns messages to be gossiped
pub fn handle_failed_node(failed_id: &String) -> BoxedErrorResult<Vec<SendableOperation>> {
match heartbeat::is_master() {
true => {
let mut generated_operations: Vec<SendableOperation> = Vec::new();
let myself_source = Source::myself();
// Find files they owned
let mut lost_files: HashSet<String> = HashSet::new();
for (distributed_filename, owners) in globals::ALL_FILE_OWNERS.read().iter() {
if owners.contains(failed_id) {
lost_files.insert(distributed_filename.clone());
}
}
log("Found lost files".to_string());
// Send that they no longer own those files
// Separate operation so that this acts like a confirmation to fully forget about the node from
// the master. Can be used with a delay later if you want more error resistance.
let lost_file_operation = LostFilesOperation {
failed_owner: failed_id.clone(),
lost_files: lost_files.clone()
};
generated_operations.append(&mut lost_file_operation.execute(myself_source.clone())?);
log("Executed lost files operation locally".to_string());
// Gen new owners of the file and propagate
for lost_file in &lost_files {
let new_owner = gen_new_file_owner(&lost_file)?;
// TODO: Maybe optimize this into one fat packet - probably a new operation?
let new_owner_operation = NewFileOwnersOperation {
distributed_filename: lost_file.clone(),
new_owners: vec![new_owner].iter().map(|x| x.to_string()).collect()
};
generated_operations.append(&mut new_owner_operation.execute(myself_source.clone())?);
}
log("Executed all new_owner operations locally".to_string());
Ok(generated_operations)
},
false => {
Ok(vec![])
}
}
}
// Operations
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetOperation {
pub distributed_filename: String,
pub local_path: String
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NewFileOwnersOperation {
pub distributed_filename: String,
pub new_owners: HashSet<String>
}
#[derive(Serialize, Deserialize, Clone)]
pub struct SendFileOperation {
pub filename: String,
pub data: Vec<u8>,
pub is_distributed: bool
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LostFilesOperation {
pub failed_owner: String,
pub lost_files: HashSet<String>
}
// Trait Impls
impl OperationWriteExecute for GetOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("GET ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let local_path = distributed_file_path(&self.distributed_filename);
let data_buf = async_std::task::block_on(read_file_to_buf(&local_path))?;
let operation = SendableOperation::for_single_tcp_stream(
TryInto::<async_std::net::TcpStream>::try_into(source)?,
Box::new(SendFileOperation {
filename: self.local_path.clone(),
data: data_buf,
is_distributed: false
}));
async_std::task::block_on(operation.write_all_tcp_async())?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for NewFileOwnersOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("NFO ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Add this file to your map with the new people that have it
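// `&self.new_owners - file_owners` is HashSet difference: the owners we have
// not recorded yet. An empty difference means nothing new, so stop gossiping;
// otherwise merge them in (`|` is set union) and forward to our successors.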
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
let file_owners = all_file_owners.entry(self.distributed_filename.clone()).or_insert_with(HashSet::new);
match (&self.new_owners - file_owners).len() {
0 => {
Ok(vec![])
},
_ => {
*file_owners = &self.new_owners | file_owners;
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
}
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for SendFileOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("FILE")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Check if the file exists before overwriting
let filename = match self.is_distributed {
true => format!("{}/{}", constants::DATA_DIR, self.filename),
false => self.filename.clone()
};
let mut file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
// Truncate so stale bytes from a longer previous version don't survive.
.truncate(true)
.open(filename)?;
file.write_all(&self.data)?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl fmt::Debug for SendFileOperation {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let formatted_data = if self.data.len() > 40 {
format!("{:?}...", &self.data[..40])
} else {
format!("{:?}", &self.data)
};
fmt.debug_struct("SendFileOperation")
.field("filename", &self.filename)
.field("data", &formatted_data)
.field("is_distributed", &self.is_distributed)
.finish()
}
}
impl OperationWriteExecute for LostFilesOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("LOST")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let mut did_remove = false;
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
for lost_file in &self.lost_files {
if let Some(owners) = all_file_owners.get_mut(lost_file) {
did_remove |= owners.remove(&self.failed_owner);
}
}
if did_remove {
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
} else {
Ok(vec![])
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
main.rs

mod utils;
use chrono::{DateTime, Utc, TimeZone};
use actix::{Actor, Handler, Message, AsyncContext};
use actix_web::{http, web, HttpResponse, App};
use actix_web::middleware::cors::Cors;
use futures::{Future};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use log::debug;
use crate::utils::ErrString;
#[derive(Debug, Serialize, Clone)]
struct Item {
pub title: Option<String>,
pub link: Option<String>,
pub content: Option<String>,
pub pub_date: Option<DateTime<Utc>>,
pub guid: String,
pub unread: bool,
}
#[derive(Clone, Debug, Serialize)]
struct Feed {
pub title: String,
pub last_updated: DateTime<Utc>,
pub items: Vec<Item>,
}
impl Feed {
pub fn merge(&mut self, other: Feed) {
self.title = other.title;
self.last_updated = other.last_updated;
let mut items: HashMap<&str, Item> = self.items.iter().map(|item| (item.guid.as_str(), item.clone())).collect();
for item in other.items.iter() {
let guid = &item.guid;
items.entry(&guid).or_insert_with(|| Item {
title: item.title.to_owned(),
link: item.link.to_owned(),
content: item.content.to_owned(),
pub_date: item.pub_date.to_owned(),
guid: guid.to_owned(),
unread: true,
});
}
self.items = items.drain().map(|(_, v)| v).collect();
self.items.sort_by_key(|item| item.pub_date.clone());
}
}
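// Illustrative test (not part of the original file): `merge` keeps items we
// already track - preserving their read state - inserts unseen guids as
// unread, and re-sorts by publication date.
#[cfg(test)]
mod feed_merge_tests {
    use super::*;

    fn item(guid: &str, unread: bool) -> Item {
        Item { title: None, link: None, content: None, pub_date: None, guid: guid.to_string(), unread }
    }

    #[test]
    fn merge_preserves_read_state_and_adds_new_items() {
        let mut ours = Feed { title: "old".into(), last_updated: Utc.timestamp(0, 0), items: vec![item("a", false)] };
        let theirs = Feed { title: "new".into(), last_updated: Utc.timestamp(1, 0), items: vec![item("a", true), item("b", true)] };
        ours.merge(theirs);
        // "a" keeps its read state; "b" arrives unread; metadata follows `theirs`.
        assert!(!ours.items.iter().find(|i| i.guid == "a").unwrap().unread);
        assert!(ours.items.iter().find(|i| i.guid == "b").unwrap().unread);
        assert_eq!(ours.title, "new");
    }
}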
#[derive(Serialize)]
struct FeedInfo {
pub url: String,
pub title: String,
pub last_updated: DateTime<Utc>,
}
struct DownloadFeed(String);
#[derive(Deserialize)]
struct AddFeed { url: String }
#[derive(Deserialize)]
struct RemoveFeed { url: String }
#[derive(Deserialize, Debug)]
struct GetFeed { url: String }
struct ListFeeds;
#[derive(Deserialize, Debug)]
struct MarkRead { url: String, guid: String }
#[derive(Message)]
struct UpdateFeed { url: String, feed: Feed }
impl Message for DownloadFeed {
type Result = Result<Feed, String>;
}
impl Message for AddFeed {
type Result = Result<(), String>;
}
impl Message for RemoveFeed {
type Result = Result<(), String>;
}
impl Message for GetFeed {
type Result = Result<Feed, String>;
}
impl Message for ListFeeds {
type Result = Result<Vec<FeedInfo>, String>;
}
impl Message for MarkRead {
type Result = Result<bool, String>;
}
struct FeedStorage {
feeds: HashMap<String, Feed>,
downloader: actix::Addr<Downloader>,
}
impl Actor for FeedStorage {
type Context = actix::SyncContext<Self>;
}
impl Handler<DownloadFeed> for FeedStorage {
type Result = <DownloadFeed as Message>::Result;
fn handle(&mut self, msg: DownloadFeed, _: &mut Self::Context) -> Self::Result {
self.downloader.send(msg).wait().or_err("Download failed")?
}
}
impl Handler<AddFeed> for FeedStorage {
type Result = <AddFeed as Message>::Result;
fn handle(&mut self, msg: AddFeed, _: &mut Self::Context) -> Self::Result {
match self.feeds.entry(msg.url.clone()) {
std::collections::hash_map::Entry::Occupied(_) => Err("Feed already exists".into()),
std::collections::hash_map::Entry::Vacant(e) => {
debug!("will download {}", &msg.url);
self.downloader.send(DownloadFeed(msg.url))
.wait()
.or_err("Failed to download")?
.map(|feed| {
debug!("downloaded");
e.insert(feed);
})
}
}
}
}
impl Handler<RemoveFeed> for FeedStorage {
type Result = <RemoveFeed as Message>::Result;
fn handle(&mut self, msg: RemoveFeed, _: &mut Self::Context) -> Self::Result {
self.feeds.remove(&msg.url);
Ok(())
}
}
impl Handler<GetFeed> for FeedStorage {
type Result = <GetFeed as Message>::Result;
fn handle(&mut self, msg: GetFeed, _: &mut Self::Context) -> Self::Result {
match self.feeds.get(&msg.url) {
None => Err("Feed not found".into()),
Some(feed) => Ok(feed.clone()),
}
}
}
impl Handler<ListFeeds> for FeedStorage {
type Result = <ListFeeds as Message>::Result;
fn handle(&mut self, _: ListFeeds, _: &mut Self::Context) -> Self::Result {
Ok(self.feeds.iter().map(|(k, v)| FeedInfo{url: k.clone(), title: v.title.clone(), last_updated: v.last_updated.clone()}).collect())
}
}
impl Handler<MarkRead> for FeedStorage {
type Result = <MarkRead as Message>::Result;
fn handle(&mut self, msg: MarkRead, _: &mut Self::Context) -> Self::Result {
let mut updated = false;
if let Some(feed) = self.feeds.get_mut(&msg.url) {
for item in feed.items.iter_mut().filter(|k| &k.guid == &msg.guid).take(1) {
item.unread = false;
updated = true;
}
}
Ok(updated)
}
}
impl Handler<UpdateFeed> for FeedStorage {
type Result = <UpdateFeed as Message>::Result;
fn handle(&mut self, msg: UpdateFeed, _: &mut Self::Context) -> Self::Result {
if let Some(feed) = self.feeds.get_mut(&msg.url) {
    feed.merge(msg.feed)
};
}
}
struct Downloader;
impl Actor for Downloader {
type Context = actix::Context<Self>;
}
impl Handler<DownloadFeed> for Downloader {
type Result = <DownloadFeed as Message>::Result;
fn handle(&mut self, msg: DownloadFeed, _: &mut Self::Context) -> Self::Result {
let channel = rss::Channel::from_url(&msg.0).or_err("Channel not downloaded")?;
let mut items = vec![];
for item in channel.items().iter() {
let guid = item.guid().or_err("broken channel")?.value();
items.push(Item {
title: item.title().map(|s| s.to_string()),
link: item.link().map(|s| s.to_string()),
content: item.content().or(item.description()).map(|s| s.to_string()),
pub_date: item.pub_date().and_then(|date| DateTime::parse_from_rfc2822(date).ok().map(|d| d.with_timezone(&Utc))),
guid: guid.to_string(),
unread: true,
});
}
Ok(Feed{
title: channel.title().to_owned(),
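// Prefer the channel's lastBuildDate; otherwise fall back to the newest item
// date, and finally to the Unix epoch when nothing is present or parseable.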
last_updated: match channel.last_build_date() {
None => items
.iter()
.map(|item| &item.pub_date)
.max()
.map(|date| date.to_owned())
.unwrap_or(Some(Utc.timestamp(0, 0)))
.unwrap_or(Utc.timestamp(0, 0)),
Some(s) => DateTime::parse_from_rfc2822(s).map(|d| d.with_timezone(&Utc)).unwrap_or(Utc.timestamp(0, 0))
},
items: items
})
}
}
struct Updater {
storage: actix::Addr<FeedStorage>,
downloader: actix::Addr<Downloader>,
handle: Option<actix::SpawnHandle>,
arbiter: actix::Arbiter,
}
impl Actor for Updater {
type Context = actix::Context<Self>;
fn started(&mut self, ctx: &mut <Self as Actor>::Context) {
let storage = self.storage.clone();
let downloader = self.downloader.clone();
let arbiter = self.arbiter.clone();
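// Handles are cloned once into the `run_interval` closure, then re-cloned on
// every tick so the inner `exec_fn` closure can own its own copies.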
self.handle = Some(ctx.run_interval(std::time::Duration::new(60, 0), move |_, _| {
let storage = storage.clone();
let downloader = downloader.clone();
let arbiter = arbiter.clone();
arbiter.exec_fn(move || {
if let Ok(Ok(infos)) = storage.send(ListFeeds).wait() {
debug!("got {} feeds, updating", infos.len());
for info in infos {
if let Ok(Ok(new_feed)) = downloader.send(DownloadFeed(info.url.clone())).wait() {
if let Ok(()) = storage.send(UpdateFeed{url: info.url.clone(), feed: new_feed}).wait() {
debug!("successfully updated {}", info.url);
}
}
}
}
});
}));
}
}
fn process_response<T: Serialize, E: Serialize, E2, F: FnOnce(E) -> actix_web::Error>(response: Result<Result<T, E>, E2>, f: F) -> Result<HttpResponse, actix_web::Error> {
match response {
Ok(Ok(data)) => Ok(HttpResponse::Ok().json(data)),
Ok(Err(e)) => Err(f(e)),
_ => Err(actix_web::error::ErrorInternalServerError("Application overload"))
}
}
#[derive(Clone)]
struct State { storage: actix::Addr<FeedStorage> }
fn add_feed(url_info: web::Form<AddFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn remove_feed(url_info: web::Form<RemoveFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn get_feed(url_info: web::Query<GetFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorNotFound))
}
fn list_feeds(data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(ListFeeds).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn mark_read(url_info: web::Form<MarkRead>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn actix_main() -> Result<(), std::io::Error> {
let downloader_addr = Downloader.start();
let feed_storage_addr = {
let addr = downloader_addr.clone();
actix::SyncArbiter::start(1, move || FeedStorage{
feeds: HashMap::new(),
downloader: addr.clone(),
})
};
let state = State{storage: feed_storage_addr.clone()};
let updater = Updater{storage: feed_storage_addr, downloader: downloader_addr.clone(), handle: None, arbiter: actix::Arbiter::new()};
updater.start();
let mut server = actix_web::HttpServer::new(move || {
App::new()
.data(state.clone())
.wrap(
Cors::new()
.allowed_methods(vec!["GET", "POST", "OPTIONS"])
.allowed_headers(vec![
http::header::ACCEPT,
http::header::CONTENT_TYPE,
http::header::HeaderName::from_static("x-requested-with")
])
.max_age(3600)
)
.wrap(actix_web::middleware::Logger::default())
.route("/add", web::post().to_async(add_feed))
.route("/remove", web::post().to_async(remove_feed))
.route("/read", web::post().to_async(mark_read))
.route("/list", web::get().to_async(list_feeds))
.route("/get", web::get().to_async(get_feed))
});
let mut listenfd = listenfd::ListenFd::from_env();
server = if let Some(l) = listenfd.take_tcp_listener(0)? {
server.listen(l)?
} else {
server.bind("[::1]:8000")?
};
println!("Started HTTP server on {:?}", server.addrs_with_scheme().iter().map(|(a, s)| format!("{}://{}/", s, a)).collect::<Vec<_>>());
server.start();
Ok(())
}
pub fn main() -> Result<(), std::io::Error> {
std::env::set_var("RUST_LOG", "actix_web=debug,rssreader=debug");
env_logger::init();
actix::System::run(|| {actix_main().expect("App crashed");} )
}
sssmc39_scheme.rs

// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions and structs that specifically define the SLIPS-0039 scheme
use super::{Share, Splitter};
use crate::error::{Error, ErrorKind};
use std::collections::BTreeMap;
use std::fmt;
use crate::util;
/// Struct for returned shares
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct GroupShare {
/// Group id
pub group_id: u16,
/// iteration exponent
pub iteration_exponent: u8,
/// group index
pub group_index: u8,
/// group threshold
pub group_threshold: u8,
/// number of group shares
pub group_count: u8,
/// member threshold:
pub member_threshold: u8,
/// Member shares for the group
pub member_shares: Vec<Share>,
}
impl fmt::Display for GroupShare {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Group {} of {} - {} of {} shares required: ",
self.group_index + 1,
self.group_count,
self.member_threshold,
self.member_shares.len()
)?;
for s in &self.member_shares {
for w in s.to_mnemonic().unwrap() {
write!(f, "{} ", w)?;
}
writeln!(f)?;
}
Ok(())
}
}
impl GroupShare {
/// return list of mnemonics
pub fn mnemonic_list(&self) -> Result<Vec<Vec<String>>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?);
}
Ok(ret_vec)
}
/// return list of mnemonics as space separated strings
pub fn mnemonic_list_flat(&self) -> Result<Vec<String>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?.iter().fold(String::new(), |mut acc, s| {
acc.push_str(s);
acc.push(' ');
acc
}))
}
Ok(ret_vec)
}
/// decode member shares to single share
pub fn decode_shares(&mut self) -> Result<Share, Error> {
let sp = Splitter::new(None);
sp.recover_secret(&self.member_shares, self.member_threshold)
}
}
/// Split a master secret into mnemonic shares
/// group_threshold: The number of groups required to reconstruct the master secret
/// groups: A list of (member_threshold, member_count) pairs for each group, where member_count
/// is the number of shares to generate for the group and member_threshold is the number of
/// members required to reconstruct the group secret.
/// master_secret: The master secret to split.
/// passphrase: The passphrase used to encrypt the master secret.
/// iteration_exponent: The iteration exponent.
/// return: List of mnemonics.
pub fn generate_mnemonics(
group_threshold: u8,
groups: &[(u8, u8)],
master_secret: &[u8],
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
// Generate a 'proto share' so to speak, with identifier generated and group data filled
let mut proto_share = Share::new()?;
proto_share.group_threshold = group_threshold;
proto_share.group_count = groups.len() as u8;
if master_secret.len() * 8 < proto_share.config.min_strength_bits as usize {
return Err(ErrorKind::Value(format!(
"The length of the master secret ({} bytes) must be at least {} bytes.",
master_secret.len(),
(f64::from(proto_share.config.min_strength_bits) / 8f64).ceil(),
)))?;
}
if master_secret.len() % 2 != 0 {
return Err(ErrorKind::Value(
"The length of the master secret in bytes must be an even number".to_string(),
))?;
}
if group_threshold as usize > groups.len() {
return Err(ErrorKind::Value(format!(
"The requested group threshold ({}) must not exceed the number of groups ({}).",
group_threshold,
groups.len()
)))?;
}
let encoder = util::encrypt::MasterSecretEnc::new()?;
let encrypted_master_secret = encoder.encrypt(
master_secret,
passphrase,
iteration_exponent,
proto_share.identifier,
);
let sp = Splitter::new(None);
let group_shares = sp.split_secret(
&proto_share,
group_threshold,
groups.len() as u8,
&encrypted_master_secret,
)?;
let mut retval: Vec<GroupShare> = vec![];
let gs_len = group_shares.len();
for (i, elem) in group_shares.into_iter().enumerate() {
proto_share.group_index = i as u8;
proto_share.group_threshold = group_threshold;
proto_share.group_count = gs_len as u8;
let (member_threshold, member_count) = groups[i];
let member_shares = sp.split_secret(
&proto_share,
member_threshold,
member_count,
&elem.share_value,
)?;
retval.push(GroupShare {
group_id: proto_share.identifier,
iteration_exponent,
group_index: i as u8,
group_threshold,
group_count: gs_len as u8,
member_threshold,
member_shares,
});
}
Ok(retval)
}
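/// Same as `generate_mnemonics`, but first draws a random master secret of
/// `strength_bits` bits. The strength must be at least the configured minimum
/// and a multiple of 16 bits.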
pub fn generate_mnemonics_random(
group_threshold: u8,
groups: &[(u8, u8)],
strength_bits: u16,
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
let proto_share = Share::new()?;
if strength_bits < proto_share.config.min_strength_bits {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be at least {} bits.",
strength_bits, proto_share.config.min_strength_bits,
)))?;
}
if strength_bits % 16 != 0 {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be a multiple of 16 bits.",
strength_bits,
)))?;
}
generate_mnemonics(
group_threshold,
groups,
&util::fill_vec_rand(strength_bits as usize / 8),
passphrase,
iteration_exponent,
)
}
/// Combines mnemonic shares to obtain the master secret which was previously split using
/// Shamir's secret sharing scheme.
/// mnemonics: List of mnemonics.
/// passphrase: The passphrase used to encrypt the master secret.
/// return: The master secret.
pub fn combine_mnemonics(mnemonics: &[Vec<String>], passphrase: &str) -> Result<Vec<u8>, Error> {
let group_shares = decode_mnemonics(mnemonics)?;
let mut shares = vec![];
for mut gs in group_shares {
shares.push(gs.decode_shares()?);
}
let sp = Splitter::new(None);
// restore proper member index for groups
let shares = shares
.into_iter()
.map(|mut s| {
s.member_index = s.group_index;
s
})
.collect::<Vec<_>>();
let ems = sp.recover_secret(&shares, shares[0].group_threshold)?;
let encoder = util::encrypt::MasterSecretEnc::new()?;
let dms = encoder.decrypt(
    &ems.share_value,
    passphrase,
    ems.iteration_exponent,
    ems.identifier,
);
Ok(dms)
}
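// Recovery pipeline used above: mnemonics are decoded and bucketed by group
// index, each group recovers its group share via Shamir interpolation, the
// group shares recover the encrypted master secret, and passphrase-based
// decryption (keyed on the shared identifier and iteration exponent) yields
// the master secret.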
/// Decodes all Mnemonics to a list of shares and performs error checking
fn decode_mnemonics(mnemonics: &[Vec<String>]) -> Result<Vec<GroupShare>, Error> {
let mut shares = vec![];
if mnemonics.is_empty() {
return Err(ErrorKind::Mnemonic(
"List of mnemonics is empty.".to_string(),
))?;
}
let check_len = mnemonics[0].len();
for m in mnemonics {
if m.len() != check_len {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same length.".to_string(),
))?;
}
shares.push(Share::from_mnemonic(m)?);
}
let check_share = shares[0].clone();
for s in shares.iter() {
if s.identifier != check_share.identifier
|| s.iteration_exponent != check_share.iteration_exponent
{
return Err(ErrorKind::Mnemonic(format!(
"Invalid set of mnemonics. All mnemonics must begin with the same {} words. \
(Identifier and iteration exponent must be the same).",
s.config.id_exp_length_words,
)))?;
}
if s.group_threshold != check_share.group_threshold {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group threshold"
.to_string(),
))?;
}
if s.group_count != check_share.group_count {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group count"
.to_string(),
))?;
}
}
let mut group_index_map = BTreeMap::new();
for s in shares {
if !group_index_map.contains_key(&s.group_index) {
let group_share = GroupShare {
group_id: s.identifier,
group_index: s.group_index,
group_threshold: s.group_threshold,
iteration_exponent: s.iteration_exponent,
group_count: s.group_count,
member_shares: vec![s.clone()],
member_threshold: s.member_threshold,
};
group_index_map.insert(group_share.group_index, group_share);
} else {
let e = group_index_map.get_mut(&s.group_index).unwrap();
e.member_shares.push(s);
}
}
if group_index_map.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonic groups ({}). The required number \
of groups is {}.",
group_index_map.len(),
check_share.group_threshold,
)))?;
}
let groups: Vec<GroupShare> = group_index_map
.into_iter()
.map(|g| g.1)
// remove groups where number of shares is below the member threshold
.filter(|g| g.member_shares.len() >= g.member_threshold as usize)
.collect();
if groups.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(
"Insufficient number of groups with member counts that meet member threshold."
.to_string(),
))?;
}
// TODO: Should probably return info making problem mnemonics easier to identify
for g in groups.iter() {
if g.member_shares.len() < g.member_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonics (Group {}). At least {} mnemonics \
are required.",
g.group_index, g.member_threshold,
)))?;
}
let test_share = g.member_shares[0].clone();
for ms in g.member_shares.iter() {
if test_share.member_threshold != ms.member_threshold {
return Err(ErrorKind::Mnemonic(
"Mismatching member thresholds".to_string(),
))?;
}
}
}
Ok(groups)
}
#[cfg(test)]
mod tests {
use super::*;
fn flatten_mnemonics(nms: &[GroupShare]) -> Result<Vec<Vec<String>>, Error> {
let mut ret = vec![];
for m in nms {
for s in m.member_shares.iter() {
ret.push(s.to_mnemonic()?);
}
}
Ok(ret)
}
#[test]
fn generate_mnemonics_test() -> Result<(), Error> {
let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
// single 3 of 5 test, splat out all mnemonics
println!("Single 3 of 5 Encoded: {:?}", master_secret);
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// Test a few distinct groups
let mns = generate_mnemonics(
2,
&[(3, 5), (2, 5), (3, 3), (13, 16)],
&master_secret,
"",
0,
)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// work through some varying sized secrets
let mut master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
for _ in 0..32 {
master_secret.push(0);
master_secret.push(1);
println!("Single 3 of 5 Encoded: {:?}", master_secret);
println!("master secret length: {}", master_secret.len());
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
}
// Test case for a particular set of shares which failed with differing threshold lengths
// TODO: Fold this into the other tests
let one = "slavery flea acrobat eclipse cultural emission yield invasion seafood says insect square bucket orbit leaves closet heat ugly database decorate";
let two = "slavery flea acrobat emerald aviation escape year axle method forget rebound burden museum game suitable brave texture deploy together flash";
let three = "slavery flea acrobat envelope best ceiling dragon threaten isolate headset decrease organize crunch fiction sniff carbon museum username glasses plunge";
let four = "slavery flea beard echo cradle rebound penalty minister literary object have hazard elephant meaning enemy empty result capture peanut believe";
let five = "slavery flea beard email blind lips evaluate repair decent rich mortgage swimming branch decision unkind ultimate military sugar prepare airport";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let _result = combine_mnemonics(&input, "TREZOR")?;
Ok(())
}
// For temporary use as we have no command-line at present
#[test]
fn split_master_secret() -> Result<(), Error> {
let master_secret = b"fdd99010e03f3141662adb33644d5fd2bea0238fa805a2d21e396a22b926558c";
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret.to_vec(), "", 0)?;
for s in &mns {
println!("{}", s);
}
let one = "ending senior academic acne acne lizard armed wrist fancy center blimp broken branch ceiling type bishop senior window mother dominant humidity kidney flip leader cover pupal swimming quarter findings picture much impulse answer threaten bishop express brother sharp unwrap bulge leaves guest ladybug imply thumb dress brave orbit orbit garbage vexed brave deploy tofu regular unusual hunting carbon year";
let two = "ending senior academic agree acid grill magazine trip impact diagnose headset year puny adorn swimming knife aquatic airline prayer hairy unfold forbid diminish sweater brave column holy spit superior replace script oasis firefly scared goat divorce oral laundry violence merit golden founder unusual taste preach ruin lying bumpy single glasses fitness argue daisy secret loud squeeze theater husky already";
let three = "ending senior academic amazing academic carbon sheriff march ordinary advocate climate quarter explain view glasses distance scandal modify maiden welcome include webcam snapshot lilac finance faint facility quantity daughter trash formal failure execute grasp necklace trust bishop privacy library infant slim envy parcel boring mixture deploy dough deny patrol evening brave idea blessing slush lizard woman teaspoon news exclude";
let four = "ending senior academic arcade acquire work exceed network revenue blanket force fiber ting standard fatigue extend acid holiday raspy pink vegan survive river step golden scandal tendency spray parcel vintage amuse remove best else unknown overall mild breathe nuclear wrist criminal jury deal rescue symbolic slow predator railroad verify involve require graduate ambition unknown repair scandal hobo voice railroad";
let five = "ending senior academic axle acquire golden velvet depart swing endorse champion estate slush alien burning painting obesity surprise punish gasoline elephant educate declare rebuild plains making unkind carve exotic unfold counter cowboy extra fantasy cleanup pickup increase type deliver together fumes nylon acrobat fatigue listen elder toxic losing paper image aide satisfy award axis evoke capital academic violence canyon";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let result = combine_mnemonics(&input, "")?;
println!("Result: {}", String::from_utf8(result).unwrap());
Ok(())
}
} | &ems.share_value,
passphrase,
ems.iteration_exponent,
ems.identifier, | random_line_split |
sssmc39_scheme.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions and structs that specifically define the SLIP-0039 scheme
use super::{Share, Splitter};
use crate::error::{Error, ErrorKind};
use std::collections::BTreeMap;
use std::fmt;
use crate::util;
/// Struct for returned shares
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct GroupShare {
/// Group id
pub group_id: u16,
/// iteration exponent
pub iteration_exponent: u8,
/// group index
pub group_index: u8,
/// group threshold
pub group_threshold: u8,
/// number of group shares
pub group_count: u8,
/// member threshold:
pub member_threshold: u8,
/// Member shares for the group
pub member_shares: Vec<Share>,
}
impl fmt::Display for GroupShare {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Group {} of {} - {} of {} shares required: ",
self.group_index + 1,
self.group_count,
self.member_threshold,
self.member_shares.len()
)?;
for s in &self.member_shares {
for w in s.to_mnemonic().unwrap() {
write!(f, "{} ", w)?;
}
writeln!(f)?;
}
Ok(())
}
}
impl GroupShare {
/// return list of mnemonics
pub fn mnemonic_list(&self) -> Result<Vec<Vec<String>>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?);
}
Ok(ret_vec)
}
/// return list of mnemonics as space separated strings
pub fn mnemonic_list_flat(&self) -> Result<Vec<String>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?.iter().fold(String::new(), |mut acc, s| {
acc.push_str(s);
acc.push(' ');
acc
}))
}
Ok(ret_vec)
}
/// decode member shares to single share
pub fn decode_shares(&mut self) -> Result<Share, Error> {
let sp = Splitter::new(None);
sp.recover_secret(&self.member_shares, self.member_threshold)
}
}
/// Split a master secret into mnemonic shares
/// group_threshold: The number of groups required to reconstruct the master secret
/// groups: A list of (member_threshold, member_count) pairs for each group, where member_count
/// is the number of shares to generate for the group and member_threshold is the number of
/// members required to reconstruct the group secret.
/// master_secret: The master secret to split.
/// passphrase: The passphrase used to encrypt the master secret.
/// iteration_exponent: The iteration exponent.
/// return: List of mnemonics.
pub fn generate_mnemonics(
group_threshold: u8,
groups: &[(u8, u8)],
master_secret: &[u8],
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
// Generate a 'proto share' so to speak, with identifier generated and group data filled
let mut proto_share = Share::new()?;
proto_share.group_threshold = group_threshold;
proto_share.group_count = groups.len() as u8;
if master_secret.len() * 8 < proto_share.config.min_strength_bits as usize {
return Err(ErrorKind::Value(format!(
"The length of the master secret ({} bytes) must be at least {} bytes.",
master_secret.len(),
(f64::from(proto_share.config.min_strength_bits) / 8f64).ceil(),
)))?;
}
if master_secret.len() % 2 != 0 {
return Err(ErrorKind::Value(
"The length of the master secret in bytes must be an even number".to_string(),
))?;
}
if group_threshold as usize > groups.len() {
return Err(ErrorKind::Value(format!(
"The requested group threshold ({}) must not exceed the number of groups ({}).",
group_threshold,
groups.len()
)))?;
}
let encoder = util::encrypt::MasterSecretEnc::new()?;
let encrypted_master_secret = encoder.encrypt(
master_secret,
passphrase,
iteration_exponent,
proto_share.identifier,
);
let sp = Splitter::new(None);
let group_shares = sp.split_secret(
&proto_share,
group_threshold,
groups.len() as u8,
&encrypted_master_secret,
)?;
let mut retval: Vec<GroupShare> = vec![];
let gs_len = group_shares.len();
for (i, elem) in group_shares.into_iter().enumerate() {
proto_share.group_index = i as u8;
proto_share.group_threshold = group_threshold;
proto_share.group_count = gs_len as u8;
let (member_threshold, member_count) = groups[i];
let member_shares = sp.split_secret(
&proto_share,
member_threshold,
member_count,
&elem.share_value,
)?;
retval.push(GroupShare {
group_id: proto_share.identifier,
iteration_exponent,
group_index: i as u8,
group_threshold,
group_count: gs_len as u8,
member_threshold,
member_shares,
});
}
Ok(retval)
}
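// Illustrative usage sketch (editor addition, not original source): a
// minimal split where any 2 of 3 member mnemonics in a single group
// recover the secret; mirrors the round trips in the tests module below.
#[test]
fn generate_mnemonics_usage_sketch() -> Result<(), Error> {
    let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
    let groups = generate_mnemonics(1, &[(2, 3)], &master_secret, "", 0)?;
    // One group comes back, configured exactly as requested.
    assert_eq!(groups.len(), 1);
    assert_eq!(groups[0].member_threshold, 2);
    assert_eq!(groups[0].member_shares.len(), 3);
    Ok(())
}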
pub fn generate_mnemonics_random(
group_threshold: u8,
groups: &[(u8, u8)],
strength_bits: u16,
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
let proto_share = Share::new()?;
if strength_bits < proto_share.config.min_strength_bits {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be at least {} bits.",
strength_bits, proto_share.config.min_strength_bits,
)))?;
}
if strength_bits % 16 != 0 {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be a multiple of 16 bits.",
strength_bits,
)))?;
}
generate_mnemonics(
group_threshold,
groups,
&util::fill_vec_rand(strength_bits as usize / 8),
passphrase,
iteration_exponent,
)
}
/// Combines mnemonic shares to obtain the master secret which was previously split using
/// Shamir's secret sharing scheme.
/// mnemonics: List of mnemonics.
/// passphrase: The passphrase used to encrypt the master secret.
/// return: The master secret.
pub fn combine_mnemonics(mnemonics: &[Vec<String>], passphrase: &str) -> Result<Vec<u8>, Error> {
let group_shares = decode_mnemonics(mnemonics)?;
let mut shares = vec![];
for mut gs in group_shares {
shares.push(gs.decode_shares()?);
}
let sp = Splitter::new(None);
// restore proper member index for groups
let shares = shares
.into_iter()
.map(|mut s| {
s.member_index = s.group_index;
s
})
.collect::<Vec<_>>();
let ems = sp.recover_secret(&shares, shares[0].group_threshold)?;
let encoder = util::encrypt::MasterSecretEnc::new()?;
let dms = encoder.decrypt(
&ems.share_value,
passphrase,
ems.iteration_exponent,
ems.identifier,
);
Ok(dms)
}
/// Decodes all Mnemonics to a list of shares and performs error checking
fn decode_mnemonics(mnemonics: &[Vec<String>]) -> Result<Vec<GroupShare>, Error> {
let mut shares = vec![];
if mnemonics.is_empty() {
return Err(ErrorKind::Mnemonic(
"List of mnemonics is empty.".to_string(),
))?;
}
let check_len = mnemonics[0].len();
for m in mnemonics {
if m.len() != check_len {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same length.".to_string(),
))?;
}
shares.push(Share::from_mnemonic(m)?);
}
let check_share = shares[0].clone();
for s in shares.iter() {
if s.identifier != check_share.identifier
|| s.iteration_exponent != check_share.iteration_exponent
{
return Err(ErrorKind::Mnemonic(format!(
"Invalid set of mnemonics. All mnemonics must begin with the same {} words. \
(Identifier and iteration exponent must be the same).",
s.config.id_exp_length_words,
)))?;
}
if s.group_threshold != check_share.group_threshold {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group threshold"
.to_string(),
))?;
}
if s.group_count != check_share.group_count {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group count"
.to_string(),
))?;
}
}
let mut group_index_map = BTreeMap::new();
for s in shares {
if !group_index_map.contains_key(&s.group_index) {
let group_share = GroupShare {
group_id: s.identifier,
group_index: s.group_index,
group_threshold: s.group_threshold,
iteration_exponent: s.iteration_exponent,
group_count: s.group_count,
member_shares: vec![s.clone()],
member_threshold: s.member_threshold,
};
group_index_map.insert(group_share.group_index, group_share);
} else {
let e = group_index_map.get_mut(&s.group_index).unwrap();
e.member_shares.push(s);
}
}
if group_index_map.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonic groups ({}). The required number \
of groups is {}.",
group_index_map.len(),
check_share.group_threshold,
)))?;
}
let groups: Vec<GroupShare> = group_index_map
.into_iter()
.map(|g| g.1)
// remove groups where number of shares is below the member threshold
.filter(|g| g.member_shares.len() >= g.member_threshold as usize)
.collect();
if groups.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(
"Insufficient number of groups with member counts that meet member threshold."
.to_string(),
))?;
}
// TODO: Should probably return info making problem mnemonics easier to identify
for g in groups.iter() {
if g.member_shares.len() < g.member_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonics (Group {}). At least {} mnemonics \
are required.",
g.group_index, g.member_threshold,
)))?;
}
let test_share = g.member_shares[0].clone();
for ms in g.member_shares.iter() {
if test_share.member_threshold != ms.member_threshold {
return Err(ErrorKind::Mnemonic(
"Mismatching member thresholds".to_string(),
))?;
}
}
}
Ok(groups)
}
#[cfg(test)]
mod tests {
use super::*;
fn flatten_mnemonics(nms: &[GroupShare]) -> Result<Vec<Vec<String>>, Error> {
let mut ret = vec![];
for m in nms {
for s in m.member_shares.iter() {
ret.push(s.to_mnemonic()?);
}
}
Ok(ret)
}
#[test]
fn generate_mnemonics_test() -> Result<(), Error> | )?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// work through some varying sized secrets
let mut master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
for _ in 0..32 {
master_secret.push(0);
master_secret.push(1);
println!("Single 3 of 5 Encoded: {:?}", master_secret);
println!("master secret length: {}", master_secret.len());
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
}
// Test a particular case which failed with different threshold lengths
// TODO: Fold this into other tests
let one = "slavery flea acrobat eclipse cultural emission yield invasion seafood says insect square bucket orbit leaves closet heat ugly database decorate";
let two = "slavery flea acrobat emerald aviation escape year axle method forget rebound burden museum game suitable brave texture deploy together flash";
let three = "slavery flea acrobat envelope best ceiling dragon threaten isolate headset decrease organize crunch fiction sniff carbon museum username glasses plunge";
let four = "slavery flea beard echo cradle rebound penalty minister literary object have hazard elephant meaning enemy empty result capture peanut believe";
let five = "slavery flea beard email blind lips evaluate repair decent rich mortgage swimming branch decision unkind ultimate military sugar prepare airport";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let _result = combine_mnemonics(&input, "TREZOR")?;
Ok(())
}
// For temporary use as we have no command-line at present
#[test]
fn split_master_secret() -> Result<(), Error> {
let master_secret = b"fdd99010e03f3141662adb33644d5fd2bea0238fa805a2d21e396a22b926558c";
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret.to_vec(), "", 0)?;
for s in &mns {
println!("{}", s);
}
let one = "ending senior academic acne acne lizard armed wrist fancy center blimp broken branch ceiling type bishop senior window mother dominant humidity kidney flip leader cover pupal swimming quarter findings picture much impulse answer threaten bishop express brother sharp unwrap bulge leaves guest ladybug imply thumb dress brave orbit orbit garbage vexed brave deploy tofu regular unusual hunting carbon year";
let two = "ending senior academic agree acid grill magazine trip impact diagnose headset year puny adorn swimming knife aquatic airline prayer hairy unfold forbid diminish sweater brave column holy spit superior replace script oasis firefly scared goat divorce oral laundry violence merit golden founder unusual taste preach ruin lying bumpy single glasses fitness argue daisy secret loud squeeze theater husky already";
let three = "ending senior academic amazing academic carbon sheriff march ordinary advocate climate quarter explain view glasses distance scandal modify maiden welcome include webcam snapshot lilac finance faint facility quantity daughter trash formal failure execute grasp necklace trust bishop privacy library infant slim envy parcel boring mixture deploy dough deny patrol evening brave idea blessing slush lizard woman teaspoon news exclude";
let four = "ending senior academic arcade acquire work exceed network revenue blanket force fiber ting standard fatigue extend acid holiday raspy pink vegan survive river step golden scandal tendency spray parcel vintage amuse remove best else unknown overall mild breathe nuclear wrist criminal jury deal rescue symbolic slow predator railroad verify involve require graduate ambition unknown repair scandal hobo voice railroad";
let five = "ending senior academic axle acquire golden velvet depart swing endorse champion estate slush alien burning painting obesity surprise punish gasoline elephant educate declare rebuild plains making unkind carve exotic unfold counter cowboy extra fantasy cleanup pickup increase type deliver together fumes nylon acrobat fatigue listen elder toxic losing paper image aide satisfy award axis evoke capital academic violence canyon";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let result = combine_mnemonics(&input, "")?;
println!("Result: {}", String::from_utf8(result).unwrap());
Ok(())
}
}
| {
let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
// single 3 of 5 test, splat out all mnemonics
println!("Single 3 of 5 Encoded: {:?}", master_secret);
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// Test a few distinct groups
let mns = generate_mnemonics(
2,
&[(3, 5), (2, 5), (3, 3), (13, 16)],
&master_secret,
"",
0, | identifier_body |
sssmc39_scheme.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions and structs that specifically define the SLIP-0039 scheme
use super::{Share, Splitter};
use crate::error::{Error, ErrorKind};
use std::collections::BTreeMap;
use std::fmt;
use crate::util;
/// Struct for returned shares
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct GroupShare {
/// Group id
pub group_id: u16,
/// iteration exponent
pub iteration_exponent: u8,
/// group index
pub group_index: u8,
/// group threshold
pub group_threshold: u8,
/// number of group shares
pub group_count: u8,
/// member threshold:
pub member_threshold: u8,
/// Member shares for the group
pub member_shares: Vec<Share>,
}
impl fmt::Display for GroupShare {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Group {} of {} - {} of {} shares required: ",
self.group_index + 1,
self.group_count,
self.member_threshold,
self.member_shares.len()
)?;
for s in &self.member_shares {
for w in s.to_mnemonic().unwrap() {
write!(f, "{} ", w)?;
}
writeln!(f)?;
}
Ok(())
}
}
impl GroupShare {
/// return list of mnemonics
pub fn mnemonic_list(&self) -> Result<Vec<Vec<String>>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?);
}
Ok(ret_vec)
}
/// return list of mnemonics as space separated strings
pub fn mnemonic_list_flat(&self) -> Result<Vec<String>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?.iter().fold(String::new(), |mut acc, s| {
acc.push_str(s);
acc.push(' ');
acc
}))
}
Ok(ret_vec)
}
/// decode member shares to single share
pub fn decode_shares(&mut self) -> Result<Share, Error> {
let sp = Splitter::new(None);
sp.recover_secret(&self.member_shares, self.member_threshold)
}
}
/// Split a master secret into mnemonic shares
/// group_threshold: The number of groups required to reconstruct the master secret
/// groups: A list of (member_threshold, member_count) pairs for each group, where member_count
/// is the number of shares to generate for the group and member_threshold is the number of
/// members required to reconstruct the group secret.
/// master_secret: The master secret to split.
/// passphrase: The passphrase used to encrypt the master secret.
/// iteration_exponent: The iteration exponent.
/// return: List of mnemonics.
pub fn generate_mnemonics(
group_threshold: u8,
groups: &[(u8, u8)],
master_secret: &[u8],
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
// Generate a 'proto share' so to speak, with identifier generated and group data filled
let mut proto_share = Share::new()?;
proto_share.group_threshold = group_threshold;
proto_share.group_count = groups.len() as u8;
if master_secret.len() * 8 < proto_share.config.min_strength_bits as usize {
return Err(ErrorKind::Value(format!(
"The length of the master secret ({} bytes) must be at least {} bytes.",
master_secret.len(),
(f64::from(proto_share.config.min_strength_bits) / 8f64).ceil(),
)))?;
}
if master_secret.len() % 2 != 0 {
return Err(ErrorKind::Value(
"The length of the master secret in bytes must be an even number".to_string(),
))?;
}
if group_threshold as usize > groups.len() {
return Err(ErrorKind::Value(format!(
"The requested group threshold ({}) must not exceed the number of groups ({}).",
group_threshold,
groups.len()
)))?;
}
let encoder = util::encrypt::MasterSecretEnc::new()?;
let encrypted_master_secret = encoder.encrypt(
master_secret,
passphrase,
iteration_exponent,
proto_share.identifier,
);
let sp = Splitter::new(None);
let group_shares = sp.split_secret(
&proto_share,
group_threshold,
groups.len() as u8,
&encrypted_master_secret,
)?;
let mut retval: Vec<GroupShare> = vec![];
let gs_len = group_shares.len();
for (i, elem) in group_shares.into_iter().enumerate() {
proto_share.group_index = i as u8;
proto_share.group_threshold = group_threshold;
proto_share.group_count = gs_len as u8;
let (member_threshold, member_count) = groups[i];
let member_shares = sp.split_secret(
&proto_share,
member_threshold,
member_count,
&elem.share_value,
)?;
retval.push(GroupShare {
group_id: proto_share.identifier,
iteration_exponent,
group_index: i as u8,
group_threshold,
group_count: gs_len as u8,
member_threshold,
member_shares,
});
}
Ok(retval)
}
pub fn generate_mnemonics_random(
group_threshold: u8,
groups: &[(u8, u8)],
strength_bits: u16,
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
let proto_share = Share::new()?;
if strength_bits < proto_share.config.min_strength_bits {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be at least {} bits.",
strength_bits, proto_share.config.min_strength_bits,
)))?;
}
if strength_bits % 16 != 0 {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be a multiple of 16 bits.",
strength_bits,
)))?;
}
generate_mnemonics(
group_threshold,
groups,
&util::fill_vec_rand(strength_bits as usize / 8),
passphrase,
iteration_exponent,
)
}
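// Illustrative sketch (editor addition, not original source): the two
// validations above in concrete terms. This assumes the configured
// minimum strength is 128 bits, as in SLIP-0039.
#[test]
fn strength_bits_validation_sketch() {
    // 130 is not a multiple of 16 bits, so it is rejected.
    assert!(generate_mnemonics_random(1, &[(2, 3)], 130, "", 0).is_err());
    // 128 bits (a 16-byte random secret) is accepted.
    assert!(generate_mnemonics_random(1, &[(2, 3)], 128, "", 0).is_ok());
}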
/// Combines mnemonic shares to obtain the master secret which was previously split using
/// Shamir's secret sharing scheme.
/// mnemonics: List of mnemonics.
/// passphrase: The passphrase used to encrypt the master secret.
/// return: The master secret.
pub fn combine_mnemonics(mnemonics: &[Vec<String>], passphrase: &str) -> Result<Vec<u8>, Error> {
let group_shares = decode_mnemonics(mnemonics)?;
let mut shares = vec![];
for mut gs in group_shares {
shares.push(gs.decode_shares()?);
}
let sp = Splitter::new(None);
// restore proper member index for groups
let shares = shares
.into_iter()
.map(|mut s| {
s.member_index = s.group_index;
s
})
.collect::<Vec<_>>();
let ems = sp.recover_secret(&shares, shares[0].group_threshold)?;
let encoder = util::encrypt::MasterSecretEnc::new()?;
let dms = encoder.decrypt(
&ems.share_value,
passphrase,
ems.iteration_exponent,
ems.identifier,
);
Ok(dms)
}
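// Illustrative sketch (editor addition, not original source): recovery
// needs only `member_threshold` mnemonics from each group, not all of
// them, so dropping one share of a 2-of-3 group still succeeds.
#[test]
fn combine_with_threshold_subset_sketch() -> Result<(), Error> {
    let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
    let groups = generate_mnemonics(1, &[(2, 3)], &master_secret, "", 0)?;
    let mnemonics = groups[0].mnemonic_list()?;
    // Take just two of the three member mnemonics.
    let subset = vec![mnemonics[0].clone(), mnemonics[2].clone()];
    assert_eq!(combine_mnemonics(&subset, "")?, master_secret);
    Ok(())
}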
/// Decodes all Mnemonics to a list of shares and performs error checking
fn decode_mnemonics(mnemonics: &[Vec<String>]) -> Result<Vec<GroupShare>, Error> {
let mut shares = vec![];
if mnemonics.is_empty() |
let check_len = mnemonics[0].len();
for m in mnemonics {
if m.len() != check_len {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same length.".to_string(),
))?;
}
shares.push(Share::from_mnemonic(m)?);
}
let check_share = shares[0].clone();
for s in shares.iter() {
if s.identifier != check_share.identifier
|| s.iteration_exponent != check_share.iteration_exponent
{
return Err(ErrorKind::Mnemonic(format!(
"Invalid set of mnemonics. All mnemonics must begin with the same {} words. \
(Identifier and iteration exponent must be the same).",
s.config.id_exp_length_words,
)))?;
}
if s.group_threshold != check_share.group_threshold {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group threshold"
.to_string(),
))?;
}
if s.group_count != check_share.group_count {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group count"
.to_string(),
))?;
}
}
let mut group_index_map = BTreeMap::new();
for s in shares {
if !group_index_map.contains_key(&s.group_index) {
let group_share = GroupShare {
group_id: s.identifier,
group_index: s.group_index,
group_threshold: s.group_threshold,
iteration_exponent: s.iteration_exponent,
group_count: s.group_count,
member_shares: vec![s.clone()],
member_threshold: s.member_threshold,
};
group_index_map.insert(group_share.group_index, group_share);
} else {
let e = group_index_map.get_mut(&s.group_index).unwrap();
e.member_shares.push(s);
}
}
if group_index_map.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonic groups ({}). The required number \
of groups is {}.",
group_index_map.len(),
check_share.group_threshold,
)))?;
}
let groups: Vec<GroupShare> = group_index_map
.into_iter()
.map(|g| g.1)
// remove groups where number of shares is below the member threshold
.filter(|g| g.member_shares.len() >= g.member_threshold as usize)
.collect();
if groups.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(
"Insufficient number of groups with member counts that meet member threshold."
.to_string(),
))?;
}
// TODO: Should probably return info making problem mnemonics easier to identify
for g in groups.iter() {
if g.member_shares.len() < g.member_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonics (Group {}). At least {} mnemonics \
are required.",
g.group_index, g.member_threshold,
)))?;
}
let test_share = g.member_shares[0].clone();
for ms in g.member_shares.iter() {
if test_share.member_threshold != ms.member_threshold {
return Err(ErrorKind::Mnemonic(
"Mismatching member thresholds".to_string(),
))?;
}
}
}
Ok(groups)
}
#[cfg(test)]
mod tests {
use super::*;
fn flatten_mnemonics(nms: &[GroupShare]) -> Result<Vec<Vec<String>>, Error> {
let mut ret = vec![];
for m in nms {
for s in m.member_shares.iter() {
ret.push(s.to_mnemonic()?);
}
}
Ok(ret)
}
#[test]
fn generate_mnemonics_test() -> Result<(), Error> {
let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
// single 3 of 5 test, splat out all mnemonics
println!("Single 3 of 5 Encoded: {:?}", master_secret);
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// Test a few distinct groups
let mns = generate_mnemonics(
2,
&[(3, 5), (2, 5), (3, 3), (13, 16)],
&master_secret,
"",
0,
)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// work through some varying sized secrets
let mut master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
for _ in 0..32 {
master_secret.push(0);
master_secret.push(1);
println!("Single 3 of 5 Encoded: {:?}", master_secret);
println!("master secret length: {}", master_secret.len());
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
}
// Test a particular case which failed with different threshold lengths
// TODO: Fold this into other tests
let one = "slavery flea acrobat eclipse cultural emission yield invasion seafood says insect square bucket orbit leaves closet heat ugly database decorate";
let two = "slavery flea acrobat emerald aviation escape year axle method forget rebound burden museum game suitable brave texture deploy together flash";
let three = "slavery flea acrobat envelope best ceiling dragon threaten isolate headset decrease organize crunch fiction sniff carbon museum username glasses plunge";
let four = "slavery flea beard echo cradle rebound penalty minister literary object have hazard elephant meaning enemy empty result capture peanut believe";
let five = "slavery flea beard email blind lips evaluate repair decent rich mortgage swimming branch decision unkind ultimate military sugar prepare airport";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let _result = combine_mnemonics(&input, "TREZOR")?;
Ok(())
}
// For temporary use as we have no command-line at present
#[test]
fn split_master_secret() -> Result<(), Error> {
let master_secret = b"fdd99010e03f3141662adb33644d5fd2bea0238fa805a2d21e396a22b926558c";
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret.to_vec(), "", 0)?;
for s in &mns {
println!("{}", s);
}
let one = "ending senior academic acne acne lizard armed wrist fancy center blimp broken branch ceiling type bishop senior window mother dominant humidity kidney flip leader cover pupal swimming quarter findings picture much impulse answer threaten bishop express brother sharp unwrap bulge leaves guest ladybug imply thumb dress brave orbit orbit garbage vexed brave deploy tofu regular unusual hunting carbon year";
let two = "ending senior academic agree acid grill magazine trip impact diagnose headset year puny adorn swimming knife aquatic airline prayer hairy unfold forbid diminish sweater brave column holy spit superior replace script oasis firefly scared goat divorce oral laundry violence merit golden founder unusual taste preach ruin lying bumpy single glasses fitness argue daisy secret loud squeeze theater husky already";
let three = "ending senior academic amazing academic carbon sheriff march ordinary advocate climate quarter explain view glasses distance scandal modify maiden welcome include webcam snapshot lilac finance faint facility quantity daughter trash formal failure execute grasp necklace trust bishop privacy library infant slim envy parcel boring mixture deploy dough deny patrol evening brave idea blessing slush lizard woman teaspoon news exclude";
let four = "ending senior academic arcade acquire work exceed network revenue blanket force fiber ting standard fatigue extend acid holiday raspy pink vegan survive river step golden scandal tendency spray parcel vintage amuse remove best else unknown overall mild breathe nuclear wrist criminal jury deal rescue symbolic slow predator railroad verify involve require graduate ambition unknown repair scandal hobo voice railroad";
let five = "ending senior academic axle acquire golden velvet depart swing endorse champion estate slush alien burning painting obesity surprise punish gasoline elephant educate declare rebuild plains making unkind carve exotic unfold counter cowboy extra fantasy cleanup pickup increase type deliver together fumes nylon acrobat fatigue listen elder toxic losing paper image aide satisfy award axis evoke capital academic violence canyon";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let result = combine_mnemonics(&input, "")?;
println!("Result: {}", String::from_utf8(result).unwrap());
Ok(())
}
}
| {
return Err(ErrorKind::Mnemonic(
"List of mnemonics is empty.".to_string(),
))?;
} | conditional_block |
sssmc39_scheme.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions and structs that specifically define the SLIP-0039 scheme
use super::{Share, Splitter};
use crate::error::{Error, ErrorKind};
use std::collections::BTreeMap;
use std::fmt;
use crate::util;
/// Struct for returned shares
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct GroupShare {
/// Group id
pub group_id: u16,
/// iteration exponent
pub iteration_exponent: u8,
/// group index
pub group_index: u8,
/// group threshold
pub group_threshold: u8,
/// number of group shares
pub group_count: u8,
/// member threshold:
pub member_threshold: u8,
/// Member shares for the group
pub member_shares: Vec<Share>,
}
impl fmt::Display for GroupShare {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Group {} of {} - {} of {} shares required: ",
self.group_index + 1,
self.group_count,
self.member_threshold,
self.member_shares.len()
)?;
for s in &self.member_shares {
for w in s.to_mnemonic().unwrap() {
write!(f, "{} ", w)?;
}
writeln!(f)?;
}
Ok(())
}
}
impl GroupShare {
/// return list of mnemonics
pub fn mnemonic_list(&self) -> Result<Vec<Vec<String>>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?);
}
Ok(ret_vec)
}
/// return list of mnemonics as space separated strings
pub fn mnemonic_list_flat(&self) -> Result<Vec<String>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?.iter().fold(String::new(), |mut acc, s| {
acc.push_str(s);
acc.push(' ');
acc
}))
}
Ok(ret_vec)
}
/// decode member shares to single share
pub fn decode_shares(&mut self) -> Result<Share, Error> {
let sp = Splitter::new(None);
sp.recover_secret(&self.member_shares, self.member_threshold)
}
}
/// Split a master secret into mnemonic shares
/// group_threshold: The number of groups required to reconstruct the master secret
/// groups: A list of (member_threshold, member_count) pairs for each group, where member_count
/// is the number of shares to generate for the group and member_threshold is the number of
/// members required to reconstruct the group secret.
/// master_secret: The master secret to split.
/// passphrase: The passphrase used to encrypt the master secret.
/// iteration_exponent: The iteration exponent.
/// return: List of mnemonics.
pub fn generate_mnemonics(
group_threshold: u8,
groups: &[(u8, u8)],
master_secret: &[u8],
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
// Generate a 'proto share' so to speak, with identifier generated and group data filled
let mut proto_share = Share::new()?;
proto_share.group_threshold = group_threshold;
proto_share.group_count = groups.len() as u8;
if master_secret.len() * 8 < proto_share.config.min_strength_bits as usize {
return Err(ErrorKind::Value(format!(
"The length of the master secret ({} bytes) must be at least {} bytes.",
master_secret.len(),
(f64::from(proto_share.config.min_strength_bits) / 8f64).ceil(),
)))?;
}
if master_secret.len() % 2 != 0 {
return Err(ErrorKind::Value(
"The length of the master secret in bytes must be an even number".to_string(),
))?;
}
if group_threshold as usize > groups.len() {
return Err(ErrorKind::Value(format!(
"The requested group threshold ({}) must not exceed the number of groups ({}).",
group_threshold,
groups.len()
)))?;
}
let encoder = util::encrypt::MasterSecretEnc::new()?;
let encrypted_master_secret = encoder.encrypt(
master_secret,
passphrase,
iteration_exponent,
proto_share.identifier,
);
let sp = Splitter::new(None);
let group_shares = sp.split_secret(
&proto_share,
group_threshold,
groups.len() as u8,
&encrypted_master_secret,
)?;
let mut retval: Vec<GroupShare> = vec![];
let gs_len = group_shares.len();
for (i, elem) in group_shares.into_iter().enumerate() {
proto_share.group_index = i as u8;
proto_share.group_threshold = group_threshold;
proto_share.group_count = gs_len as u8;
let (member_threshold, member_count) = groups[i];
let member_shares = sp.split_secret(
&proto_share,
member_threshold,
member_count,
&elem.share_value,
)?;
retval.push(GroupShare {
group_id: proto_share.identifier,
iteration_exponent,
group_index: i as u8,
group_threshold,
group_count: gs_len as u8,
member_threshold,
member_shares,
});
}
Ok(retval)
}
pub fn generate_mnemonics_random(
group_threshold: u8,
groups: &[(u8, u8)],
strength_bits: u16,
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
let proto_share = Share::new()?;
if strength_bits < proto_share.config.min_strength_bits {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be at least {} bits.",
strength_bits, proto_share.config.min_strength_bits,
)))?;
}
if strength_bits % 16 != 0 {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be a multiple of 16 bits.",
strength_bits,
)))?;
}
generate_mnemonics(
group_threshold,
groups,
&util::fill_vec_rand(strength_bits as usize / 8),
passphrase,
iteration_exponent,
)
}
/// Combines mnemonic shares to obtain the master secret which was previously split using
/// Shamir's secret sharing scheme.
/// mnemonics: List of mnemonics.
/// passphrase: The passphrase used to encrypt the master secret.
/// return: The master secret.
pub fn combine_mnemonics(mnemonics: &[Vec<String>], passphrase: &str) -> Result<Vec<u8>, Error> {
let group_shares = decode_mnemonics(mnemonics)?;
let mut shares = vec![];
for mut gs in group_shares {
shares.push(gs.decode_shares()?);
}
let sp = Splitter::new(None);
// restore proper member index for groups
let shares = shares
.into_iter()
.map(|mut s| {
s.member_index = s.group_index;
s
})
.collect::<Vec<_>>();
let ems = sp.recover_secret(&shares, shares[0].group_threshold)?;
let encoder = util::encrypt::MasterSecretEnc::new()?;
let dms = encoder.decrypt(
&ems.share_value,
passphrase,
ems.iteration_exponent,
ems.identifier,
);
Ok(dms)
}
/// Decodes all Mnemonics to a list of shares and performs error checking
fn | (mnemonics: &[Vec<String>]) -> Result<Vec<GroupShare>, Error> {
let mut shares = vec![];
if mnemonics.is_empty() {
return Err(ErrorKind::Mnemonic(
"List of mnemonics is empty.".to_string(),
))?;
}
let check_len = mnemonics[0].len();
for m in mnemonics {
if m.len() != check_len {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same length.".to_string(),
))?;
}
shares.push(Share::from_mnemonic(m)?);
}
let check_share = shares[0].clone();
for s in shares.iter() {
if s.identifier != check_share.identifier
|| s.iteration_exponent != check_share.iteration_exponent
{
return Err(ErrorKind::Mnemonic(format!(
"Invalid set of mnemonics. All mnemonics must begin with the same {} words. \
(Identifier and iteration exponent must be the same).",
s.config.id_exp_length_words,
)))?;
}
if s.group_threshold != check_share.group_threshold {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group threshold"
.to_string(),
))?;
}
if s.group_count != check_share.group_count {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group count"
.to_string(),
))?;
}
}
let mut group_index_map = BTreeMap::new();
for s in shares {
if !group_index_map.contains_key(&s.group_index) {
let group_share = GroupShare {
group_id: s.identifier,
group_index: s.group_index,
group_threshold: s.group_threshold,
iteration_exponent: s.iteration_exponent,
group_count: s.group_count,
member_shares: vec![s.clone()],
member_threshold: s.member_threshold,
};
group_index_map.insert(group_share.group_index, group_share);
} else {
let e = group_index_map.get_mut(&s.group_index).unwrap();
e.member_shares.push(s);
}
}
if group_index_map.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonic groups ({}). The required number \
of groups is {}.",
group_index_map.len(),
check_share.group_threshold,
)))?;
}
let groups: Vec<GroupShare> = group_index_map
.into_iter()
.map(|g| g.1)
// remove groups where number of shares is below the member threshold
.filter(|g| g.member_shares.len() >= g.member_threshold as usize)
.collect();
if groups.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(
"Insufficient number of groups with member counts that meet member threshold."
.to_string(),
))?;
}
// TODO: Should probably return info making problem mnemonics easier to identify
for g in groups.iter() {
if g.member_shares.len() < g.member_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonics (Group {}). At least {} mnemonics \
are required.",
g.group_index, g.member_threshold,
)))?;
}
let test_share = g.member_shares[0].clone();
for ms in g.member_shares.iter() {
if test_share.member_threshold != ms.member_threshold {
return Err(ErrorKind::Mnemonic(
"Mismatching member thresholds".to_string(),
))?;
}
}
}
Ok(groups)
}
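// Illustrative sketch (editor addition, not original source): the first
// check above fires before any share parsing, so an empty input fails
// fast with a mnemonic error.
#[test]
fn empty_mnemonic_list_rejected_sketch() {
    let no_mnemonics: Vec<Vec<String>> = vec![];
    assert!(combine_mnemonics(&no_mnemonics, "").is_err());
}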
#[cfg(test)]
mod tests {
use super::*;
fn flatten_mnemonics(nms: &[GroupShare]) -> Result<Vec<Vec<String>>, Error> {
let mut ret = vec![];
for m in nms {
for s in m.member_shares.iter() {
ret.push(s.to_mnemonic()?);
}
}
Ok(ret)
}
#[test]
fn generate_mnemonics_test() -> Result<(), Error> {
let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
// single 3 of 5 test, splat out all mnemonics
println!("Single 3 of 5 Encoded: {:?}", master_secret);
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// Test a few distinct groups
let mns = generate_mnemonics(
2,
&[(3, 5), (2, 5), (3, 3), (13, 16)],
&master_secret,
"",
0,
)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// work through some varying sized secrets
let mut master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
for _ in 0..32 {
master_secret.push(0);
master_secret.push(1);
println!("Single 3 of 5 Encoded: {:?}", master_secret);
println!("master secret length: {}", master_secret.len());
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
}
// Test a particular case which failed with different threshold lengths
// TODO: Fold this into other tests
let one = "slavery flea acrobat eclipse cultural emission yield invasion seafood says insect square bucket orbit leaves closet heat ugly database decorate";
let two = "slavery flea acrobat emerald aviation escape year axle method forget rebound burden museum game suitable brave texture deploy together flash";
let three = "slavery flea acrobat envelope best ceiling dragon threaten isolate headset decrease organize crunch fiction sniff carbon museum username glasses plunge";
let four = "slavery flea beard echo cradle rebound penalty minister literary object have hazard elephant meaning enemy empty result capture peanut believe";
let five = "slavery flea beard email blind lips evaluate repair decent rich mortgage swimming branch decision unkind ultimate military sugar prepare airport";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let _result = combine_mnemonics(&input, "TREZOR")?;
Ok(())
}
// For temporary use as we have no command-line at present
#[test]
fn split_master_secret() -> Result<(), Error> {
let master_secret = b"fdd99010e03f3141662adb33644d5fd2bea0238fa805a2d21e396a22b926558c";
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret.to_vec(), "", 0)?;
for s in &mns {
println!("{}", s);
}
let one = "ending senior academic acne acne lizard armed wrist fancy center blimp broken branch ceiling type bishop senior window mother dominant humidity kidney flip leader cover pupal swimming quarter findings picture much impulse answer threaten bishop express brother sharp unwrap bulge leaves guest ladybug imply thumb dress brave orbit orbit garbage vexed brave deploy tofu regular unusual hunting carbon year";
let two = "ending senior academic agree acid grill magazine trip impact diagnose headset year puny adorn swimming knife aquatic airline prayer hairy unfold forbid diminish sweater brave column holy spit superior replace script oasis firefly scared goat divorce oral laundry violence merit golden founder unusual taste preach ruin lying bumpy single glasses fitness argue daisy secret loud squeeze theater husky already";
let three = "ending senior academic amazing academic carbon sheriff march ordinary advocate climate quarter explain view glasses distance scandal modify maiden welcome include webcam snapshot lilac finance faint facility quantity daughter trash formal failure execute grasp necklace trust bishop privacy library infant slim envy parcel boring mixture deploy dough deny patrol evening brave idea blessing slush lizard woman teaspoon news exclude";
let four = "ending senior academic arcade acquire work exceed network revenue blanket force fiber ting standard fatigue extend acid holiday raspy pink vegan survive river step golden scandal tendency spray parcel vintage amuse remove best else unknown overall mild breathe nuclear wrist criminal jury deal rescue symbolic slow predator railroad verify involve require graduate ambition unknown repair scandal hobo voice railroad";
let five = "ending senior academic axle acquire golden velvet depart swing endorse champion estate slush alien burning painting obesity surprise punish gasoline elephant educate declare rebuild plains making unkind carve exotic unfold counter cowboy extra fantasy cleanup pickup increase type deliver together fumes nylon acrobat fatigue listen elder toxic losing paper image aide satisfy award axis evoke capital academic violence canyon";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let result = combine_mnemonics(&input, "")?;
println!("Result: {}", String::from_utf8(result).unwrap());
Ok(())
}
}
| decode_mnemonics | identifier_name |
lib.rs | if next.is_null() || CellHeader::next_cell_is_invalid(neighbors) {
None
} else {
Some(&*next)
}
}
#[inline]
unsafe fn prev_checked(
_neighbors: &Neighbors<'a, CellHeader<'a>>,
prev: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if prev.is_null() {
None
} else {
Some(&*prev)
}
}
}
#[repr(C)]
#[derive(Debug)]
struct AllocatedCell<'a> {
header: CellHeader<'a>,
}
#[test]
fn allocated_cell_layout() {
assert_eq!(
size_of::<CellHeader>(),
size_of::<AllocatedCell>(),
"Safety and correctness depends on AllocatedCell being the same as CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
core::mem::align_of::<AllocatedCell>()
);
}
#[repr(C)]
#[derive(Debug)]
struct FreeCell<'a> {
header: CellHeader<'a>,
next_free_raw: Cell<*const FreeCell<'a>>,
}
#[test]
fn free_cell_layout() {
assert_eq!(
size_of::<CellHeader>() + Words(1),
size_of::<FreeCell>(),
"Safety and correctness depends on FreeCell being only one word larger than CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
core::mem::align_of::<FreeCell>()
);
}
impl<'a> CellHeader<'a> {
// ### Semantics of Low Bits in Neighbors Pointers
//
// If `self.neighbors.next_bit_1` is set, then the cell is allocated, and
// should never be in the free list. If the bit is not set, then this cell
// is free, and must be in the free list (or is in the process of being
// added to the free list).
//
// The `self.neighbors.next` pointer always points to the byte just *after*
// this cell. If the `self.neighbors.next_bit_2` bit is not set, then it
// points to the next cell. If that bit is set, then it points to the
// invalid memory that follows this cell.
fn is_allocated(&self) -> bool {
self.neighbors.get_next_bit_1()
}
fn is_free(&self) -> bool {
!self.is_allocated()
}
fn set_allocated(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_1();
}
fn set_free(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_1();
}
fn next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) -> bool {
neighbors.get_next_bit_2()
}
fn set_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_2();
}
fn clear_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_2();
}
fn size(&self) -> Bytes {
let data = unsafe { (self as *const CellHeader<'a>).offset(1) };
let data = data as usize;
let next = self.neighbors.next_unchecked();
let next = next as usize;
Bytes(next - data)
}
fn as_free_cell(&self) -> Option<&FreeCell<'a>> {
if self.is_free() {
Some(unsafe { &*(self as *const CellHeader as *const FreeCell) })
} else {
None
}
}
// Get a pointer to this cell's data without regard to whether this cell is
// allocated or free.
unsafe fn unchecked_data(&self) -> *const u8 {
(self as *const CellHeader).offset(1) as *const u8
}
// Is this cell aligned to the given power-of-2 alignment?
fn is_aligned_to<B: Into<Bytes>>(&self, align: B) -> bool {
let align = align.into();
let data = unsafe { self.unchecked_data() } as usize;
data & (align.0 - 1) == 0
}
}
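// Illustrative sketch (editor addition, not original source): the tag
// bits described above live in the low bits of the word-aligned `next`
// pointer. The exact bit positions are owned by the Neighbors type; the
// constants below are stand-ins for illustration only.
#[test]
fn low_bit_tagging_sketch() {
    const ALLOCATED_BIT: usize = 0b01; // stand-in for next_bit_1
    const NEXT_INVALID_BIT: usize = 0b10; // stand-in for next_bit_2
    let next: usize = 0x1000; // hypothetical word-aligned next pointer
    let tagged = next | ALLOCATED_BIT | NEXT_INVALID_BIT;
    // Masking off the two low bits recovers the untagged pointer value.
    assert_eq!(tagged & !0b11, next);
    assert!(tagged & ALLOCATED_BIT != 0);
    assert!(tagged & NEXT_INVALID_BIT != 0);
}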
impl<'a> FreeCell<'a> {
// Low bits in `FreeCell::next_free_raw`.
//
// If `NEXT_FREE_CELL_CAN_MERGE` is set, then the following invariants hold
// true:
//
// * `FreeCell::next_free_raw` (and'd with the mask) is not null.
// * `FreeCell::next_free_raw` is the adjacent `CellHeader::prev_cell_raw`.
//
// Therefore, this free cell can be merged into a single, larger, contiguous
// free cell with its previous neighbor, which is also the next cell in the
// free list.
const NEXT_FREE_CELL_CAN_MERGE: usize = 0b01;
const _RESERVED: usize = 0b10;
const MASK: usize = !0b11;
fn next_free_can_merge(&self) -> bool {
self.next_free_raw.get() as usize & Self::NEXT_FREE_CELL_CAN_MERGE != 0
}
fn set_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free | Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn clear_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free & !Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn next_free(&self) -> *const FreeCell<'a> {
let next_free = self.next_free_raw.get() as usize & Self::MASK;
next_free as *const FreeCell<'a>
}
unsafe fn from_uninitialized(
raw: NonNull<u8>,
_size: Bytes,
next_free: Option<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> *const FreeCell<'a> {
let next_free = next_free.unwrap_or(ptr::null_mut());
let raw = raw.as_ptr() as *mut FreeCell;
ptr::write(
raw,
FreeCell {
header: CellHeader::default(),
next_free_raw: Cell::new(next_free),
},
);
raw
}
fn as_allocated_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &AllocatedCell<'a> {
CellHeader::set_allocated(&self.header.neighbors);
unsafe { &*(self as *const FreeCell as *const AllocatedCell) }
}
// Try and satisfy the given allocation request with this cell.
fn try_alloc<'b>(
&'b self,
previous: &'b Cell<*const FreeCell<'a>>,
alloc_size: Words,
align: Bytes,
policy: &dyn AllocPolicy<'a>,
) -> Option<&'b AllocatedCell<'a>> {
// First, do a quick check that this cell can hold an allocation of the
// requested size.
let size: Bytes = alloc_size.into();
if self.header.size() < size {
return None;
}
// Next, try and allocate by splitting this cell in two, and returning
// the second half.
//
// We allocate from the end of this cell, rather than the beginning,
// because it allows us to satisfy alignment requests. Since we can
// choose to split at some alignment and return the aligned cell at the
// end.
let next = self.header.neighbors.next_unchecked() as usize;
let split_and_aligned = (next - size.0) & !(align.0 - 1);
let data = unsafe { self.header.unchecked_data() } as usize;
let min_cell_size: Bytes = policy.min_cell_size(alloc_size).into();
if data + size_of::<CellHeader>().0 + min_cell_size.0 <= split_and_aligned {
let split_cell_head = split_and_aligned - size_of::<CellHeader>().0;
let split_cell = unsafe {
&*FreeCell::from_uninitialized(
unchecked_unwrap(NonNull::new(split_cell_head as *mut u8)),
Bytes(next - split_cell_head) - size_of::<CellHeader>(),
None,
policy,
)
};
Neighbors::append(&self.header, &split_cell.header);
self.clear_next_free_can_merge();
if CellHeader::next_cell_is_invalid(&self.header.neighbors) {
CellHeader::clear_next_cell_is_invalid(&self.header.neighbors);
CellHeader::set_next_cell_is_invalid(&split_cell.header.neighbors);
}
return Some(split_cell.as_allocated_cell(policy));
}
// There isn't enough room to split this cell and still satisfy the
// requested allocation. Because of the early check, we know this cell
// is large enough to fit the requested size, but is the cell's data | // properly aligned?
if self.header.is_aligned_to(align) {
previous.set(self.next_free());
let allocated = self.as_allocated_cell(policy);
return Some(allocated);
}
None
}
fn insert_into_free_list<'b>(
&'b self,
head: &'b Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> &'b Cell<*const FreeCell<'a>> {
self.next_free_raw.set(head.get());
head.set(self);
head
}
}
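// Illustrative sketch (editor addition, not original source): the
// split-from-the-end arithmetic used by `try_alloc` above, with concrete
// numbers. Assume this cell's data ends at next = 0x10F0 and the request
// is 24 bytes at 16-byte alignment.
#[test]
fn split_and_aligned_sketch() {
    let next: usize = 0x10F0; // address one past the end of the cell
    let size: usize = 24; // requested allocation size in bytes
    let align: usize = 16; // requested power-of-two alignment
    // Same expression as try_alloc: back up by `size`, then round down
    // to the requested alignment.
    let split_and_aligned = (next - size) & !(align - 1);
    assert_eq!(split_and_aligned, 0x10D0);
    assert_eq!(split_and_aligned % align, 0);
    // The aligned tail still holds the full request.
    assert!(next - split_and_aligned >= size);
}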
impl<'a> AllocatedCell<'a> {
unsafe fn as_free_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &FreeCell<'a> {
CellHeader::set_free(&self.header.neighbors);
let free: &FreeCell = &*(self as *const AllocatedCell as *const FreeCell);
free.next_free_raw.set(ptr::null_mut());
free
}
fn data(&self) -> *const u8 {
let cell = &self.header as *const CellHeader;
unsafe { cell.offset(1) as *const u8 }
}
}
trait AllocPolicy<'a> {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr>;
fn min_cell_size(&self, alloc_size: Words) -> Words;
fn should_merge_adjacent_free_cells(&self) -> bool;
}
struct LargeAllocPolicy;
static LARGE_ALLOC_POLICY: LargeAllocPolicy = LargeAllocPolicy;
impl LargeAllocPolicy {
const MIN_CELL_SIZE: Words = Words(size_classes::SizeClasses::NUM_SIZE_CLASSES * 2);
}
impl<'a> AllocPolicy<'a> for LargeAllocPolicy {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr> {
// To assure that an allocation will always succeed after refilling the
// free list with this new cell, make sure that we allocate enough to
// fulfill the requested alignment, and still have the minimum cell size
// left over.
let size: Bytes = cmp::max(size.into(), (align + Self::MIN_CELL_SIZE) * Words(2));
let pages: Pages = (size + size_of::<CellHeader>()).round_up_to();
let new_pages = imp::alloc_pages(pages)?;
let allocated_size: Bytes = pages.into();
let free_cell = &*FreeCell::from_uninitialized(
new_pages,
allocated_size - size_of::<CellHeader>(),
None,
self as &dyn AllocPolicy<'a>,
);
let next_cell = (new_pages.as_ptr() as *const u8).add(allocated_size.0);
free_cell
.header
.neighbors
.set_next(next_cell as *const CellHeader);
CellHeader::set_next_cell_is_invalid(&free_cell.header.neighbors);
Ok(free_cell)
}
fn min_cell_size(&self, _alloc_size: Words) -> Words {
Self::MIN_CELL_SIZE
}
fn should_merge_adjacent_free_cells(&self) -> bool {
true
}
}
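// Plain-number sketch of the sizing rule above, assuming (hypothetically)
// that MIN_CELL_SIZE works out to 64 bytes: a request of 8 bytes with
// 8-byte alignment refills with max(8, (8 + 64) * 2) = 144 bytes, plus one
// cell header, rounded up to whole pages.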
#[inline]
unsafe fn unchecked_unwrap<T>(o: Option<T>) -> T {
match o {
Some(t) => t,
None => core::hint::unreachable_unchecked(),
}
}
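// Presumably used instead of `Option::unwrap` to avoid pulling in panic
// machinery and keep code size down; callers must guarantee the value is
// `Some`, since reaching `None` here is undefined behavior.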
unsafe fn walk_free_list<'a, F, T>(
head: &Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
mut f: F,
) -> Result<T, AllocErr>
where
F: FnMut(&Cell<*const FreeCell<'a>>, &FreeCell<'a>) -> Option<T>,
{
// The previous cell in the free list (not to be confused with the current
// cell's previously _adjacent_ cell).
let previous_free = head;
loop {
let current_free = previous_free.get();
if current_free.is_null() {
return Err(AllocErr);
}
let current_free = Cell::new(current_free);
// Now check if this cell can merge with the next cell in the free
// list.
//
// We don't re-check `policy.should_merge_adjacent_free_cells()` because
// the `NEXT_FREE_CELL_CAN_MERGE` bit only gets set after checking with
// the policy.
while (*current_free.get()).next_free_can_merge() {
let current = &*current_free.get();
current.clear_next_free_can_merge();
let prev_neighbor = unchecked_unwrap(
current
.header
.neighbors
.prev()
.and_then(|p| p.as_free_cell()),
);
current.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(¤t.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev_neighbor.header.neighbors);
}
previous_free.set(prev_neighbor);
current_free.set(prev_neighbor);
}
if let Some(result) = f(previous_free, &*current_free.get()) {
return Ok(result);
}
previous_free.set(&*(*current_free.get()).next_free_raw.get());
}
}
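// Note on the merge loop above: `NEXT_FREE_CELL_CAN_MERGE` is only a
// promise recorded at dealloc time. The actual consolidation happens here,
// lazily, on the next walk of the free list.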
/// Do a first-fit allocation from the given free list.
unsafe fn alloc_first_fit<'a>(
size: Words,
align: Bytes,
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
walk_free_list(head, policy, |previous, current| {
if let Some(allocated) = current.try_alloc(previous, size, align, policy) {
return Some(unchecked_unwrap(NonNull::new(allocated.data() as *mut u8)));
}
None
})
}
unsafe fn alloc_with_refill<'a, 'b>(
size: Words,
align: Bytes,
head: &'b Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
if let Ok(result) = alloc_first_fit(size, align, head, policy) {
return Ok(result);
}
let cell = policy.new_cell_for_free_list(size, align)?;
let head = (*cell).insert_into_free_list(head, policy);
alloc_first_fit(size, align, head, policy)
}
/// An N64 allocator.
///
/// # Safety
///
/// When used in unix environments, the allocator cannot move in memory. This is
/// typically not an issue if you're just using it as a `static` global allocator.
pub struct N64Alloc<'a> {
head: imp::Exclusive<*const FreeCell<'a>>,
size_classes: size_classes::SizeClasses<'a>,
}
unsafe impl<'a> Sync for N64Alloc<'a> {}
impl<'a> ConstInit for N64Alloc<'a> {
const INIT: N64Alloc<'a> = N64Alloc {
head: imp::Exclusive::INIT,
size_classes: size_classes::SizeClasses::INIT,
};
}
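// Downstream usage sketch (with a hypothetical crate name `n64_alloc`): a
// consuming binary would install the allocator via the `GlobalAlloc` impl
// that appears later in this file.
//
// #[global_allocator]
// static ALLOC: n64_alloc::N64Alloc<'static> = n64_alloc::N64Alloc::INIT;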
impl<'a> N64Alloc<'a> {
/// An initial `const` default construction of a `N64Alloc` allocator.
///
/// This is usable for initializing `static`s that get set as the global
/// allocator.
pub const INIT: Self = <Self as ConstInit>::INIT;
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, align: Bytes, f: F) -> T
where
F: for<'b> FnOnce(&'b Cell<*const FreeCell<'a>>, &'b dyn AllocPolicy<'a>) -> T,
{
if align <= size_of::<usize>() {
if let Some(head) = self.size_classes.get(size) {
let policy = size_classes::SizeClassAllocPolicy(&self.head);
let policy = &policy as &dyn AllocPolicy<'a>;
return head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
});
}
}
let policy = &LARGE_ALLOC_POLICY as &dyn AllocPolicy<'a>;
self.head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
})
}
unsafe fn alloc_impl(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
let size = Bytes(layout.size());
let align = if layout.align() == 0 {
Bytes(1)
} else {
Bytes(layout.align())
};
if size.0 == 0 {
// Ensure that our made up pointer is properly aligned by using the
// alignment as the pointer.
return Ok(NonNull::new_unchecked(align.0 as *mut u8));
}
let word_size: Words = checked_round_up_to(size).ok_or(AllocErr)?;
self.with_free_list_and_policy_for_size(word_size, align, |head, policy| {
alloc_with_refill(word_size, align, head, policy)
})
}
unsafe fn dealloc_impl(&self, ptr: NonNull<u8>, layout: Layout) {
let size = Bytes(layout.size());
if size.0 == 0 {
return;
}
let size: Words = size.round_up_to();
let align = Bytes(layout.align());
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
let cell = (ptr.as_ptr() as *mut CellHeader<'a> as *const CellHeader<'a>).offset(-1);
let cell = &*cell;
let cell: &AllocatedCell<'a> = &*(cell as *const CellHeader as *const AllocatedCell);
let free = cell.as_free_cell(policy);
if policy.should_merge_adjacent_free_cells() {
// Merging with the _previous_ adjacent cell is easy: it is
// already in the free list, so folding this cell into it is all
// that needs to be done. The free list can be left alone.
//
// Merging with the _next_ adjacent cell is a little harder. It
// is already in the free list, but we need to splice it out
// from the free list, since its header will become invalid
// after consolidation, and it is *this* cell's header that
// needs to be in the free list. But we don't have access to the
// pointer pointing to the soon-to-be-invalid header, and
// therefore can't adjust that pointer. So we have a delayed
// consolidation scheme. We insert this cell just after the next
// adjacent cell in the free list, and set the next adjacent
// cell's `NEXT_FREE_CAN_MERGE` bit. The next time that we walk
// the free list for allocation, the bit will be checked and the
// consolidation will happen at that time.
//
// If _both_ the previous and next adjacent cells are free, we
// are faced with a dilemma. We cannot merge all previous,
// current, and next cells together because our singly-linked
// free list doesn't allow for that kind of arbitrary appending
// and splicing. There are a few different kinds of tricks we
// could pull here, but they would increase implementation
// complexity and code size. Instead, we use a heuristic to
// choose whether to merge with the previous or next adjacent
// cell. We could choose to merge with whichever neighbor cell
// is smaller or larger, but we don't. We prefer the previous
// adjacent cell because we can greedily consoli | random_line_split |
|
lib.rs | next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_2();
}
fn size(&self) -> Bytes {
let data = unsafe { (self as *const CellHeader<'a>).offset(1) };
let data = data as usize;
let next = self.neighbors.next_unchecked();
let next = next as usize;
Bytes(next - data)
}
fn as_free_cell(&self) -> Option<&FreeCell<'a>> {
if self.is_free() {
Some(unsafe { &*(self as *const CellHeader as *const FreeCell) })
} else {
None
}
}
// Get a pointer to this cell's data without regard to whether this cell is
// allocated or free.
unsafe fn unchecked_data(&self) -> *const u8 {
(self as *const CellHeader).offset(1) as *const u8
}
// Is this cell aligned to the given power-of-2 alignment?
fn is_aligned_to<B: Into<Bytes>>(&self, align: B) -> bool {
let align = align.into();
let data = unsafe { self.unchecked_data() } as usize;
data & (align.0 - 1) == 0
}
}
impl<'a> FreeCell<'a> {
// Low bits in `FreeCell::next_free_raw`.
//
// If `NEXT_FREE_CELL_CAN_MERGE` is set, then the following invariants hold
// true:
//
// * `FreeCell::next_free_raw` (and'd with the mask) is not null.
// * `FreeCell::next_free_raw` is the adjacent `CellHeader::prev_cell_raw`.
//
// Therefore, this free cell can be merged into a single, larger, contiguous
// free cell with its previous neighbor, which is also the next cell in the
// free list.
const NEXT_FREE_CELL_CAN_MERGE: usize = 0b01;
const _RESERVED: usize = 0b10;
const MASK: usize = !0b11;
fn next_free_can_merge(&self) -> bool {
self.next_free_raw.get() as usize & Self::NEXT_FREE_CELL_CAN_MERGE != 0
}
fn set_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free | Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn clear_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free & !Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn next_free(&self) -> *const FreeCell<'a> {
let next_free = self.next_free_raw.get() as usize & Self::MASK;
next_free as *const FreeCell<'a>
}
unsafe fn from_uninitialized(
raw: NonNull<u8>,
_size: Bytes,
next_free: Option<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> *const FreeCell<'a> {
let next_free = next_free.unwrap_or(ptr::null_mut());
let raw = raw.as_ptr() as *mut FreeCell;
ptr::write(
raw,
FreeCell {
header: CellHeader::default(),
next_free_raw: Cell::new(next_free),
},
);
raw
}
fn as_allocated_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &AllocatedCell<'a> {
CellHeader::set_allocated(&self.header.neighbors);
unsafe { &*(self as *const FreeCell as *const AllocatedCell) }
}
// Try and satisfy the given allocation request with this cell.
fn try_alloc<'b>(
&'b self,
previous: &'b Cell<*const FreeCell<'a>>,
alloc_size: Words,
align: Bytes,
policy: &dyn AllocPolicy<'a>,
) -> Option<&'b AllocatedCell<'a>> {
// First, do a quick check that this cell can hold an allocation of the
// requested size.
let size: Bytes = alloc_size.into();
if self.header.size() < size {
return None;
}
// Next, try and allocate by splitting this cell in two, and returning
// the second half.
//
// We allocate from the end of this cell, rather than the beginning,
// because it allows us to satisfy alignment requests: we can choose to
// split at some alignment and return the aligned cell at the end.
let next = self.header.neighbors.next_unchecked() as usize;
let split_and_aligned = (next - size.0) & !(align.0 - 1);
let data = unsafe { self.header.unchecked_data() } as usize;
let min_cell_size: Bytes = policy.min_cell_size(alloc_size).into();
if data + size_of::<CellHeader>().0 + min_cell_size.0 <= split_and_aligned {
let split_cell_head = split_and_aligned - size_of::<CellHeader>().0;
let split_cell = unsafe {
&*FreeCell::from_uninitialized(
unchecked_unwrap(NonNull::new(split_cell_head as *mut u8)),
Bytes(next - split_cell_head) - size_of::<CellHeader>(),
None,
policy,
)
};
Neighbors::append(&self.header, &split_cell.header);
self.clear_next_free_can_merge();
if CellHeader::next_cell_is_invalid(&self.header.neighbors) {
CellHeader::clear_next_cell_is_invalid(&self.header.neighbors);
CellHeader::set_next_cell_is_invalid(&split_cell.header.neighbors);
}
return Some(split_cell.as_allocated_cell(policy));
}
// There isn't enough room to split this cell and still satisfy the
// requested allocation. Because of the early check, we know this cell
// is large enough to fit the requested size, but is the cell's data
// properly aligned?
if self.header.is_aligned_to(align) {
previous.set(self.next_free());
let allocated = self.as_allocated_cell(policy);
return Some(allocated);
}
None
}
fn insert_into_free_list<'b>(
&'b self,
head: &'b Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> &'b Cell<*const FreeCell<'a>> {
self.next_free_raw.set(head.get());
head.set(self);
head
}
}
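// A quick check of the tag-bit arithmetic above: tagging an address with
// `NEXT_FREE_CELL_CAN_MERGE` and masking with `MASK` must round-trip the
// original value. Plain usizes, so no cells need to be constructed.
#[test]
fn next_free_tag_bits_round_trip() {
    let addr = 0x8000_0040_usize; // hypothetical word-aligned address
    let tagged = addr | 0b01; // set NEXT_FREE_CELL_CAN_MERGE
    assert!(tagged & 0b01 != 0); // next_free_can_merge() would see the bit
    assert_eq!(tagged & !0b11, addr); // next_free() masks the low bits away
}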
impl<'a> AllocatedCell<'a> {
unsafe fn as_free_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &FreeCell<'a> {
CellHeader::set_free(&self.header.neighbors);
let free: &FreeCell = &*(self as *const AllocatedCell as *const FreeCell);
free.next_free_raw.set(ptr::null_mut());
free
}
fn data(&self) -> *const u8 {
let cell = &self.header as *const CellHeader;
unsafe { cell.offset(1) as *const u8 }
}
}
trait AllocPolicy<'a> {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr>;
fn min_cell_size(&self, alloc_size: Words) -> Words;
fn should_merge_adjacent_free_cells(&self) -> bool;
}
struct LargeAllocPolicy;
static LARGE_ALLOC_POLICY: LargeAllocPolicy = LargeAllocPolicy;
impl LargeAllocPolicy {
const MIN_CELL_SIZE: Words = Words(size_classes::SizeClasses::NUM_SIZE_CLASSES * 2);
}
impl<'a> AllocPolicy<'a> for LargeAllocPolicy {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr> {
// To ensure that an allocation will always succeed after refilling the
// free list with this new cell, make sure that we allocate enough to
// fulfill the requested alignment, and still have the minimum cell size
// left over.
let size: Bytes = cmp::max(size.into(), (align + Self::MIN_CELL_SIZE) * Words(2));
let pages: Pages = (size + size_of::<CellHeader>()).round_up_to();
let new_pages = imp::alloc_pages(pages)?;
let allocated_size: Bytes = pages.into();
let free_cell = &*FreeCell::from_uninitialized(
new_pages,
allocated_size - size_of::<CellHeader>(),
None,
self as &dyn AllocPolicy<'a>,
);
let next_cell = (new_pages.as_ptr() as *const u8).add(allocated_size.0);
free_cell
.header
.neighbors
.set_next(next_cell as *const CellHeader);
CellHeader::set_next_cell_is_invalid(&free_cell.header.neighbors);
Ok(free_cell)
}
fn min_cell_size(&self, _alloc_size: Words) -> Words {
Self::MIN_CELL_SIZE
}
fn should_merge_adjacent_free_cells(&self) -> bool {
true
}
}
#[inline]
unsafe fn unchecked_unwrap<T>(o: Option<T>) -> T {
match o {
Some(t) => t,
None => core::hint::unreachable_unchecked(),
}
}
unsafe fn walk_free_list<'a, F, T>(
head: &Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
mut f: F,
) -> Result<T, AllocErr>
where
F: FnMut(&Cell<*const FreeCell<'a>>, &FreeCell<'a>) -> Option<T>,
{
// The previous cell in the free list (not to be confused with the current
// cell's previously _adjacent_ cell).
let previous_free = head;
loop {
let current_free = previous_free.get();
if current_free.is_null() {
return Err(AllocErr);
}
let current_free = Cell::new(current_free);
// Now check if this cell can merge with the next cell in the free
// list.
//
// We don't re-check `policy.should_merge_adjacent_free_cells()` because
// the `NEXT_FREE_CELL_CAN_MERGE` bit only gets set after checking with
// the policy.
while (*current_free.get()).next_free_can_merge() {
let current = &*current_free.get();
current.clear_next_free_can_merge();
let prev_neighbor = unchecked_unwrap(
current
.header
.neighbors
.prev()
.and_then(|p| p.as_free_cell()),
);
current.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(¤t.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev_neighbor.header.neighbors);
}
previous_free.set(prev_neighbor);
current_free.set(prev_neighbor);
}
if let Some(result) = f(previous_free, &*current_free.get()) {
return Ok(result);
}
previous_free.set(&*(*current_free.get()).next_free_raw.get());
}
}
/// Do a first-fit allocation from the given free list.
unsafe fn alloc_first_fit<'a>(
size: Words,
align: Bytes,
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
walk_free_list(head, policy, |previous, current| {
if let Some(allocated) = current.try_alloc(previous, size, align, policy) {
return Some(unchecked_unwrap(NonNull::new(allocated.data() as *mut u8)));
}
None
})
}
unsafe fn alloc_with_refill<'a, 'b>(
size: Words,
align: Bytes,
head: &'b Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
if let Ok(result) = alloc_first_fit(size, align, head, policy) {
return Ok(result);
}
let cell = policy.new_cell_for_free_list(size, align)?;
let head = (*cell).insert_into_free_list(head, policy);
alloc_first_fit(size, align, head, policy)
}
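// Control-flow summary of the function above: try a first-fit pass over the
// current free list; on failure, ask the policy for one new cell sized so
// the retry cannot fail, push it onto the front of the list, and retry.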
/// An N64 allocator.
///
/// # Safety
///
/// When used in unix environments, the allocator cannot move in memory. This is
/// typically not an issue if you're just using it as a `static` global allocator.
pub struct N64Alloc<'a> {
head: imp::Exclusive<*const FreeCell<'a>>,
size_classes: size_classes::SizeClasses<'a>,
}
unsafe impl<'a> Sync for N64Alloc<'a> {}
impl<'a> ConstInit for N64Alloc<'a> {
const INIT: N64Alloc<'a> = N64Alloc {
head: imp::Exclusive::INIT,
size_classes: size_classes::SizeClasses::INIT,
};
}
impl<'a> N64Alloc<'a> {
/// An initial `const` default construction of a `N64Alloc` allocator.
///
/// This is usable for initializing `static`s that get set as the global
/// allocator.
pub const INIT: Self = <Self as ConstInit>::INIT;
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, align: Bytes, f: F) -> T
where
F: for<'b> FnOnce(&'b Cell<*const FreeCell<'a>>, &'b dyn AllocPolicy<'a>) -> T,
{
if align <= size_of::<usize>() {
if let Some(head) = self.size_classes.get(size) {
let policy = size_classes::SizeClassAllocPolicy(&self.head);
let policy = &policy as &dyn AllocPolicy<'a>;
return head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
});
}
}
let policy = &LARGE_ALLOC_POLICY as &dyn AllocPolicy<'a>;
self.head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
})
}
unsafe fn alloc_impl(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
let size = Bytes(layout.size());
let align = if layout.align() == 0 {
Bytes(1)
} else {
Bytes(layout.align())
};
if size.0 == 0 {
// Ensure that our made up pointer is properly aligned by using the
// alignment as the pointer.
return Ok(NonNull::new_unchecked(align.0 as *mut u8));
}
let word_size: Words = checked_round_up_to(size).ok_or(AllocErr)?;
self.with_free_list_and_policy_for_size(word_size, align, |head, policy| {
alloc_with_refill(word_size, align, head, policy)
})
}
unsafe fn dealloc_impl(&self, ptr: NonNull<u8>, layout: Layout) {
let size = Bytes(layout.size());
if size.0 == 0 {
return;
}
let size: Words = size.round_up_to();
let align = Bytes(layout.align());
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
let cell = (ptr.as_ptr() as *mut CellHeader<'a> as *const CellHeader<'a>).offset(-1);
let cell = &*cell;
let cell: &AllocatedCell<'a> = &*(cell as *const CellHeader as *const AllocatedCell);
let free = cell.as_free_cell(policy);
if policy.should_merge_adjacent_free_cells() {
// Merging with the _previous_ adjacent cell is easy: it is
// already in the free list, so folding this cell into it is all
// that needs to be done. The free list can be left alone.
//
// Merging with the _next_ adjacent cell is a little harder. It
// is already in the free list, but we need to splice it out
// from the free list, since its header will become invalid
// after consolidation, and it is *this* cell's header that
// needs to be in the free list. But we don't have access to the
// pointer pointing to the soon-to-be-invalid header, and
// therefore can't adjust that pointer. So we have a delayed
// consolidation scheme. We insert this cell just after the next
// adjacent cell in the free list, and set the next adjacent
// cell's `NEXT_FREE_CAN_MERGE` bit. The next time that we walk
// the free list for allocation, the bit will be checked and the
// consolidation will happen at that time.
//
// If _both_ the previous and next adjacent cells are free, we
// are faced with a dilemma. We cannot merge all previous,
// current, and next cells together because our singly-linked
// free list doesn't allow for that kind of arbitrary appending
// and splicing. There are a few different kinds of tricks we
// could pull here, but they would increase implementation
// complexity and code size. Instead, we use a heuristic to
// choose whether to merge with the previous or next adjacent
// cell. We could choose to merge with whichever neighbor cell
// is smaller or larger, but we don't. We prefer the previous
// adjacent cell because we can greedily consolidate with it
// immediately, whereas consolidation with the next adjacent
// cell must be delayed, as explained above.
if let Some(prev) = free
.header
.neighbors
.prev()
.and_then(|p| (*p).as_free_cell())
{
free.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(&free.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev.header.neighbors);
}
return;
}
if let Some(next) = free
.header
.neighbors
.next()
.and_then(|n| (*n).as_free_cell())
{
free.next_free_raw.set(next.next_free());
next.next_free_raw.set(free);
next.set_next_free_can_merge();
return;
}
}
// Either we don't want to merge cells for the current policy, or we
// didn't have the opportunity to do any merging with our adjacent
// neighbors. In either case, push this cell onto the front of the
// free list.
let _head = free.insert_into_free_list(head, policy);
});
}
}
pub static ALLOC_BYTES_LEFT: AtomicI32 = AtomicI32::new(imp::SCRATCH_LEN_BYTES as i32);
pub static ALLOC_BYTES_USED: AtomicI32 = AtomicI32::new(0);
pub use imp::OFFSET as ALLOC_PAGE_OFFSET;
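// Usage sketch for the counters above (a crude live-heap gauge); reads are
// single atomic loads, and the names match the statics defined just above.
//
// let used = ALLOC_BYTES_USED.load(Ordering::SeqCst);
// let left = ALLOC_BYTES_LEFT.load(Ordering::SeqCst);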
unsafe impl GlobalAlloc for N64Alloc<'static> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
ALLOC_BYTES_LEFT.fetch_sub(layout.size() as i32, Ordering::SeqCst);
ALLOC_BYTES_USED.fetch_add(layout.size() as i32, Ordering::SeqCst);
match self.alloc_impl(layout) {
Ok(ptr) => ptr.as_ptr(),
Err(AllocErr) => ptr::null_mut(),
}
}
unsafe fn | dealloc | identifier_name |
|
lib.rs | if next.is_null() || CellHeader::next_cell_is_invalid(neighbors) {
None
} else {
Some(&*next)
}
}
#[inline]
unsafe fn prev_checked(
_neighbors: &Neighbors<'a, CellHeader<'a>>,
prev: *const CellHeader<'a>,
) -> Option<&'a CellHeader<'a>> {
if prev.is_null() {
None
} else {
Some(&*prev)
}
}
}
#[repr(C)]
#[derive(Debug)]
struct AllocatedCell<'a> {
header: CellHeader<'a>,
}
#[test]
fn allocated_cell_layout() {
assert_eq!(
size_of::<CellHeader>(),
size_of::<AllocatedCell>(),
"Safety and correctness depends on AllocatedCell being the same as CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
core::mem::align_of::<AllocatedCell>()
);
}
#[repr(C)]
#[derive(Debug)]
struct FreeCell<'a> {
header: CellHeader<'a>,
next_free_raw: Cell<*const FreeCell<'a>>,
}
#[test]
fn free_cell_layout() {
assert_eq!(
size_of::<CellHeader>() + Words(1),
size_of::<FreeCell>(),
"Safety and correctness depends on FreeCell being only one word larger than CellHeader"
);
assert_eq!(
core::mem::align_of::<CellHeader>(),
core::mem::align_of::<FreeCell>()
);
}
impl<'a> CellHeader<'a> {
// ### Semantics of Low Bits in Neighbors Pointers
//
// If `self.neighbors.next_bit_1` is set, then the cell is allocated, and
// should never be in the free list. If the bit is not set, then this cell
// is free, and must be in the free list (or is in the process of being
// added to the free list).
//
// The `self.neighbors.next` pointer always points to the byte just *after*
// this cell. If the `self.neighbors.next_bit_2` bit is not set, then it
// points to the next cell. If that bit is set, then it points to the
// invalid memory that follows this cell.
fn is_allocated(&self) -> bool {
self.neighbors.get_next_bit_1()
}
fn is_free(&self) -> bool {
!self.is_allocated()
}
fn set_allocated(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_1();
}
fn set_free(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_1();
}
fn next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) -> bool {
neighbors.get_next_bit_2()
}
fn set_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.set_next_bit_2();
}
fn clear_next_cell_is_invalid(neighbors: &Neighbors<'a, Self>) {
neighbors.clear_next_bit_2();
}
fn size(&self) -> Bytes {
let data = unsafe { (self as *const CellHeader<'a>).offset(1) };
let data = data as usize;
let next = self.neighbors.next_unchecked();
let next = next as usize;
Bytes(next - data)
}
fn as_free_cell(&self) -> Option<&FreeCell<'a>> {
if self.is_free() {
Some(unsafe { &*(self as *const CellHeader as *const FreeCell) })
} else {
None
}
}
// Get a pointer to this cell's data without regard to whether this cell is
// allocated or free.
unsafe fn unchecked_data(&self) -> *const u8 {
(self as *const CellHeader).offset(1) as *const u8
}
// Is this cell aligned to the given power-of-2 alignment?
fn is_aligned_to<B: Into<Bytes>>(&self, align: B) -> bool {
let align = align.into();
let data = unsafe { self.unchecked_data() } as usize;
data & (align.0 - 1) == 0
}
}
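// Sketch of the power-of-two alignment predicate used by `is_aligned_to`:
// `addr & (align - 1) == 0` holds exactly when `addr` is a multiple of a
// power-of-two `align`. The addresses below are arbitrary.
#[test]
fn power_of_two_alignment_predicate() {
    assert_eq!(0x1000_usize & (8 - 1), 0); // 8-byte aligned
    assert_ne!(0x1004_usize & (8 - 1), 0); // 4 bytes past an 8-byte boundary
}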
impl<'a> FreeCell<'a> {
// Low bits in `FreeCell::next_free_raw`.
//
// If `NEXT_FREE_CELL_CAN_MERGE` is set, then the following invariants hold
// true:
//
// * `FreeCell::next_free_raw` (and'd with the mask) is not null.
// * `FreeCell::next_free_raw` is the adjacent `CellHeader::prev_cell_raw`.
//
// Therefore, this free cell can be merged into a single, larger, contiguous
// free cell with its previous neighbor, which is also the next cell in the
// free list.
const NEXT_FREE_CELL_CAN_MERGE: usize = 0b01;
const _RESERVED: usize = 0b10;
const MASK: usize = !0b11;
fn next_free_can_merge(&self) -> bool {
self.next_free_raw.get() as usize & Self::NEXT_FREE_CELL_CAN_MERGE != 0
}
fn set_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free | Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn clear_next_free_can_merge(&self) {
let next_free = self.next_free_raw.get() as usize;
let next_free = next_free & !Self::NEXT_FREE_CELL_CAN_MERGE;
self.next_free_raw.set(next_free as *const FreeCell);
}
fn next_free(&self) -> *const FreeCell<'a> {
let next_free = self.next_free_raw.get() as usize & Self::MASK;
next_free as *const FreeCell<'a>
}
unsafe fn from_uninitialized(
raw: NonNull<u8>,
_size: Bytes,
next_free: Option<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> *const FreeCell<'a> {
let next_free = next_free.unwrap_or(ptr::null_mut());
let raw = raw.as_ptr() as *mut FreeCell;
ptr::write(
raw,
FreeCell {
header: CellHeader::default(),
next_free_raw: Cell::new(next_free),
},
);
raw
}
fn as_allocated_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &AllocatedCell<'a> {
CellHeader::set_allocated(&self.header.neighbors);
unsafe { &*(self as *const FreeCell as *const AllocatedCell) }
}
// Try and satisfy the given allocation request with this cell.
fn try_alloc<'b>(
&'b self,
previous: &'b Cell<*const FreeCell<'a>>,
alloc_size: Words,
align: Bytes,
policy: &dyn AllocPolicy<'a>,
) -> Option<&'b AllocatedCell<'a>> {
// First, do a quick check that this cell can hold an allocation of the
// requested size.
let size: Bytes = alloc_size.into();
if self.header.size() < size {
return None;
}
// Next, try and allocate by splitting this cell in two, and returning
// the second half.
//
// We allocate from the end of this cell, rather than the beginning,
// because it allows us to satisfy alignment requests: we can choose to
// split at some alignment and return the aligned cell at the end.
let next = self.header.neighbors.next_unchecked() as usize;
let split_and_aligned = (next - size.0) & !(align.0 - 1);
let data = unsafe { self.header.unchecked_data() } as usize;
let min_cell_size: Bytes = policy.min_cell_size(alloc_size).into();
if data + size_of::<CellHeader>().0 + min_cell_size.0 <= split_and_aligned {
let split_cell_head = split_and_aligned - size_of::<CellHeader>().0;
let split_cell = unsafe {
&*FreeCell::from_uninitialized(
unchecked_unwrap(NonNull::new(split_cell_head as *mut u8)),
Bytes(next - split_cell_head) - size_of::<CellHeader>(),
None,
policy,
)
};
Neighbors::append(&self.header, &split_cell.header);
self.clear_next_free_can_merge();
if CellHeader::next_cell_is_invalid(&self.header.neighbors) {
CellHeader::clear_next_cell_is_invalid(&self.header.neighbors);
CellHeader::set_next_cell_is_invalid(&split_cell.header.neighbors);
}
return Some(split_cell.as_allocated_cell(policy));
}
// There isn't enough room to split this cell and still satisfy the
// requested allocation. Because of the early check, we know this cell
// is large enough to fit the requested size, but is the cell's data
// properly aligned?
if self.header.is_aligned_to(align) {
previous.set(self.next_free());
let allocated = self.as_allocated_cell(policy);
return Some(allocated);
}
None
}
fn insert_into_free_list<'b>(
&'b self,
head: &'b Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
) -> &'b Cell<*const FreeCell<'a>> {
self.next_free_raw.set(head.get());
head.set(self);
head
}
}
impl<'a> AllocatedCell<'a> {
unsafe fn as_free_cell(&self, _policy: &dyn AllocPolicy<'a>) -> &FreeCell<'a> {
CellHeader::set_free(&self.header.neighbors);
let free: &FreeCell = &*(self as *const AllocatedCell as *const FreeCell);
free.next_free_raw.set(ptr::null_mut());
free
}
fn data(&self) -> *const u8 {
let cell = &self.header as *const CellHeader;
unsafe { cell.offset(1) as *const u8 }
}
}
trait AllocPolicy<'a> {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr>;
fn min_cell_size(&self, alloc_size: Words) -> Words;
fn should_merge_adjacent_free_cells(&self) -> bool;
}
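// Hypothetical toy policy, for illustration only (not used anywhere in this
// file): it never refills the free list and never merges, so allocation
// simply fails once the existing free cells are exhausted.
struct NoRefillPolicy;
impl<'a> AllocPolicy<'a> for NoRefillPolicy {
    unsafe fn new_cell_for_free_list(
        &self,
        _size: Words,
        _align: Bytes,
    ) -> Result<*const FreeCell<'a>, AllocErr> {
        Err(AllocErr)
    }
    fn min_cell_size(&self, _alloc_size: Words) -> Words {
        Words(1)
    }
    fn should_merge_adjacent_free_cells(&self) -> bool {
        false
    }
}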
struct LargeAllocPolicy;
static LARGE_ALLOC_POLICY: LargeAllocPolicy = LargeAllocPolicy;
impl LargeAllocPolicy {
const MIN_CELL_SIZE: Words = Words(size_classes::SizeClasses::NUM_SIZE_CLASSES * 2);
}
impl<'a> AllocPolicy<'a> for LargeAllocPolicy {
unsafe fn new_cell_for_free_list(
&self,
size: Words,
align: Bytes,
) -> Result<*const FreeCell<'a>, AllocErr> {
// To ensure that an allocation will always succeed after refilling the
// free list with this new cell, make sure that we allocate enough to
// fulfill the requested alignment, and still have the minimum cell size
// left over.
let size: Bytes = cmp::max(size.into(), (align + Self::MIN_CELL_SIZE) * Words(2));
let pages: Pages = (size + size_of::<CellHeader>()).round_up_to();
let new_pages = imp::alloc_pages(pages)?;
let allocated_size: Bytes = pages.into();
let free_cell = &*FreeCell::from_uninitialized(
new_pages,
allocated_size - size_of::<CellHeader>(),
None,
self as &dyn AllocPolicy<'a>,
);
let next_cell = (new_pages.as_ptr() as *const u8).add(allocated_size.0);
free_cell
.header
.neighbors
.set_next(next_cell as *const CellHeader);
CellHeader::set_next_cell_is_invalid(&free_cell.header.neighbors);
Ok(free_cell)
}
fn min_cell_size(&self, _alloc_size: Words) -> Words {
Self::MIN_CELL_SIZE
}
fn should_merge_adjacent_free_cells(&self) -> bool {
true
}
}
#[inline]
unsafe fn unchecked_unwrap<T>(o: Option<T>) -> T {
match o {
Some(t) => t,
None => core::hint::unreachable_unchecked(),
}
}
unsafe fn walk_free_list<'a, F, T>(
head: &Cell<*const FreeCell<'a>>,
_policy: &dyn AllocPolicy<'a>,
mut f: F,
) -> Result<T, AllocErr>
where
F: FnMut(&Cell<*const FreeCell<'a>>, &FreeCell<'a>) -> Option<T>,
{
// The previous cell in the free list (not to be confused with the current
// cell's previously _adjacent_ cell).
let previous_free = head;
loop {
let current_free = previous_free.get();
if current_free.is_null() {
return Err(AllocErr);
}
let current_free = Cell::new(current_free);
// Now check if this cell can merge with the next cell in the free
// list.
//
// We don't re-check `policy.should_merge_adjacent_free_cells()` because
// the `NEXT_FREE_CELL_CAN_MERGE` bit only gets set after checking with
// the policy.
while (*current_free.get()).next_free_can_merge() {
let current = &*current_free.get();
current.clear_next_free_can_merge();
let prev_neighbor = unchecked_unwrap(
current
.header
.neighbors
.prev()
.and_then(|p| p.as_free_cell()),
);
current.header.neighbors.remove();
if CellHeader::next_cell_is_invalid(¤t.header.neighbors) {
CellHeader::set_next_cell_is_invalid(&prev_neighbor.header.neighbors);
}
previous_free.set(prev_neighbor);
current_free.set(prev_neighbor);
}
if let Some(result) = f(previous_free, &*current_free.get()) {
return Ok(result);
}
previous_free.set(&*(*current_free.get()).next_free_raw.get());
}
}
/// Do a first-fit allocation from the given free list.
unsafe fn alloc_first_fit<'a>(
size: Words,
align: Bytes,
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
walk_free_list(head, policy, |previous, current| {
if let Some(allocated) = current.try_alloc(previous, size, align, policy) {
return Some(unchecked_unwrap(NonNull::new(allocated.data() as *mut u8)));
}
None
})
}
unsafe fn alloc_with_refill<'a, 'b>(
size: Words,
align: Bytes,
head: &'b Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
) -> Result<NonNull<u8>, AllocErr> {
if let Ok(result) = alloc_first_fit(size, align, head, policy) {
return Ok(result);
}
let cell = policy.new_cell_for_free_list(size, align)?;
let head = (*cell).insert_into_free_list(head, policy);
alloc_first_fit(size, align, head, policy)
}
/// An N64 allocator.
///
/// # Safety
///
/// When used in unix environments, the allocator cannot move in memory. This is
/// typically not an issue if you're just using it as a `static` global allocator.
pub struct N64Alloc<'a> {
head: imp::Exclusive<*const FreeCell<'a>>,
size_classes: size_classes::SizeClasses<'a>,
}
unsafe impl<'a> Sync for N64Alloc<'a> {}
impl<'a> ConstInit for N64Alloc<'a> {
const INIT: N64Alloc<'a> = N64Alloc {
head: imp::Exclusive::INIT,
size_classes: size_classes::SizeClasses::INIT,
};
}
impl<'a> N64Alloc<'a> {
/// An initial `const` default construction of a `N64Alloc` allocator.
///
/// This is usable for initializing `static`s that get set as the global
/// allocator.
pub const INIT: Self = <Self as ConstInit>::INIT;
unsafe fn with_free_list_and_policy_for_size<F, T>(&self, size: Words, align: Bytes, f: F) -> T
where
F: for<'b> FnOnce(&'b Cell<*const FreeCell<'a>>, &'b dyn AllocPolicy<'a>) -> T,
| })
}
unsafe fn alloc_impl(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
let size = Bytes(layout.size());
let align = if layout.align() == 0 {
Bytes(1)
} else {
Bytes(layout.align())
};
if size.0 == 0 {
// Ensure that our made up pointer is properly aligned by using the
// alignment as the pointer.
return Ok(NonNull::new_unchecked(align.0 as *mut u8));
}
let word_size: Words = checked_round_up_to(size).ok_or(AllocErr)?;
self.with_free_list_and_policy_for_size(word_size, align, |head, policy| {
alloc_with_refill(word_size, align, head, policy)
})
}
unsafe fn dealloc_impl(&self, ptr: NonNull<u8>, layout: Layout) {
let size = Bytes(layout.size());
if size.0 == 0 {
return;
}
let size: Words = size.round_up_to();
let align = Bytes(layout.align());
self.with_free_list_and_policy_for_size(size, align, |head, policy| {
let cell = (ptr.as_ptr() as *mut CellHeader<'a> as *const CellHeader<'a>).offset(-1);
let cell = &*cell;
let cell: &AllocatedCell<'a> = &*(cell as *const CellHeader as *const AllocatedCell);
let free = cell.as_free_cell(policy);
if policy.should_merge_adjacent_free_cells() {
// Merging with the _previous_ adjacent cell is easy: it is
// already in the free list, so folding this cell into it is all
// that needs to be done. The free list can be left alone.
//
// Merging with the _next_ adjacent cell is a little harder. It
// is already in the free list, but we need to splice it out
// from the free list, since its header will become invalid
// after consolidation, and it is *this* cell's header that
// needs to be in the free list. But we don't have access to the
// pointer pointing to the soon-to-be-invalid header, and
// therefore can't adjust that pointer. So we have a delayed
// consolidation scheme. We insert this cell just after the next
// adjacent cell in the free list, and set the next adjacent
// cell's `NEXT_FREE_CAN_MERGE` bit. The next time that we walk
// the free list for allocation, the bit will be checked and the
// consolidation will happen at that time.
//
// If _both_ the previous and next adjacent cells are free, we
// are faced with a dilemma. We cannot merge all previous,
// current, and next cells together because our singly-linked
// free list doesn't allow for that kind of arbitrary appending
// and splicing. There are a few different kinds of tricks we
// could pull here, but they would increase implementation
// complexity and code size. Instead, we use a heuristic to
// choose whether to merge with the previous or next adjacent
// cell. We could choose to merge with whichever neighbor cell
// is smaller or larger, but we don't. We prefer the previous
// adjacent cell because we can greedily | {
if align <= size_of::<usize>() {
if let Some(head) = self.size_classes.get(size) {
let policy = size_classes::SizeClassAllocPolicy(&self.head);
let policy = &policy as &dyn AllocPolicy<'a>;
return head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result
});
}
}
let policy = &LARGE_ALLOC_POLICY as &dyn AllocPolicy<'a>;
self.head.with_exclusive_access(|head| {
let head_cell = Cell::new(*head);
let result = f(&head_cell, policy);
*head = head_cell.get();
result | identifier_body |