read_pool.rs

// Copyright 2020 EinsteinDB Project Authors. Licensed under Apache-2.0.
use futures::channel::oneshot;
use futures::future::TryFutureExt;
use prometheus::IntGauge;
use std::future::Future;
use std::sync::{Arc, Mutex};
use thiserror::Error;
use yatp::pool::Remote;
use yatp::queue::Extras;
use yatp::task::future::TaskCell;
/// A read pool.
/// This is a wrapper around a yatp pool.
/// It is used to limit the number of concurrent reads.
pub struct ReadPool {
pool: yatp::pool::Pool<TaskCell>,
pending_reads: Arc<Mutex<usize>>,
pending_reads_gauge: IntGauge,
}
impl ReadPool {
/// Create a new read pool.
/// `max_concurrent_reads` is the maximum number of concurrent reads.
/// `remote` is the remote to use for the pool.
/// `extras` are the extras to use for the pool.
/// `pending_reads_gauge` is the gauge to use to track the number of pending reads.
pub fn new(
max_concurrent_reads: usize,
remote: Remote,
extras: Extras,
pending_reads_gauge: IntGauge,
) -> Self {
let pool = yatp::pool::Pool::new(
max_concurrent_reads,
remote,
extras,
);
Self {
pool,
pending_reads: Arc::new(Mutex::new(0)),
pending_reads_gauge,
}
}
/// Spawn a read task on the pool.
///
/// The returned receiver resolves once the task has completed.
pub fn spawn<F>(&self, f: F) -> oneshot::Receiver<()>
where
F: Future<Output = ()> + Send + 'static,
{
let (tx, rx) = oneshot::channel();
*self.pending_reads.lock().unwrap() += 1;
self.pending_reads_gauge.inc();
let pending_reads = Arc::clone(&self.pending_reads);
let pending_reads_gauge = self.pending_reads_gauge.clone();
// `Extras::simple_default()` supplies default task metadata; this sketch
// assumes a simple (single-level) task queue.
let task = TaskCell::new(
async move {
f.await;
*pending_reads.lock().unwrap() -= 1;
pending_reads_gauge.dec();
// The caller may have dropped the receiver; ignore send errors.
let _ = tx.send(());
},
Extras::simple_default(),
);
self.pool.spawn(task);
rx
}
}
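// Example (sketch): spawning a read task and waiting for it to finish. This
// assumes a `ReadPool` value has already been constructed; `block_on` comes
// from the `futures` crate this file already imports from, and
// `read_pool_usage_sketch` is a hypothetical helper added for illustration.
#[cfg(test)]
fn read_pool_usage_sketch(pool: &ReadPool) {
let done = pool.spawn(async {
// ... perform the read here ...
});
// An `Err` here means the task (and its sender) was dropped before finishing.
futures::executor::block_on(done).expect("read task was dropped");
}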
impl ReadPool {
pub fn handle(&self) -> ReadPoolHandle {
match self {
ReadPool::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
} => ReadPoolHandle::FuturePools {
read_pool_high: read_pool_high.clone(),
read_pool_normal: read_pool_normal.clone(),
read_pool_low: read_pool_low.clone(),
},
ReadPool::Yatp {
pool,
running_tasks,
max_tasks,
pool_size,
} => ReadPoolHandle::Yatp {
remote: pool.remote().clone(),
running_tasks: running_tasks.clone(),
max_tasks: *max_tasks,
pool_size: *pool_size,
},
}
}
}
#[derive(Clone)]
pub enum ReadPoolHandle {
FuturePools {
read_pool_high: FuturePool,
read_pool_normal: FuturePool,
read_pool_low: FuturePool,
},
Yatp {
remote: Remote<TaskCell>,
running_tasks: IntGauge,
max_tasks: usize,
pool_size: usize,
},
}
impl ReadPoolHandle {
pub fn spawn<F>(&self, f: F, priority: CommandPri, task_id: u64) -> Result<(), ReadPoolError>
where
F: Future<Output = ()> + Send + 'static,
{
match self {
ReadPoolHandle::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
} => {
let pool = match priority {
CommandPri::High => read_pool_high,
CommandPri::Normal => read_pool_normal,
CommandPri::Low => read_pool_low,
};
pool.spawn(f)?;
}
ReadPoolHandle::Yatp {
remote,
running_tasks,
max_tasks,
..
} => {
let running_tasks = running_tasks.clone();
// Note that the running task number limit is not strict.
// If several tasks are spawned at the same time while the running task number
// is close to the limit, they may all pass this check and the number of running
// tasks may exceed the limit.
if running_tasks.get() as usize >= *max_tasks {
return Err(ReadPoolError::UnifiedReadPoolFull);
}
running_tasks.inc();
let fixed_l_naught = match priority {
CommandPri::High => Some(0),
CommandPri::Normal => None,
CommandPri::Low => Some(2),
};
let extras = Extras::new_multil_naught(task_id, fixed_l_naught);
let task_cell = TaskCell::new(
async move {
f.await;
running_tasks.dec();
},
extras,
);
remote.spawn(task_cell);
}
}
Ok(())
}
pub fn spawn_handle<F, T>(
&self,
f: F,
priority: CommandPri,
task_id: u64,
) -> impl Future<Output = Result<T, ReadPoolError>>
where
F: Future<Output = T> + Send + 'static,
T: Send + 'static,
{
let (tx, rx) = oneshot::channel::<T>();
let res = self.spawn(
async move {
let res = f.await;
let _ = tx.send(res);
},
priority,
task_id,
);
async move {
res?;
rx.map_err(ReadPoolError::from).await
}
}
pub fn get_normal_pool_size(&self) -> usize {
match self {
ReadPoolHandle::FuturePools {
read_pool_normal, ..
} => read_pool_normal.get_pool_size(),
ReadPoolHandle::Yatp { pool_size, .. } => *pool_size,
}
}
pub fn get_queue_size_per_worker(&self) -> usize {
match self {
ReadPoolHandle::FuturePools {
read_pool_normal, ..
} => {
read_pool_normal.get_running_task_count() as usize
/ read_pool_normal.get_pool_size()
}
ReadPoolHandle::Yatp {
running_tasks,
pool_size,
..
} => running_tasks.get() as usize / *pool_size,
}
}
}
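// Example (sketch): running a prioritized read through `spawn_handle` and
// blocking on the result. The task id (1 here) is arbitrary, and
// `spawn_handle_usage_sketch` is a hypothetical helper added for illustration.
#[cfg(test)]
fn spawn_handle_usage_sketch(handle: &ReadPoolHandle) {
let fut = handle.spawn_handle(async { 42u64 }, CommandPri::Normal, 1);
match futures::executor::block_on(fut) {
Ok(v) => assert_eq!(v, 42),
// The unified pool sheds load by rejecting spawns once `max_tasks` is hit.
Err(ReadPoolError::UnifiedReadPoolFull) => { /* back off and retry */ }
Err(e) => panic!("read task failed: {}", e),
}
}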
#[derive(Clone)]
pub struct ReporterTicker<R: SymplecticStatsReporter> {
reporter: R,
}
impl<R: SymplecticStatsReporter> PoolTicker for ReporterTicker<R> {
fn on_tick(&mut self) {
self.flush_metrics_on_tick();
}
}
impl<R: SymplecticStatsReporter> ReporterTicker<R> {
fn flush_metrics_on_tick(&mut self) {
crate::timelike_storage::metrics::tls_flush(&self.reporter);
crate::InterDagger::metrics::tls_flush(&self.reporter);
}
}
#[cfg(not(test))]
fn get_unified_read_pool_name() -> String {
"unified-read-pool".to_string()
}
pub fn build_yatp_read_pool<E: Engine, R: SymplecticStatsReporter>(
config: &UnifiedReadPoolConfig,
reporter: R,
interlocking_directorate: E,
) -> ReadPool {
let pool_size = config.pool_size;
let queue_size_per_worker = config.queue_size_per_worker;
let reporter_ticker = ReporterTicker { reporter };
ReadPool::new(
pool_size,
queue_size_per_worker,
reporter_ticker,
interlocking_directorate,
)
}
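// Sketch: wiring the unified pool together. The `UnifiedReadPoolConfig`
// fields used here follow the commented-out test below and are assumptions
// about the config type; `..Default::default()` fills the rest, and
// `build_pool_sketch` is a hypothetical helper added for illustration.
#[cfg(test)]
fn build_pool_sketch<E: Engine, R: SymplecticStatsReporter>(reporter: R, engine: E) -> ReadPool {
let config = UnifiedReadPoolConfig {
min_thread_count: 1,
max_thread_count: 2,
..Default::default()
};
build_yatp_read_pool(&config, reporter, engine)
}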
impl From<Vec<FuturePool>> for ReadPool {
fn from(mut v: Vec<FuturePool>) -> ReadPool {
assert_eq!(v.len(), 3);
let read_pool_high = v.remove(2);
let read_pool_normal = v.remove(1);
let read_pool_low = v.remove(0);
ReadPool::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
}
}
}
#[derive(Debug, Error)]
pub enum ReadPoolError {
#[error("{0}")]
FuturePoolFull(#[from] yatp_pool::Full),
#[error("Unified read pool is full")]
UnifiedReadPoolFull,
#[error("{0}")]
Canceled(#[from] oneshot::Canceled),
}
mod metrics {
use prometheus::*;
lazy_static! {
pub static ref UNIFIED_READ_POOL_RUNNING_TASKS: IntGaugeVec = register_int_gauge_vec!(
"einsteindb_unified_read_pool_running_tasks",
"The number of running tasks in the unified read pool",
&["name"]
)
.unwrap();
}
}
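// Sketch: how a per-pool gauge is fetched from the labeled metric above;
// `with_label_values` is the standard prometheus accessor for a gauge vec.
// `running_tasks_gauge_sketch` is a hypothetical helper added for illustration.
#[cfg(test)]
fn running_tasks_gauge_sketch() -> IntGauge {
metrics::UNIFIED_READ_POOL_RUNNING_TASKS.with_label_values(&["unified-read-pool"])
}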
/*
#[test]
fn test_yatp_full() {
let config = UnifiedReadPoolConfig {
min_thread_count: 1,
max_thread_count: 2,
max_tasks_per_worker: 1,
..Default::default()
};
// max number of running tasks: 2 threads * 1 task per worker = 2
let interlocking_directorate = TestEngineBuilder::new().build().unwrap();
let pool = build_yatp_read_pool(&config, DummyReporter, interlocking_directorate);
let gen_task = || {
let (tx, rx) = oneshot::channel::<()>();
let task = async move {
let _ = rx.await;
};
(task, tx)
};
let handle = pool.handle();
let (task1, tx1) = gen_task();
let (task2, _tx2) = gen_task();
let (task3, _tx3) = gen_task();
let (task4, _tx4) = gen_task();
assert!(handle.spawn(task1, CommandPri::Normal, 1).is_ok());
assert!(handle.spawn(task2, CommandPri::Normal, 2).is_ok());
thread::sleep(Duration::from_millis(300));
match handle.spawn(task3, CommandPri::Normal, 3) {
Err(ReadPoolError::UnifiedReadPoolFull) => {}
_ => panic!("should return full error"),
}
tx1.send(()).unwrap();
thread::sleep(Duration::from_millis(300));
assert!(handle.spawn(task4, CommandPri::Normal, 4).is_ok());
}
*/
write.rs

// Imports reconstructed for context; the original use-block is not part of
// this excerpt.
use crate::read::ReadHandle;
use crate::Absorb;
use crate::sync::{fence, Arc, AtomicBool, AtomicUsize, MutexGuard, Ordering};
use std::collections::VecDeque;
use std::marker::PhantomData;
use std::ops::DerefMut;
use std::ptr::NonNull;
use std::{fmt, thread};
/// A writer handle to a left-right data structure.
pub struct WriteHandle<T, O>
where
T: Absorb<O>,
{
epochs: crate::Epochs,
w_handle: NonNull<T>,
oplog: VecDeque<O>,
swap_index: usize,
r_handle: ReadHandle<T>,
last_epochs: Vec<usize>,
#[cfg(test)]
refreshes: usize,
#[cfg(test)]
is_waiting: Arc<AtomicBool>,
/// Write directly to the write handle map, since no publish has happened.
first: bool,
/// A publish has happened, but the two copies have not been synchronized yet.
second: bool,
/// If we call `Self::take` the drop needs to be different.
taken: bool,
}
// safety: if a `WriteHandle` is sent across a thread boundary, we need to be able to take
// ownership of both Ts and Os across that thread boundary. since `WriteHandle` holds a
// `ReadHandle`, we also need to respect its Send requirements.
unsafe impl<T, O> Send for WriteHandle<T, O>
where
T: Absorb<O>,
T: Send,
O: Send,
ReadHandle<T>: Send,
{
}
impl<T, O> fmt::Debug for WriteHandle<T, O>
where
T: Absorb<O> + fmt::Debug,
O: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WriteHandle")
.field("epochs", &self.epochs)
.field("w_handle", &self.w_handle)
.field("oplog", &self.oplog)
.field("swap_index", &self.swap_index)
.field("r_handle", &self.r_handle)
.field("first", &self.first)
.field("second", &self.second)
.finish()
}
}
/// A **smart pointer** to an owned backing data structure. This makes sure that the
/// data is dropped correctly (using [`Absorb::drop_second`]).
///
/// Additionally it allows for unsafely getting the inner data out using [`into_box()`](Taken::into_box).
pub struct Taken<T: Absorb<O>, O> {
inner: Option<Box<T>>,
_marker: PhantomData<O>,
}
impl<T: Absorb<O> + std::fmt::Debug, O> std::fmt::Debug for Taken<T, O> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Taken")
.field(
"inner",
self.inner
.as_ref()
.expect("inner is only taken in `into_box` which drops self"),
)
.finish()
}
}
impl<T: Absorb<O>, O> Deref for Taken<T, O> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.inner
.as_ref()
.expect("inner is only taken in `into_box` which drops self")
}
}
impl<T: Absorb<O>, O> DerefMut for Taken<T, O> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner
.as_mut()
.expect("inner is only taken in `into_box` which drops self")
}
}
impl<T: Absorb<O>, O> Taken<T, O> {
/// This is unsafe because you must call [`Absorb::drop_second`] in
/// case just dropping `T` is not safe and sufficient.
///
/// If you used the default implementation of [`Absorb::drop_second`] (which just calls [`drop`](Drop::drop))
/// you don't need to call [`Absorb::drop_second`].
pub unsafe fn into_box(mut self) -> Box<T> {
self.inner
.take()
.expect("inner is only taken here then self is dropped")
}
}
impl<T: Absorb<O>, O> Drop for Taken<T, O> {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
T::drop_second(inner);
}
}
}
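// Sketch: what you hold after `WriteHandle::take` (defined below). `Taken`
// derefs to the inner value, and dropping it runs `Absorb::drop_second`;
// `into_box` opts out of that and shifts the responsibility to the caller.
// `taken_usage_sketch` is a hypothetical helper added for illustration.
#[allow(dead_code)]
fn taken_usage_sketch<T: Absorb<()> + fmt::Debug>(taken: Taken<T, ()>) {
// Read through Deref.
println!("taken value: {:?}", &*taken);
// `taken` is dropped here, which calls T::drop_second on the inner box.
}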
impl<T, O> WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Takes out the inner backing data structure if it hasn't been taken yet. Otherwise returns `None`.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
fn take_inner(&mut self) -> Option<Taken<T, O>> {
use std::ptr;
// Can only take inner once.
if self.taken {
return None;
}
// Disallow taking again.
self.taken = true;
// first, ensure both copies are up to date
// (otherwise safely dropping the possibly duplicated w_handle data is a pain)
if self.first || !self.oplog.is_empty() {
self.publish();
}
if !self.oplog.is_empty() {
self.publish();
}
assert!(self.oplog.is_empty());
// next, grab the read handle and set it to NULL
let r_handle = self.r_handle.inner.swap(ptr::null_mut(), Ordering::Release);
// now, wait for all readers to depart
let epochs = Arc::clone(&self.epochs);
let mut epochs = epochs.lock().unwrap();
self.wait(&mut epochs);
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
// all readers have now observed the NULL, so we own both handles.
// all operations have been applied to both w_handle and r_handle.
// give the underlying data structure an opportunity to handle the one copy differently:
//
// safety: w_handle was initially created from a `Box`, and is no longer aliased.
Absorb::drop_first(unsafe { Box::from_raw(self.w_handle.as_ptr()) });
// next we take the r_handle and return it as a boxed value.
//
// this is safe, since we know that no readers are using this pointer
// anymore (due to the .wait() following swapping the pointer with NULL).
//
// safety: r_handle was initially created from a `Box`, and is no longer aliased.
let boxed_r_handle = unsafe { Box::from_raw(r_handle) };
Some(Taken {
inner: Some(boxed_r_handle),
_marker: PhantomData,
})
}
}
impl<T, O> Drop for WriteHandle<T, O>
where
T: Absorb<O>,
{
fn drop(&mut self) {
if let Some(inner) = self.take_inner() {
drop(inner);
}
}
}
impl<T, O> WriteHandle<T, O>
where
T: Absorb<O>,
{
pub(crate) fn new(w_handle: T, epochs: crate::Epochs, r_handle: ReadHandle<T>) -> Self {
Self {
epochs,
// safety: Box<T> is not null and covariant.
w_handle: unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(w_handle))) },
oplog: VecDeque::new(),
swap_index: 0,
r_handle,
last_epochs: Vec::new(),
#[cfg(test)]
is_waiting: Arc::new(AtomicBool::new(false)),
#[cfg(test)]
refreshes: 0,
first: true,
second: true,
taken: false,
}
}
fn wait(&mut self, epochs: &mut MutexGuard<'_, slab::Slab<Arc<AtomicUsize>>>) {
let mut iter = 0;
let mut starti = 0;
#[cfg(test)]
{
self.is_waiting.store(true, Ordering::Relaxed);
}
// we're over-estimating here, but slab doesn't expose its max index
self.last_epochs.resize(epochs.capacity(), 0);
'retry: loop {
// read all and see if all have changed (which is likely)
for (ii, (ri, epoch)) in epochs.iter().enumerate().skip(starti) {
// if the reader's epoch was even last we read it (which was _after_ the swap),
// then they either do not have the pointer, or must have read the pointer strictly
// after the swap. in either case, they cannot be using the old pointer value (what
// is now w_handle).
//
// note that this holds even with wrap-around since std::u{N}::MAX == 2 ^ N - 1,
// which is odd, and std::u{N}::MAX + 1 == 0 is even.
//
// note also that `ri` _may_ have been re-used since we last read into last_epochs.
// this is okay though, as a change still implies that the new reader must have
// arrived _after_ we did the atomic swap, and thus must also have seen the new
// pointer.
if self.last_epochs[ri] % 2 == 0 {
continue;
}
let now = epoch.load(Ordering::Acquire);
if now != self.last_epochs[ri] {
// reader must have seen the last swap, since they have done at least one
// operation since we last looked at their epoch, which _must_ mean that they
// are no longer using the old pointer value.
} else {
// reader may not have seen swap
// continue from this reader's epoch
starti = ii;
if !cfg!(loom) {
// how eagerly should we retry?
if iter != 20 {
iter += 1;
} else {
thread::yield_now();
}
}
#[cfg(loom)]
loom::thread::yield_now();
continue 'retry;
}
}
break;
}
#[cfg(test)]
{
self.is_waiting.store(false, Ordering::Relaxed);
}
}
/// Publish all operations appended to the log to readers.
///
/// This method needs to wait for all readers to move to the "other" copy of the data so that
/// it can replay the operational log onto the stale copy the readers used to use. This can
/// take some time, especially if readers are executing slow operations, or if there are many
/// of them.
pub fn publish(&mut self) -> &mut Self {
// we need to wait until all epochs have changed since the swaps *or* until a "finished"
// flag has been observed to be on for two subsequent iterations (there still may be some
// readers present since we did the previous refresh)
//
// NOTE: it is safe for us to hold the lock for the entire duration of the swap. we will
// only block on pre-existing readers, and they are never waiting to push onto epochs
// unless they have finished reading.
let epochs = Arc::clone(&self.epochs);
let mut epochs = epochs.lock().unwrap();
self.wait(&mut epochs);
if !self.first {
// all the readers have left!
// safety: we haven't freed the Box, and no readers are accessing the w_handle
let w_handle = unsafe { self.w_handle.as_mut() };
// safety: we will not swap while we hold this reference
let r_handle = unsafe {
self.r_handle
.inner
.load(Ordering::Acquire)
.as_ref()
.unwrap()
};
if self.second {
Absorb::sync_with(w_handle, r_handle);
self.second = false
}
// the w_handle copy has not seen any of the writes in the oplog
// the r_handle copy has not seen any of the writes following swap_index
if self.swap_index != 0 {
// we can drain out the operations that only the w_handle copy needs
//
// NOTE: the if above is because drain(0..0) would remove 0
for op in self.oplog.drain(0..self.swap_index) {
T::absorb_second(w_handle, op, r_handle);
}
}
// we cannot give owned operations to absorb_first
// since they'll also be needed by the r_handle copy
for op in self.oplog.iter_mut() {
T::absorb_first(w_handle, op, r_handle);
}
// the w_handle copy is about to become the r_handle, and can ignore the oplog
self.swap_index = self.oplog.len();
// w_handle (the old r_handle) is now fully up to date!
} else {
self.first = false
}
// at this point, we have exclusive access to w_handle, and it is up-to-date with all
// writes. the stale r_handle is accessed by readers through an Arc clone of atomic pointer
// inside the ReadHandle. oplog contains all the changes that are in w_handle, but not in
// r_handle.
//
// it's now time for us to swap the copies so that readers see up-to-date results from
// w_handle.
// swap in our w_handle, and get r_handle in return
let r_handle = self
.r_handle
.inner
.swap(self.w_handle.as_ptr(), Ordering::Release);
// NOTE: at this point, there are likely still readers using r_handle.
// safety: r_handle was also created from a Box, so it is not null and is covariant.
self.w_handle = unsafe { NonNull::new_unchecked(r_handle) };
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
for (ri, epoch) in epochs.iter() {
self.last_epochs[ri] = epoch.load(Ordering::Acquire);
}
#[cfg(test)]
{
self.refreshes += 1;
}
self
}
/// Publish as necessary to ensure that all operations are visible to readers.
///
/// `WriteHandle::publish` will *always* wait for old readers to depart and swap the maps.
/// This method will only do so if there are pending operations.
pub fn flush(&mut self) {
if self.has_pending_operations() {
self.publish();
}
}
/// Returns true if there are operations in the operational log that have not yet been exposed
/// to readers.
pub fn has_pending_operations(&self) -> bool {
// NOTE: we don't use self.oplog.is_empty() here because it's not really that important if
// there are operations that have not yet been applied to the _write_ handle.
self.swap_index < self.oplog.len()
}
/// Append the given operation to the operational log.
///
/// Its effects will not be exposed to readers until you call [`publish`](Self::publish).
pub fn append(&mut self, op: O) -> &mut Self {
self.extend(std::iter::once(op));
self
}
/// Returns a raw pointer to the write copy of the data (the one readers are _not_ accessing).
///
/// Note that it is only safe to mutate through this pointer if you _know_ that there are no
/// readers still present in this copy. This is not normally something you know; even after
/// calling `publish`, readers may still be in the write copy for some time. In general, the
/// only time you know this is okay is before the first call to `publish` (since no readers
/// ever entered the write copy).
// TODO: Make this return `Option<&mut T>`,
// and only `Some` if there are indeed no readers in the write copy.
pub fn raw_write_handle(&mut self) -> NonNull<T> {
self.w_handle
}
/// Returns the backing data structure.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
pub fn take(mut self) -> Taken<T, O> {
// It is always safe to `expect` here because `take_inner` is private
// and it is only called here and in the drop impl. Since we have an owned
// `self` we know the drop has not yet been called. And every first call of
// `take_inner` returns `Some`
self.take_inner()
.expect("inner is only taken here then self is dropped")
}
}
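// Sketch: `flush` versus `publish`. `publish` always waits for readers to
// depart and swaps the two copies; `flush` only does so when the oplog holds
// operations that readers cannot see yet. `flush_vs_publish_sketch` is a
// hypothetical helper added for illustration.
#[allow(dead_code)]
fn flush_vs_publish_sketch<T: Absorb<O>, O>(w: &mut WriteHandle<T, O>) {
if w.has_pending_operations() {
// Equivalent to w.flush(), spelled out:
w.publish();
}
w.flush(); // no pending operations remain, so this returns immediately
}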
// allow using write handle for reads
use std::ops::Deref;
impl<T, O> Deref for WriteHandle<T, O>
where
T: Absorb<O>,
{
type Target = ReadHandle<T>;
fn deref(&self) -> &Self::Target {
&self.r_handle
}
}
impl<T, O> Extend<O> for WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Add multiple operations to the operational log.
///
/// Their effects will not be exposed to readers until you call [`publish`](Self::publish).
fn extend<I>(&mut self, ops: I)
where
I: IntoIterator<Item = O>,
{
if self.first {
// Safety: we know there are no outstanding w_handle readers, since we haven't
// refreshed ever before, so we can modify it directly!
let mut w_inner = self.raw_write_handle();
let w_inner = unsafe { w_inner.as_mut() };
let r_handle = self.enter().expect("map has not yet been destroyed");
// Because we are operating directly on the map, and nothing is aliased, we do want
// to perform drops, so we invoke absorb_second.
for op in ops {
Absorb::absorb_second(w_inner, op, &*r_handle);
}
} else {
self.oplog.extend(ops);
}
}
}
/// `WriteHandle` can be sent across thread boundaries:
///
/// ```
/// use left_right::WriteHandle;
///
/// struct Data;
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// fn sync_with(&mut self, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// As long as the inner types allow that of course.
/// Namely, the data type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data(Rc<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// .. the operation type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data;
/// impl left_right::Absorb<Rc<()>> for Data {
/// fn absorb_first(&mut self, _: &mut Rc<()>, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, Rc<()>>>()
/// ```
///
/// .. and the data type has to be `Sync` so it's still okay to read through `ReadHandle`s:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::cell::Cell;
///
/// struct Data(Cell<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
#[allow(dead_code)]
struct CheckWriteHandleSend;
#[cfg(test)]
mod tests {
use crate::sync::{AtomicUsize, Mutex, Ordering};
use crate::Absorb;
use slab::Slab;
include!("./utilities.rs");
#[test]
fn append_test() {
let (mut w, _r) = crate::new::<i32, _>();
assert_eq!(w.first, true);
w.append(CounterAddOp(1));
assert_eq!(w.oplog.len(), 0);
assert_eq!(w.first, true);
w.publish();
assert_eq!(w.first, false);
w.append(CounterAddOp(2));
w.append(CounterAddOp(3));
assert_eq!(w.oplog.len(), 2);
}
#[test]
fn take_test() {
// publish twice then take with no pending operations
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
assert_eq!(*w.take(), 4);
// publish twice then pending operation published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(2));
assert_eq!(*w.take(), 6);
// normal publish then pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
assert_eq!(*w.take(), 4);
// pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
assert_eq!(*w.take(), 3);
// empty op queue
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
assert_eq!(*w.take(), 3);
// no operations
let (w, _r) = crate::new_from_empty::<i32, _>(2);
assert_eq!(*w.take(), 2);
}
#[test]
fn wait_test() {
use std::sync::{Arc, Barrier};
use std::thread;
let (mut w, _r) = crate::new::<i32, _>();
// Case 1: the epoch set is empty, so there is nothing to wait for and
// wait() returns immediately.
let test_epochs: crate::Epochs = Default::default();
let mut test_epochs = test_epochs.lock().unwrap();
w.wait(&mut test_epochs);
// Case 2: one of the readers is still reading (its epoch is odd and
// unchanged since last_epochs was recorded) when wait() is called.
let held_epoch = Arc::new(AtomicUsize::new(1));
w.last_epochs = vec![2, 2, 1];
let mut epochs_slab = Slab::new();
epochs_slab.insert(Arc::new(AtomicUsize::new(2)));
epochs_slab.insert(Arc::new(AtomicUsize::new(2)));
epochs_slab.insert(Arc::clone(&held_epoch));
let barrier = Arc::new(Barrier::new(2));
let is_waiting = Arc::clone(&w.is_waiting);
// check the writer's waiting state before calling wait.
let is_waiting_v = is_waiting.load(Ordering::Relaxed);
assert_eq!(false, is_waiting_v);
let barrier2 = Arc::clone(&barrier);
let test_epochs = Arc::new(Mutex::new(epochs_slab));
let wait_handle = thread::spawn(move || {
barrier2.wait();
let mut test_epochs = test_epochs.lock().unwrap();
w.wait(&mut test_epochs);
});
barrier.wait();
// make sure the writer has entered wait() before updating the held epoch.
while !is_waiting.load(Ordering::Relaxed) {
thread::yield_now();
}
held_epoch.fetch_add(1, Ordering::SeqCst);
// join to make sure that wait returns only after the held epoch advances.
let _ = wait_handle.join();
}
#[test]
fn flush_noblock() {
let (mut w, r) = crate::new::<i32, _>();
w.append(CounterAddOp(42));
w.publish();
assert_eq!(*r.enter().unwrap(), 42);
// pin the epoch
let _count = r.enter();
// refresh would hang here
assert_eq!(w.oplog.iter().skip(w.swap_index).count(), 0);
assert!(!w.has_pending_operations());
}
#[test]
fn flush_no_refresh() {
let (mut w, _) = crate::new::<i32, _>();
// Until we refresh, writes are written directly instead of going to the
// oplog (because there can't be any readers on the w_handle table).
assert!(!w.has_pending_operations());
w.publish();
assert!(!w.has_pending_operations());
assert_eq!(w.refreshes, 1);
w.append(CounterAddOp(42));
assert!(w.has_pending_operations());
w.publish();
assert!(!w.has_pending_operations());
assert_eq!(w.refreshes, 2);
w.append(CounterAddOp(42));
assert!(w.has_pending_operations());
w.publish();
assert!(!w.has_pending_operations());
assert_eq!(w.refreshes, 3);
}
}
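// Worked example (sketch): a minimal `Absorb` implementation and the full
// append -> publish -> read -> take flow. `Add` is a hypothetical operation
// type introduced only for this illustration.
#[allow(dead_code)]
fn left_right_flow_sketch() {
struct Add(i32);
impl Absorb<Add> for i32 {
fn absorb_first(&mut self, op: &mut Add, _: &Self) {
*self += op.0;
}
fn absorb_second(&mut self, op: Add, _: &Self) {
*self += op.0;
}
fn sync_with(&mut self, first: &Self) {
*self = *first;
}
}
let (mut w, r) = crate::new_from_empty::<i32, Add>(0);
w.append(Add(2)).append(Add(3));
// Readers still see the old copy until the writer publishes.
assert_eq!(*r.enter().unwrap(), 0);
w.publish();
assert_eq!(*r.enter().unwrap(), 5);
// take() waits for readers to depart and hands back one copy.
assert_eq!(*w.take(), 5);
}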
write.rs | // ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
// all readers have now observed the NULL, so we own both handles.
// all operations have been applied to both w_handle and r_handle.
// give the underlying data structure an opportunity to handle the one copy differently:
//
// safety: w_handle was initially crated from a `Box`, and is no longer aliased.
Absorb::drop_first(unsafe { Box::from_raw(self.w_handle.as_ptr()) });
// next we take the r_handle and return it as a boxed value.
//
// this is safe, since we know that no readers are using this pointer
// anymore (due to the.wait() following swapping the pointer with NULL).
//
// safety: r_handle was initially crated from a `Box`, and is no longer aliased.
let boxed_r_handle = unsafe { Box::from_raw(r_handle) };
Some(Taken {
inner: Some(boxed_r_handle),
_marker: PhantomData,
})
}
}
impl<T, O> Drop for WriteHandle<T, O>
where
T: Absorb<O>,
{
fn drop(&mut self) {
if let Some(inner) = self.take_inner() {
drop(inner);
}
}
}
impl<T, O> WriteHandle<T, O>
where
T: Absorb<O>,
{
pub(crate) fn new(w_handle: T, epochs: crate::Epochs, r_handle: ReadHandle<T>) -> Self {
Self {
epochs,
// safety: Box<T> is not null and covariant.
w_handle: unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(w_handle))) },
oplog: VecDeque::new(),
swap_index: 0,
r_handle,
last_epochs: Vec::new(),
#[cfg(test)]
is_waiting: Arc::new(AtomicBool::new(false)),
#[cfg(test)]
refreshes: 0,
first: true,
second: true,
taken: false,
}
}
fn wait(&mut self, epochs: &mut MutexGuard<'_, slab::Slab<Arc<AtomicUsize>>>) {
let mut iter = 0;
let mut starti = 0;
#[cfg(test)]
{
self.is_waiting.store(true, Ordering::Relaxed);
}
// we're over-estimating here, but slab doesn't expose its max index
self.last_epochs.resize(epochs.capacity(), 0);
'retry: loop {
// read all and see if all have changed (which is likely)
for (ii, (ri, epoch)) in epochs.iter().enumerate().skip(starti) {
// if the reader's epoch was even last we read it (which was _after_ the swap),
// then they either do not have the pointer, or must have read the pointer strictly
// after the swap. in either case, they cannot be using the old pointer value (what
// is now w_handle).
//
// note that this holds even with wrap-around since std::u{N}::MAX == 2 ^ N - 1,
// which is odd, and std::u{N}::MAX + 1 == 0 is even.
//
// note also that `ri` _may_ have been re-used since we last read into last_epochs.
// this is okay though, as a change still implies that the new reader must have
// arrived _after_ we did the atomic swap, and thus must also have seen the new
// pointer.
if self.last_epochs[ri] % 2 == 0 {
continue;
}
let now = epoch.load(Ordering::Acquire);
if now!= self.last_epochs[ri] {
// reader must have seen the last swap, since they have done at least one
// operation since we last looked at their epoch, which _must_ mean that they
// are no longer using the old pointer value.
} else {
// reader may not have seen swap
// continue from this reader's epoch
starti = ii;
if!cfg!(loom) {
// how eagerly should we retry?
if iter!= 20 {
iter += 1;
} else {
thread::yield_now();
}
}
#[cfg(loom)]
loom::thread::yield_now();
continue'retry;
}
}
break;
}
#[cfg(test)]
{
self.is_waiting.store(false, Ordering::Relaxed);
}
}
/// Publish all operations append to the log to reads.
///
/// This method needs to wait for all readers to move to the "other" copy of the data so that
/// it can replay the operational log onto the stale copy the readers used to use. This can
/// take some time, especially if readers are executing slow operations, or if there are many
/// of them.
pub fn publish(&mut self) -> &mut Self {
// we need to wait until all epochs have changed since the swaps *or* until a "finished"
// flag has been observed to be on for two subsequent iterations (there still may be some
// readers present since we did the previous refresh)
//
// NOTE: it is safe for us to hold the lock for the entire duration of the swap. we will
// only block on pre-existing readers, and they are never waiting to push onto epochs
// unless they have finished reading.
let epochs = Arc::clone(&self.epochs);
let mut epochs = epochs.lock().unwrap();
self.wait(&mut epochs);
if!self.first {
// all the readers have left!
// safety: we haven't freed the Box, and no readers are accessing the w_handle
let w_handle = unsafe { self.w_handle.as_mut() };
// safety: we will not swap while we hold this reference
let r_handle = unsafe {
self.r_handle
.inner
.load(Ordering::Acquire)
.as_ref()
.unwrap()
};
if self.second {
Absorb::sync_with(w_handle, r_handle);
self.second = false
}
// the w_handle copy has not seen any of the writes in the oplog
// the r_handle copy has not seen any of the writes following swap_index
if self.swap_index!= 0 {
// we can drain out the operations that only the w_handle copy needs
//
// NOTE: the if above is because drain(0..0) would remove 0
for op in self.oplog.drain(0..self.swap_index) {
T::absorb_second(w_handle, op, r_handle);
}
}
// we cannot give owned operations to absorb_first
// since they'll also be needed by the r_handle copy
for op in self.oplog.iter_mut() {
T::absorb_first(w_handle, op, r_handle);
}
// the w_handle copy is about to become the r_handle, and can ignore the oplog
self.swap_index = self.oplog.len();
// w_handle (the old r_handle) is now fully up to date!
} else {
self.first = false
}
// at this point, we have exclusive access to w_handle, and it is up-to-date with all
// writes. the stale r_handle is accessed by readers through an Arc clone of atomic pointer
// inside the ReadHandle. oplog contains all the changes that are in w_handle, but not in
// r_handle.
//
// it's now time for us to swap the copies so that readers see up-to-date results from
// w_handle.
// swap in our w_handle, and get r_handle in return
let r_handle = self
.r_handle
.inner
.swap(self.w_handle.as_ptr(), Ordering::Release);
// NOTE: at this point, there are likely still readers using r_handle.
// safety: r_handle was also created from a Box, so it is not null and is covariant.
self.w_handle = unsafe { NonNull::new_unchecked(r_handle) };
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
for (ri, epoch) in epochs.iter() {
self.last_epochs[ri] = epoch.load(Ordering::Acquire);
}
#[cfg(test)]
{
self.refreshes += 1;
}
self
}
/// Publish as necessary to ensure that all operations are visible to readers.
///
/// `WriteHandle::publish` will *always* wait for old readers to depart and swap the maps.
/// This method will only do so if there are pending operations.
pub fn flush(&mut self) {
if self.has_pending_operations() {
self.publish();
}
}
/// Returns true if there are operations in the operational log that have not yet been exposed
/// to readers.
pub fn has_pending_operations(&self) -> bool {
// NOTE: we don't use self.oplog.is_empty() here because it's not really that important if
// there are operations that have not yet been applied to the _write_ handle.
self.swap_index < self.oplog.len()
}
/// Append the given operation to the operational log.
///
/// Its effects will not be exposed to readers until you call [`publish`](Self::publish).
pub fn append(&mut self, op: O) -> &mut Self {
self.extend(std::iter::once(op));
self
}
/// Returns a raw pointer to the write copy of the data (the one readers are _not_ accessing).
///
/// Note that it is only safe to mutate through this pointer if you _know_ that there are no
/// readers still present in this copy. This is not normally something you know; even after
/// calling `publish`, readers may still be in the write copy for some time. In general, the
/// only time you know this is okay is before the first call to `publish` (since no readers
/// ever entered the write copy).
// TODO: Make this return `Option<&mut T>`,
// and only `Some` if there are indeed to readers in the write copy.
pub fn raw_write_handle(&mut self) -> NonNull<T> {
self.w_handle
}
/// Returns the backing data structure.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
pub fn take(mut self) -> Taken<T, O> {
// It is always safe to `expect` here because `take_inner` is private
// and it is only called here and in the drop impl. Since we have an owned
// `self` we know the drop has not yet been called. And every first call of
// `take_inner` returns `Some`
self.take_inner()
.expect("inner is only taken here then self is dropped")
}
}
// allow using write handle for reads
use std::ops::Deref;
impl<T, O> Deref for WriteHandle<T, O>
where
T: Absorb<O>,
{
type Target = ReadHandle<T>;
fn deref(&self) -> &Self::Target {
&self.r_handle
}
}
impl<T, O> Extend<O> for WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Add multiple operations to the operational log.
///
/// Their effects will not be exposed to readers until you call [`publish`](Self::publish)
fn extend<I>(&mut self, ops: I)
where
I: IntoIterator<Item = O>,
{
if self.first {
// Safety: we know there are no outstanding w_handle readers, since we haven't
// refreshed ever before, so we can modify it directly!
let mut w_inner = self.raw_write_handle();
let w_inner = unsafe { w_inner.as_mut() };
let r_handle = self.enter().expect("map has not yet been destroyed");
// Because we are operating directly on the map, and nothing is aliased, we do want
// to perform drops, so we invoke absorb_second.
for op in ops {
Absorb::absorb_second(w_inner, op, &*r_handle);
}
} else {
self.oplog.extend(ops);
}
}
}
/// `WriteHandle` can be sent across thread boundaries:
///
/// ```
/// use left_right::WriteHandle;
///
/// struct Data;
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// fn sync_with(&mut self, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// As long as the inner types allow that of course.
/// Namely, the data type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data(Rc<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
///.. the operation type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data;
/// impl left_right::Absorb<Rc<()>> for Data {
/// fn absorb_first(&mut self, _: &mut Rc<()>, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, Rc<()>>>()
/// ```
///
///.. and the data type has to be `Sync` so it's still okay to read through `ReadHandle`s:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::cell::Cell;
///
/// struct Data(Cell<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
#[allow(dead_code)]
struct CheckWriteHandleSend;
#[cfg(test)]
mod tests {
use crate::sync::{AtomicUsize, Mutex, Ordering};
use crate::Absorb;
use slab::Slab;
include!("./utilities.rs");
#[test]
fn append_test() {
let (mut w, _r) = crate::new::<i32, _>();
assert_eq!(w.first, true);
w.append(CounterAddOp(1));
assert_eq!(w.oplog.len(), 0);
assert_eq!(w.first, true);
w.publish();
assert_eq!(w.first, false);
w.append(CounterAddOp(2));
w.append(CounterAddOp(3));
assert_eq!(w.oplog.len(), 2);
}
#[test]
fn take_test() {
// publish twice then take with no pending operations
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
assert_eq!(*w.take(), 4);
// publish twice then pending operation published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(2));
assert_eq!(*w.take(), 6);
// normal publish then pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
assert_eq!(*w.take(), 4);
// pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
assert_eq!(*w.take(), 3);
// emptry op queue
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
assert_eq!(*w.take(), 3);
// no operations
let (w, _r) = crate::new_from_empty::<i32, _>(2);
assert_eq!(*w.take(), 2);
}
#[test]
fn wait_test() {
use std::sync::{Arc, Barrier};
use std::thread;
let (mut w, _r) = crate::new::<i32, _>();
// Case 1: If epoch is set to default.
let test_epochs: crate::Epochs = Default::default();
let mut test_epochs = test_epochs.lock().unwrap();
// since there is no epoch to waiting for, wait function will return immediately.
w.wait(&mut test_epochs);
// Case 2: If one of the reader is still reading(epoch is odd and count is same as in last_epoch)
// and wait has been called.
let held_epoch = Arc::new(AtomicUsize::new(1));
w.last_epochs = vec![2, 2, 1];
let mut epochs_slab = Slab::new();
epochs_slab.insert(Arc::new(AtomicUsize::new(2)));
epochs_slab.insert(Arc::new(AtomicUsize::new(2)));
epochs_slab.insert(Arc::clone(&held_epoch));
let barrier = Arc::new(Barrier::new(2));
let is_waiting = Arc::clone(&w.is_waiting);
// check writers waiting state before calling wait.
let is_waiting_v = is_waiting.load(Ordering::Relaxed);
assert_eq!(false, is_waiting_v);
let barrier2 = Arc::clone(&barrier);
let test_epochs = Arc::new(Mutex::new(epochs_slab));
let wait_handle = thread::spawn(move || {
barrier2.wait();
let mut test_epochs = test_epochs.lock().unwrap();
w.wait(&mut test_epochs);
});
barrier.wait();
// make sure that writer wait() will call first, only then allow to updates the held epoch.
while!is_waiting.load(Ordering::Relaxed) {
thread::yield_now();
}
held_epoch.fetch_add(1, Ordering::SeqCst);
// join to make sure that wait must return after the progress/increment
// of held_epoch.
let _ = wait_handle.join();
}
#[test]
fn flush_noblock() {
let (mut w, r) = crate::new::<i32, _>();
w.append(CounterAddOp(42));
w.publish();
assert_eq!(*r.enter().unwrap(), 42);
// pin the epoch
let _count = r.enter();
// refresh would hang here
assert_eq!(w.oplog.iter().skip(w.swap_index).count(), 0);
assert!(!w.has_pending_operations());
}
#[test]
fn flush_no_refresh() | {
let (mut w, _) = crate::new::<i32, _>();
// Until we refresh, writes are written directly instead of going to the
// oplog (because there can't be any readers on the w_handle table).
assert!(!w.has_pending_operations());
w.publish();
assert!(!w.has_pending_operations());
assert_eq!(w.refreshes, 1);
w.append(CounterAddOp(42));
assert!(w.has_pending_operations());
w.publish();
assert!(!w.has_pending_operations());
assert_eq!(w.refreshes, 2);
w.append(CounterAddOp(42));
assert!(w.has_pending_operations());
w.publish();
assert!(!w.has_pending_operations()); | identifier_body |
|
write.rs | .field("epochs", &self.epochs)
.field("w_handle", &self.w_handle)
.field("oplog", &self.oplog)
.field("swap_index", &self.swap_index)
.field("r_handle", &self.r_handle)
.field("first", &self.first)
.field("second", &self.second)
.finish()
}
}
/// A **smart pointer** to an owned backing data structure. This makes sure that the
/// data is dropped correctly (using [`Absorb::drop_second`]).
///
/// Additionally it allows for unsafely getting the inner data out using [`into_box()`](Taken::into_box).
pub struct Taken<T: Absorb<O>, O> {
inner: Option<Box<T>>,
_marker: PhantomData<O>,
}
impl<T: Absorb<O> + std::fmt::Debug, O> std::fmt::Debug for Taken<T, O> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Taken")
.field(
"inner",
self.inner
.as_ref()
.expect("inner is only taken in `into_box` which drops self"),
)
.finish()
}
}
impl<T: Absorb<O>, O> Deref for Taken<T, O> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.inner
.as_ref()
.expect("inner is only taken in `into_box` which drops self")
}
}
impl<T: Absorb<O>, O> DerefMut for Taken<T, O> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner
.as_mut()
.expect("inner is only taken in `into_box` which drops self")
}
}
impl<T: Absorb<O>, O> Taken<T, O> {
/// This is unsafe because you must call [`Absorb::drop_second`] in
/// case just dropping `T` is not safe and sufficient.
///
/// If you used the default implementation of [`Absorb::drop_second`] (which just calls [`drop`](Drop::drop))
/// you don't need to call [`Absorb::drop_second`].
pub unsafe fn into_box(mut self) -> Box<T> {
self.inner
.take()
.expect("inner is only taken here then self is dropped")
}
}
impl<T: Absorb<O>, O> Drop for Taken<T, O> {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
T::drop_second(inner);
}
}
}
impl<T, O> WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Takes out the inner backing data structure if it hasn't been taken yet. Otherwise returns `None`.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
fn take_inner(&mut self) -> Option<Taken<T, O>> {
use std::ptr;
// Can only take inner once.
if self.taken {
return None;
}
// Disallow taking again.
self.taken = true;
// first, ensure both copies are up to date
// (otherwise safely dropping the possibly duplicated w_handle data is a pain)
if self.first ||!self.oplog.is_empty() {
self.publish();
}
if!self.oplog.is_empty() {
self.publish();
}
assert!(self.oplog.is_empty());
// next, grab the read handle and set it to NULL
let r_handle = self.r_handle.inner.swap(ptr::null_mut(), Ordering::Release);
// now, wait for all readers to depart
let epochs = Arc::clone(&self.epochs);
let mut epochs = epochs.lock().unwrap();
self.wait(&mut epochs);
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
// all readers have now observed the NULL, so we own both handles.
// all operations have been applied to both w_handle and r_handle.
// give the underlying data structure an opportunity to handle the one copy differently:
//
// safety: w_handle was initially crated from a `Box`, and is no longer aliased.
Absorb::drop_first(unsafe { Box::from_raw(self.w_handle.as_ptr()) });
// next we take the r_handle and return it as a boxed value.
//
// this is safe, since we know that no readers are using this pointer
// anymore (due to the.wait() following swapping the pointer with NULL).
//
// safety: r_handle was initially crated from a `Box`, and is no longer aliased.
let boxed_r_handle = unsafe { Box::from_raw(r_handle) };
Some(Taken {
inner: Some(boxed_r_handle),
_marker: PhantomData,
})
}
}
impl<T, O> Drop for WriteHandle<T, O>
where
T: Absorb<O>,
{
fn drop(&mut self) {
if let Some(inner) = self.take_inner() {
drop(inner);
}
}
}
impl<T, O> WriteHandle<T, O>
where
T: Absorb<O>,
{
pub(crate) fn new(w_handle: T, epochs: crate::Epochs, r_handle: ReadHandle<T>) -> Self {
Self {
epochs,
// safety: Box<T> is not null and covariant.
w_handle: unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(w_handle))) },
oplog: VecDeque::new(),
swap_index: 0,
r_handle,
last_epochs: Vec::new(),
#[cfg(test)]
is_waiting: Arc::new(AtomicBool::new(false)),
#[cfg(test)]
refreshes: 0,
first: true,
second: true,
taken: false,
}
}
fn wait(&mut self, epochs: &mut MutexGuard<'_, slab::Slab<Arc<AtomicUsize>>>) {
let mut iter = 0;
let mut starti = 0;
#[cfg(test)]
{
self.is_waiting.store(true, Ordering::Relaxed);
}
// we're over-estimating here, but slab doesn't expose its max index
self.last_epochs.resize(epochs.capacity(), 0);
'retry: loop {
// read all and see if all have changed (which is likely)
for (ii, (ri, epoch)) in epochs.iter().enumerate().skip(starti) {
// if the reader's epoch was even last we read it (which was _after_ the swap),
// then they either do not have the pointer, or must have read the pointer strictly
// after the swap. in either case, they cannot be using the old pointer value (what
// is now w_handle).
//
// note that this holds even with wrap-around since std::u{N}::MAX == 2 ^ N - 1,
// which is odd, and std::u{N}::MAX + 1 == 0 is even.
//
// note also that `ri` _may_ have been re-used since we last read into last_epochs.
// this is okay though, as a change still implies that the new reader must have
// arrived _after_ we did the atomic swap, and thus must also have seen the new
// pointer.
if self.last_epochs[ri] % 2 == 0 {
continue;
}
let now = epoch.load(Ordering::Acquire);
if now!= self.last_epochs[ri] {
// reader must have seen the last swap, since they have done at least one
// operation since we last looked at their epoch, which _must_ mean that they
// are no longer using the old pointer value.
} else {
// reader may not have seen swap
// continue from this reader's epoch
starti = ii;
if!cfg!(loom) {
// how eagerly should we retry?
if iter!= 20 {
iter += 1;
} else {
thread::yield_now();
}
}
#[cfg(loom)]
loom::thread::yield_now();
continue'retry;
}
}
break;
}
#[cfg(test)]
{
self.is_waiting.store(false, Ordering::Relaxed);
}
}
/// Publish all operations append to the log to reads.
///
/// This method needs to wait for all readers to move to the "other" copy of the data so that
/// it can replay the operational log onto the stale copy the readers used to use. This can
/// take some time, especially if readers are executing slow operations, or if there are many
/// of them.
pub fn publish(&mut self) -> &mut Self {
// we need to wait until all epochs have changed since the swaps *or* until a "finished"
// flag has been observed to be on for two subsequent iterations (there still may be some
// readers present since we did the previous refresh)
//
// NOTE: it is safe for us to hold the lock for the entire duration of the swap. we will
// only block on pre-existing readers, and they are never waiting to push onto epochs
// unless they have finished reading.
let epochs = Arc::clone(&self.epochs);
let mut epochs = epochs.lock().unwrap();
self.wait(&mut epochs);
if!self.first {
// all the readers have left!
// safety: we haven't freed the Box, and no readers are accessing the w_handle
let w_handle = unsafe { self.w_handle.as_mut() };
// safety: we will not swap while we hold this reference
let r_handle = unsafe {
self.r_handle
.inner
.load(Ordering::Acquire)
.as_ref()
.unwrap()
};
if self.second {
Absorb::sync_with(w_handle, r_handle);
self.second = false
}
// the w_handle copy has not seen any of the writes in the oplog
// the r_handle copy has not seen any of the writes following swap_index
if self.swap_index!= 0 {
// we can drain out the operations that only the w_handle copy needs
//
// NOTE: the if above is because drain(0..0) would remove 0
for op in self.oplog.drain(0..self.swap_index) {
T::absorb_second(w_handle, op, r_handle);
}
}
// we cannot give owned operations to absorb_first
// since they'll also be needed by the r_handle copy
for op in self.oplog.iter_mut() {
T::absorb_first(w_handle, op, r_handle);
}
// the w_handle copy is about to become the r_handle, and can ignore the oplog
self.swap_index = self.oplog.len();
// w_handle (the old r_handle) is now fully up to date!
} else {
self.first = false
}
// at this point, we have exclusive access to w_handle, and it is up-to-date with all
// writes. the stale r_handle is accessed by readers through an Arc clone of atomic pointer
// inside the ReadHandle. oplog contains all the changes that are in w_handle, but not in
// r_handle.
//
// it's now time for us to swap the copies so that readers see up-to-date results from
// w_handle.
// swap in our w_handle, and get r_handle in return
let r_handle = self
.r_handle
.inner
.swap(self.w_handle.as_ptr(), Ordering::Release);
// NOTE: at this point, there are likely still readers using r_handle.
// safety: r_handle was also created from a Box, so it is not null and is covariant.
self.w_handle = unsafe { NonNull::new_unchecked(r_handle) };
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
for (ri, epoch) in epochs.iter() {
self.last_epochs[ri] = epoch.load(Ordering::Acquire);
}
#[cfg(test)]
{
self.refreshes += 1;
}
self
}
/// Publish as necessary to ensure that all operations are visible to readers.
///
/// `WriteHandle::publish` will *always* wait for old readers to depart and swap the maps.
/// This method will only do so if there are pending operations.
pub fn flush(&mut self) {
if self.has_pending_operations() {
self.publish();
}
}
/// Returns true if there are operations in the operational log that have not yet been exposed
/// to readers.
pub fn has_pending_operations(&self) -> bool {
// NOTE: we don't use self.oplog.is_empty() here because it's not really that important if
// there are operations that have not yet been applied to the _write_ handle.
self.swap_index < self.oplog.len()
}
/// Append the given operation to the operational log.
///
/// Its effects will not be exposed to readers until you call [`publish`](Self::publish).
pub fn append(&mut self, op: O) -> &mut Self {
self.extend(std::iter::once(op));
self
}
/// Returns a raw pointer to the write copy of the data (the one readers are _not_ accessing).
///
/// Note that it is only safe to mutate through this pointer if you _know_ that there are no
/// readers still present in this copy. This is not normally something you know; even after
/// calling `publish`, readers may still be in the write copy for some time. In general, the
/// only time you know this is okay is before the first call to `publish` (since no readers
/// ever entered the write copy).
// TODO: Make this return `Option<&mut T>`,
// and only `Some` if there are indeed to readers in the write copy.
pub fn raw_write_handle(&mut self) -> NonNull<T> {
self.w_handle
}
/// Returns the backing data structure.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
pub fn take(mut self) -> Taken<T, O> {
// It is always safe to `expect` here because `take_inner` is private
// and it is only called here and in the drop impl. Since we have an owned
// `self` we know the drop has not yet been called. And every first call of
// `take_inner` returns `Some`
self.take_inner()
.expect("inner is only taken here then self is dropped")
}
}
// allow using write handle for reads
use std::ops::Deref;
impl<T, O> Deref for WriteHandle<T, O>
where
T: Absorb<O>,
{
type Target = ReadHandle<T>;
fn deref(&self) -> &Self::Target {
&self.r_handle
}
}
impl<T, O> Extend<O> for WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Add multiple operations to the operational log.
///
/// Their effects will not be exposed to readers until you call [`publish`](Self::publish)
fn extend<I>(&mut self, ops: I)
where
I: IntoIterator<Item = O>,
{
if self.first {
// Safety: we know there are no outstanding w_handle readers, since we haven't
// refreshed ever before, so we can modify it directly!
let mut w_inner = self.raw_write_handle();
let w_inner = unsafe { w_inner.as_mut() };
let r_handle = self.enter().expect("map has not yet been destroyed");
// Because we are operating directly on the map, and nothing is aliased, we do want
// to perform drops, so we invoke absorb_second.
for op in ops {
Absorb::absorb_second(w_inner, op, &*r_handle);
}
} else {
self.oplog.extend(ops);
}
}
}
/// `WriteHandle` can be sent across thread boundaries:
///
/// ```
/// use left_right::WriteHandle;
///
/// struct Data;
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// fn sync_with(&mut self, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// As long as the inner types allow that of course.
/// Namely, the data type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data(Rc<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
///.. the operation type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data;
/// impl left_right::Absorb<Rc<()>> for Data {
/// fn absorb_first(&mut self, _: &mut Rc<()>, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, Rc<()>>>()
/// ```
///
///.. and the data type has to be `Sync` so it's still okay to read through `ReadHandle`s:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::cell::Cell;
///
/// struct Data(Cell<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
#[allow(dead_code)]
struct CheckWriteHandleSend;
#[cfg(test)]
mod tests {
use crate::sync::{AtomicUsize, Mutex, Ordering};
use crate::Absorb;
use slab::Slab;
include!("./utilities.rs");
#[test]
fn append_test() {
let (mut w, _r) = crate::new::<i32, _>();
assert_eq!(w.first, true);
w.append(CounterAddOp(1));
assert_eq!(w.oplog.len(), 0);
assert_eq!(w.first, true);
w.publish();
assert_eq!(w.first, false);
w.append(CounterAddOp(2));
w.append(CounterAddOp(3)); | #[test]
fn take_test() {
// publish twice then take with no pending operations
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
assert_eq!(*w.take(), 4);
// publish twice then pending operation published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(2));
assert_eq!(*w.take(), 6);
// normal publish then pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
assert_eq!(*w.take(), 4);
// pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w | assert_eq!(w.oplog.len(), 2);
}
| random_line_split |
write.rs | <T, O>
where
T: Absorb<O>,
{
epochs: crate::Epochs,
w_handle: NonNull<T>,
oplog: VecDeque<O>,
swap_index: usize,
r_handle: ReadHandle<T>,
last_epochs: Vec<usize>,
#[cfg(test)]
refreshes: usize,
#[cfg(test)]
is_waiting: Arc<AtomicBool>,
/// Write directly to the write handle map, since no publish has happened.
first: bool,
/// A publish has happened, but the two copies have not been synchronized yet.
second: bool,
/// If we call `Self::take` the drop needs to be different.
taken: bool,
}
// safety: if a `WriteHandle` is sent across a thread boundary, we need to be able to take
// ownership of both Ts and Os across that thread boundary. since `WriteHandle` holds a
// `ReadHandle`, we also need to respect its Send requirements.
unsafe impl<T, O> Send for WriteHandle<T, O>
where
T: Absorb<O>,
T: Send,
O: Send,
ReadHandle<T>: Send,
{
}
impl<T, O> fmt::Debug for WriteHandle<T, O>
where
T: Absorb<O> + fmt::Debug,
O: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WriteHandle")
.field("epochs", &self.epochs)
.field("w_handle", &self.w_handle)
.field("oplog", &self.oplog)
.field("swap_index", &self.swap_index)
.field("r_handle", &self.r_handle)
.field("first", &self.first)
.field("second", &self.second)
.finish()
}
}
/// A **smart pointer** to an owned backing data structure. This makes sure that the
/// data is dropped correctly (using [`Absorb::drop_second`]).
///
/// Additionally it allows for unsafely getting the inner data out using [`into_box()`](Taken::into_box).
pub struct Taken<T: Absorb<O>, O> {
inner: Option<Box<T>>,
_marker: PhantomData<O>,
}
impl<T: Absorb<O> + std::fmt::Debug, O> std::fmt::Debug for Taken<T, O> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Taken")
.field(
"inner",
self.inner
.as_ref()
.expect("inner is only taken in `into_box` which drops self"),
)
.finish()
}
}
impl<T: Absorb<O>, O> Deref for Taken<T, O> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.inner
.as_ref()
.expect("inner is only taken in `into_box` which drops self")
}
}
impl<T: Absorb<O>, O> DerefMut for Taken<T, O> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner
.as_mut()
.expect("inner is only taken in `into_box` which drops self")
}
}
impl<T: Absorb<O>, O> Taken<T, O> {
/// This is unsafe because you must call [`Absorb::drop_second`] in
/// case just dropping `T` is not safe and sufficient.
///
/// If you used the default implementation of [`Absorb::drop_second`] (which just calls [`drop`](Drop::drop))
/// you don't need to call [`Absorb::drop_second`].
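///
/// A sketch of the intended use, assuming the `Absorb` impl keeps the
/// default [`Absorb::drop_second`] (hypothetical `write_handle`; not
/// compiled here):
///
/// ```ignore
/// let taken = write_handle.take();
/// // safety: `drop_second` is the default, so plain `drop` suffices
/// let inner = unsafe { taken.into_box() };
/// drop(inner);
/// ```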
pub unsafe fn into_box(mut self) -> Box<T> {
self.inner
.take()
.expect("inner is only taken here then self is dropped")
}
}
impl<T: Absorb<O>, O> Drop for Taken<T, O> {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
T::drop_second(inner);
}
}
}
impl<T, O> WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Takes out the inner backing data structure if it hasn't been taken yet. Otherwise returns `None`.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
fn take_inner(&mut self) -> Option<Taken<T, O>> {
use std::ptr;
// Can only take inner once.
if self.taken {
return None;
}
// Disallow taking again.
self.taken = true;
// first, ensure both copies are up to date
// (otherwise safely dropping the possibly duplicated w_handle data is a pain)
if self.first || !self.oplog.is_empty() {
self.publish();
}
if !self.oplog.is_empty() {
self.publish();
}
assert!(self.oplog.is_empty());
// next, grab the read handle and set it to NULL
let r_handle = self.r_handle.inner.swap(ptr::null_mut(), Ordering::Release);
// now, wait for all readers to depart
let epochs = Arc::clone(&self.epochs);
let mut epochs = epochs.lock().unwrap();
self.wait(&mut epochs);
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
// all readers have now observed the NULL, so we own both handles.
// all operations have been applied to both w_handle and r_handle.
// give the underlying data structure an opportunity to handle the one copy differently:
//
// safety: w_handle was initially created from a `Box`, and is no longer aliased.
Absorb::drop_first(unsafe { Box::from_raw(self.w_handle.as_ptr()) });
// next we take the r_handle and return it as a boxed value.
//
// this is safe, since we know that no readers are using this pointer
// anymore (due to the .wait() following swapping the pointer with NULL).
//
// safety: r_handle was initially created from a `Box`, and is no longer aliased.
let boxed_r_handle = unsafe { Box::from_raw(r_handle) };
Some(Taken {
inner: Some(boxed_r_handle),
_marker: PhantomData,
})
}
}
impl<T, O> Drop for WriteHandle<T, O>
where
T: Absorb<O>,
{
fn drop(&mut self) {
if let Some(inner) = self.take_inner() {
drop(inner);
}
}
}
impl<T, O> WriteHandle<T, O>
where
T: Absorb<O>,
{
pub(crate) fn new(w_handle: T, epochs: crate::Epochs, r_handle: ReadHandle<T>) -> Self {
Self {
epochs,
// safety: Box<T> is not null and covariant.
w_handle: unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(w_handle))) },
oplog: VecDeque::new(),
swap_index: 0,
r_handle,
last_epochs: Vec::new(),
#[cfg(test)]
is_waiting: Arc::new(AtomicBool::new(false)),
#[cfg(test)]
refreshes: 0,
first: true,
second: true,
taken: false,
}
}
fn wait(&mut self, epochs: &mut MutexGuard<'_, slab::Slab<Arc<AtomicUsize>>>) {
let mut iter = 0;
let mut starti = 0;
#[cfg(test)]
{
self.is_waiting.store(true, Ordering::Relaxed);
}
// we're over-estimating here, but slab doesn't expose its max index
self.last_epochs.resize(epochs.capacity(), 0);
'retry: loop {
// read all and see if all have changed (which is likely)
for (ii, (ri, epoch)) in epochs.iter().enumerate().skip(starti) {
// if the reader's epoch was even last we read it (which was _after_ the swap),
// then they either do not have the pointer, or must have read the pointer strictly
// after the swap. in either case, they cannot be using the old pointer value (what
// is now w_handle).
//
// note that this holds even with wrap-around since std::u{N}::MAX == 2 ^ N - 1,
// which is odd, and std::u{N}::MAX + 1 == 0 is even.
//
// note also that `ri` _may_ have been re-used since we last read into last_epochs.
// this is okay though, as a change still implies that the new reader must have
// arrived _after_ we did the atomic swap, and thus must also have seen the new
// pointer.
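// for example: if last_epochs[ri] was 4 (even), that reader was outside
// the data structure when we sampled it, and is skipped by the check
// below. if it was 5 (odd), the reader was inside, and any change (e.g.
// to 6 on exit) proves it has since left the old copy.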
if self.last_epochs[ri] % 2 == 0 {
continue;
}
let now = epoch.load(Ordering::Acquire);
if now != self.last_epochs[ri] {
// reader must have seen the last swap, since they have done at least one
// operation since we last looked at their epoch, which _must_ mean that they
// are no longer using the old pointer value.
} else {
// reader may not have seen swap
// continue from this reader's epoch
starti = ii;
if !cfg!(loom) {
// how eagerly should we retry?
if iter != 20 {
iter += 1;
} else {
thread::yield_now();
}
}
#[cfg(loom)]
loom::thread::yield_now();
continue 'retry;
}
}
break;
}
#[cfg(test)]
{
self.is_waiting.store(false, Ordering::Relaxed);
}
}
/// Publish all operations appended to the log to readers.
///
/// This method needs to wait for all readers to move to the "other" copy of the data so that
/// it can replay the operational log onto the stale copy the readers used to use. This can
/// take some time, especially if readers are executing slow operations, or if there are many
/// of them.
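///
/// # Example
///
/// A minimal sketch of the append/publish cycle. It assumes a counter
/// that absorbs a `CounterAddOp` operation like the one in this crate's
/// test utilities, so it is not compiled here:
///
/// ```ignore
/// let (mut w, r) = left_right::new::<i32, CounterAddOp>();
/// w.append(CounterAddOp(1));
/// // readers still see the old copy
/// assert_eq!(*r.enter().unwrap(), 0);
/// w.publish();
/// // after publish, readers observe the absorbed operation
/// assert_eq!(*r.enter().unwrap(), 1);
/// ```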
pub fn publish(&mut self) -> &mut Self {
// we need to wait until all epochs have changed since the swaps *or* until a "finished"
// flag has been observed to be on for two subsequent iterations (there still may be some
// readers present since we did the previous refresh)
//
// NOTE: it is safe for us to hold the lock for the entire duration of the swap. we will
// only block on pre-existing readers, and they are never waiting to push onto epochs
// unless they have finished reading.
let epochs = Arc::clone(&self.epochs);
let mut epochs = epochs.lock().unwrap();
self.wait(&mut epochs);
if !self.first {
// all the readers have left!
// safety: we haven't freed the Box, and no readers are accessing the w_handle
let w_handle = unsafe { self.w_handle.as_mut() };
// safety: we will not swap while we hold this reference
let r_handle = unsafe {
self.r_handle
.inner
.load(Ordering::Acquire)
.as_ref()
.unwrap()
};
if self.second {
Absorb::sync_with(w_handle, r_handle);
self.second = false
}
// the w_handle copy has not seen any of the writes in the oplog
// the r_handle copy has not seen any of the writes following swap_index
if self.swap_index != 0 {
// we can drain out the operations that only the w_handle copy needs
//
// NOTE: the if above is because drain(0..0) would remove 0 elements
for op in self.oplog.drain(0..self.swap_index) {
T::absorb_second(w_handle, op, r_handle);
}
}
// we cannot give owned operations to absorb_first
// since they'll also be needed by the r_handle copy
for op in self.oplog.iter_mut() {
T::absorb_first(w_handle, op, r_handle);
}
// the w_handle copy is about to become the r_handle, and can ignore the oplog
self.swap_index = self.oplog.len();
// w_handle (the old r_handle) is now fully up to date!
} else {
self.first = false
}
// at this point, we have exclusive access to w_handle, and it is up-to-date with all
// writes. the stale r_handle is accessed by readers through an Arc clone of the atomic pointer
// inside the ReadHandle. oplog contains all the changes that are in w_handle, but not in
// r_handle.
//
// it's now time for us to swap the copies so that readers see up-to-date results from
// w_handle.
// swap in our w_handle, and get r_handle in return
let r_handle = self
.r_handle
.inner
.swap(self.w_handle.as_ptr(), Ordering::Release);
// NOTE: at this point, there are likely still readers using r_handle.
// safety: r_handle was also created from a Box, so it is not null and is covariant.
self.w_handle = unsafe { NonNull::new_unchecked(r_handle) };
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
for (ri, epoch) in epochs.iter() {
self.last_epochs[ri] = epoch.load(Ordering::Acquire);
}
#[cfg(test)]
{
self.refreshes += 1;
}
self
}
/// Publish as necessary to ensure that all operations are visible to readers.
///
/// `WriteHandle::publish` will *always* wait for old readers to depart and swap the maps.
/// This method will only do so if there are pending operations.
pub fn flush(&mut self) {
if self.has_pending_operations() {
self.publish();
}
}
/// Returns true if there are operations in the operational log that have not yet been exposed
/// to readers.
pub fn has_pending_operations(&self) -> bool {
// NOTE: we don't use self.oplog.is_empty() here because it's not really that important if
// there are operations that have not yet been applied to the _write_ handle.
self.swap_index < self.oplog.len()
}
/// Append the given operation to the operational log.
///
/// Its effects will not be exposed to readers until you call [`publish`](Self::publish).
pub fn append(&mut self, op: O) -> &mut Self {
self.extend(std::iter::once(op));
self
}
/// Returns a raw pointer to the write copy of the data (the one readers are _not_ accessing).
///
/// Note that it is only safe to mutate through this pointer if you _know_ that there are no
/// readers still present in this copy. This is not normally something you know; even after
/// calling `publish`, readers may still be in the write copy for some time. In general, the
/// only time you know this is okay is before the first call to `publish` (since no readers
/// ever entered the write copy).
// TODO: Make this return `Option<&mut T>`,
// and only `Some` if there are indeed no readers in the write copy.
pub fn raw_write_handle(&mut self) -> NonNull<T> {
self.w_handle
}
/// Returns the backing data structure.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
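///
/// # Example
///
/// A sketch, again assuming the `CounterAddOp` test operation (not
/// compiled here):
///
/// ```ignore
/// let (mut w, _r) = left_right::new_from_empty::<i32, CounterAddOp>(2);
/// w.append(CounterAddOp(2));
/// // `take` publishes the pending operation before returning the data
/// assert_eq!(*w.take(), 4);
/// ```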
pub fn take(mut self) -> Taken<T, O> {
// It is always safe to `expect` here because `take_inner` is private
// and it is only called here and in the drop impl. Since we have an owned
// `self` we know the drop has not yet been called. And the first call of
// `take_inner` always returns `Some`.
self.take_inner()
.expect("inner is only taken here then self is dropped")
}
}
// allow using write handle for reads
use std::ops::Deref;
impl<T, O> Deref for WriteHandle<T, O>
where
T: Absorb<O>,
{
type Target = ReadHandle<T>;
fn deref(&self) -> &Self::Target {
&self.r_handle
}
}
impl<T, O> Extend<O> for WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Add multiple operations to the operational log.
///
/// Their effects will not be exposed to readers until you call [`publish`](Self::publish)
fn extend<I>(&mut self, ops: I)
where
I: IntoIterator<Item = O>,
{
if self.first {
// Safety: we know there are no outstanding w_handle readers, since we haven't
// refreshed ever before, so we can modify it directly!
let mut w_inner = self.raw_write_handle();
let w_inner = unsafe { w_inner.as_mut() };
let r_handle = self.enter().expect("map has not yet been destroyed");
// Because we are operating directly on the map, and nothing is aliased, we do want
// to perform drops, so we invoke absorb_second.
for op in ops {
Absorb::absorb_second(w_inner, op, &*r_handle);
}
} else {
self.oplog.extend(ops);
}
}
}
/// `WriteHandle` can be sent across thread boundaries:
///
/// ```
/// use left_right::WriteHandle;
///
/// struct Data;
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// fn sync_with(&mut self, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// As long as the inner types allow that of course.
/// Namely, the data type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data(Rc<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// .. the operation type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data;
/// impl left_right::Absorb<Rc<()>> for Data {
/// fn absorb_first(&mut self, _: &mut Rc<()>, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, Rc<()>>>()
/// ```
///
/// .. and the data type has to be `Sync` so it's still okay to read through `ReadHandle`s:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::cell::Cell;
///
/// struct Data(Cell<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
#[allow(dead_code)]
struct CheckWriteHandleSend;
#[cfg(test)]
mod tests {
use crate::sync::{AtomicUsize, Mutex, Ordering};
use crate::Absorb;
use slab::Slab;
include!("./utilities.rs");
#[test]
fn append_test() {
let (mut w, _r) = crate::new::<i32, _>();
assert_eq!(w.first, true);
w.append(CounterAddOp(1));
assert_eq!(w.oplog.len(), 0);
assert_eq!(w.first, true);
w.publish();
assert_eq!(w.first, false);
w.append(CounterAddOp(2));
w.append(CounterAddOp(3));
assert_eq!(w.oplog.len(), 2);
}
#[test]
fn take_test() {
// publish twice then take with no pending operations
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
assert_eq!(*w.take(), 4);
// publish twice then pending operation published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(2));
assert_eq!(*w.take(), 6);
// normal publish then pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
assert_eq!(*w.take(), 4);
// pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(2));
assert_eq!(*w.take(), 4);
}
}
codec.rs
use fixedbitset::FixedBitSet;
/// An erasure codec for a single RAID stripe, backed by ISA-L's
/// Galois-field routines.
pub struct Codec {
/// Total number of columns (data plus parity) in the stripe
m: u32,
/// Number of parity columns
f: u32,
/// The encoding matrix, with the `k` data rows first and the `f` parity
/// rows last
enc_matrix: Box<[u8]>,
/// Cached encoding tables for the parity rows, in ISA-L's format
enc_tables: Box<[u8]>,
}
impl Codec {
/// Create a new `Codec` for an array of `m` total columns, `f` of which
/// hold parity.
pub fn new(m: u32, f: u32) -> Self {
let k = m - f;
let mut enc_matrix = vec![0u8; (m * k) as usize].into_boxed_slice();
let mut enc_tables = vec![0u8; (32 * k * f) as usize].into_boxed_slice();
// Use a Reed-Solomon encoding matrix rather than a Cauchy matrix for
// single parity arrays for compatibility with a faster future codec.
if f == 1 {
isa_l::gf_gen_rs_matrix(&mut enc_matrix, m, k);
} else {
isa_l::gf_gen_cauchy1_matrix(&mut enc_matrix, m, k);
}
// The encoding tables only use the encoding matrix's parity rows (i.e.
// rows k and higher)
isa_l::ec_init_tables(k, f, &enc_matrix[(k*k) as usize..],
&mut enc_tables);
Codec {m, f, enc_matrix, enc_tables}
}
/// Verify parity and identify corrupt columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Data array: `k` columns of `len` bytes each
/// - `parity`: Parity array: `f` columns of `len` bytes each
///
/// # Returns
///
/// A bitset identifying which columns are corrupt. A 1 indicates a corrupt
/// column and a 0 indicates a healthy column. If the parity does not
/// verify successfully but it cannot be determined which column(s) are
/// corrupt, then all bits will be set. All bits set indicates that the row
/// is irrecoverable without additional information. Note that when the
/// number of corrupt columns equals `f` the row will be considered
/// irrecoverable even though the original data can still be recovered via
/// combinatorial reconstruction.
pub unsafe fn check(&self, _len: usize, _data: &[*const u8],
_parity: &[*const u8]) -> FixedBitSet {
panic!("Unimplemented");
}
/// Reconstruct missing data from partial surviving columns
///
/// Given a `Codec` with `m` total columns composed of `k` data columns and
/// `f` parity columns, where one or more columns is missing, reconstruct
/// the data from the missing columns. Takes as a parameter exactly `k`
/// surviving columns, even if more than `k` columns survive. These *must*
/// be the lowest `k` surviving columns. For example, in a 5+3 array where
/// the columns 0 and 3 are missing, provide columns 1, 2, 4, 5, and 6 (data
/// columns 1, 2, and 4 and parity columns 0 and 1).
///
/// This method cannot reconstruct missing parity columns. In order to
/// reconstruct missing parity columns, you must first use this method to
/// regenerate all data columns, *and then* use `encode` to recreate the
/// parity.
///
/// # Parameters
///
/// - `len`: Size of each column, in bytes
/// - `surviving`: Exactly `k` columns of surviving data and parity,
/// sorted in order of the original column index, with
/// data columns preceding parity columns.
/// - `missing`: Reconstructed data (not parity!) columns. The
/// number should be no more than the ones count of
/// `erasures`. Upon return, they will be populated
/// with the original data of the missing columns.
/// - `erasures`: Bitmap of the column indices of the missing columns.
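///
/// A sketch of the 5+3 example above, with hypothetical column buffers
/// (the caller upholds the pointer and length requirements):
///
/// ```ignore
/// // data columns 0 and 3 are missing
/// let surviving = [d1.as_ptr(), d2.as_ptr(), d4.as_ptr(),
/// p0.as_ptr(), p1.as_ptr()];
/// let mut missing = [r0.as_mut_ptr(), r3.as_mut_ptr()];
/// let mut erasures = FixedBitSet::with_capacity(8);
/// erasures.insert(0);
/// erasures.insert(3);
/// unsafe { codec.decode(len, &surviving, &mut missing, &erasures) };
/// ```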
pub unsafe fn decode(&self, len: usize, surviving: &[*const u8],
missing: &mut [*mut u8], erasures: &FixedBitSet) {
let k = self.m - self.f;
let errs = erasures.count_ones(..k as usize) as u32;
assert!(errs > 0, "Only a fool would reconstruct an undamaged array!");
assert_eq!(errs as usize, missing.len());
let dec_tables = self.mk_decode_tables(erasures);
isa_l::ec_encode_data(len, k, errs, &dec_tables, surviving, missing);
}
/// Generate parity columns from a complete set of data columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encode(&self, len: usize, data: &[*const u8],
parity: &mut [*mut u8])
{
let k = self.m - self.f;
isa_l::ec_encode_data(len, k, self.f, &self.enc_tables, data, parity);
}
/// Encode parity, using vectored input
///
/// Like `encode`, but with discontiguous data columns.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each. They may be
/// discontiguous, and each may have a different structure.
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
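///
/// A sketch with two discontiguous data columns (hypothetical `SGList`
/// values built from `DivBuf` segments, as in the tests below):
///
/// ```ignore
/// let data = vec![sgb0, sgb1]; // k == 2 columns
/// let mut parity = [p0.as_mut_ptr()];
/// unsafe { codec.encodev(len, &data, &mut parity) };
/// ```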
pub unsafe fn encodev(&self, len: usize, data: &[SGList],
parity: &mut [*mut u8])
{
let mut cursors : Vec<SGCursor> =
data.iter()
.map(SGCursor::from)
.collect();
let mut l = 0;
while l < len {
let ncl =
cursors.iter()
.map(SGCursor::peek_len)
.min().unwrap();
let (refs, _iovecs) : (Vec<_>, Vec<_>) =
cursors.iter_mut()
.map(|sg| {
let iovec = sg.next(ncl).unwrap();
(iovec.as_ptr(), iovec)
})
.unzip();
let mut prefs: Vec<*mut u8> = parity.iter_mut()
.map(|iov| unsafe{iov.add(l)})
.collect();
self.encode(ncl, &refs, &mut prefs);
l += ncl;
}
}
/// Update parity columns from a single data column.
///
/// This method can be used to progressively update a set of parity columns
/// by feeding in one data column at a time.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: a single column of `len` bytes
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be updated upon return.
/// - `data_idx`: Column index of the supplied data column. Must lie in
/// the range `[0, k)`.
///
/// # Safety
///
/// Caller must ensure that the `parity` field is of sufficient size and
/// points to allocated memory. It need not be initialized.
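///
/// A sketch of progressive encoding for a 2+1 stripe (hypothetical
/// buffers `d0`, `d1`, and `p0`):
///
/// ```ignore
/// let mut parity = [p0.as_mut_ptr()];
/// unsafe {
/// codec.encode_update(len, &d0, &mut parity, 0);
/// codec.encode_update(len, &d1, &mut parity, 1);
/// }
/// // p0 now matches what a single `encode` call would have produced
/// ```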
pub unsafe fn encode_update(&self, len: usize, data: &[u8],
parity: &mut [*mut u8], data_idx: u32)
{
let k = self.m - self.f;
isa_l::ec_encode_data_update(len, k, self.f, data_idx, &self.enc_tables,
data, parity);
}
// Generate tables for RAID decoding
// Loosely based on erasure_code_perf.c from ISA-L's internal test suite
// NB: For reasonably small values of m and f, it should be possible to
// cache all possible decode tables.
fn mk_decode_tables(&self, erasures: &FixedBitSet) -> Box<[u8]> {
let k : usize = (self.m - self.f) as usize;
// Exclude missing parity columns from the list
let errs : usize = erasures.count_ones(..k);
let mut dec_tables = vec![0u8; 32 * k * errs].into_boxed_slice();
// To generate the decoding matrix, first select k healthy rows from the
// encoding matrix.
let mut dec_matrix_inv = vec![0u8; k * k].into_boxed_slice();
let mut skips = 0;
for i in 0..k {
while erasures.contains(i + skips) {
skips += 1;
}
let row = i + skips;
for j in 0..k {
dec_matrix_inv[k * i + j] =
self.enc_matrix[k * row + j];
}
}
// Then invert the result
let mut dec_matrix = vec![0u8; k * k].into_boxed_slice();
isa_l::gf_invert_matrix(&dec_matrix_inv, &mut dec_matrix, k as u32)
.unwrap();
// Next, select the rows corresponding to missing data
let mut dec_rows = vec![0u8; k * errs].into_boxed_slice();
for (i, r) in erasures.ones().enumerate() {
if r >= k {
break; // Exclude missing parity columns
}
for j in 0..k {
dec_rows[k * i + j] =
dec_matrix[k * r + j];
}
}
// Finally generate the fast encoding tables
isa_l::ec_init_tables(k as u32, errs as u32, &dec_rows, &mut dec_tables);
dec_tables
}
/// Return the degree of redundancy
pub fn protection(&self) -> i16 {
self.f as i16
}
/// Return the total number of disks in the raid stripe
pub fn stripesize(&self) -> i16 {
self.m as i16
}
}
// LCOV_EXCL_START
#[cfg(test)]
mod tests {
use divbuf::DivBufShared;
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::{self, Rng};
use std::ops::Deref;
use super::*;
// Roundtrip data through the codec for various array sizes and erasure sets
#[test]
pub fn comprehensive() {
let cfgs = [
(3, 1), (9, 1),
(4, 2), (10, 2),
(6, 3), (19, 3),
(8, 4), (20, 4)
];
let len = 64;
let maxdata = 28;
let maxparity = 4;
let mut rng = rand::thread_rng();
let mut data = Vec::<Vec<u8>>::new();
let mut parity = Vec::<Vec<u8>>::new();
let mut reconstructed = Vec::<Vec<u8>>::new();
for _ in 0..maxdata {
let mut column = Vec::<u8>::with_capacity(len);
for _ in 0..len {
column.push(rng.gen());
}
data.push(column);
}
for _ in 0..maxparity {
let column = vec![0u8; len];
parity.push(column);
}
for _ in 0..(maxparity as usize) {
reconstructed.push(vec![0u8; len]);
}
for cfg in &cfgs {
let m = cfg.0;
let f = cfg.1;
let k = m - f;
let codec = Codec::new(m, f);
// First encode
let mut input = Vec::<*const u8>::with_capacity(m as usize);
for x in data.iter().take(k as usize) {
input.push(x.as_ptr());
}
let mut output = Vec::<*mut u8>::with_capacity(f as usize);
for x in parity.iter_mut().take(f as usize) {
output.push(x.as_mut_ptr());
}
unsafe{ codec.encode(len, &input, &mut output); }
// Iterate over all possible failure combinations
for erasures_vec in (0..m).combinations(f as usize) {
// Don't attempt to decode if the only missing columns are parity
if erasures_vec[0] >= k {
continue;
}
// Decode
let mut surviving = Vec::<*const u8>::with_capacity(m as usize);
let mut erasures = FixedBitSet::with_capacity(m as usize);
for b in &erasures_vec {
erasures.insert(*b as usize);
}
let mut skips = 0;
for i in 0..(k as usize) {
while erasures.contains(i + skips) {
skips += 1;
}
let r = i + skips;
if r < k as usize {
surviving.push(data[r].as_ptr());
} else {
surviving.push(parity[r - k as usize].as_ptr());
}
}
let data_errs = erasures.count_ones(..k as usize);
let mut decoded = Vec::<*mut u8>::with_capacity(data_errs);
for x in reconstructed.iter_mut().take(data_errs) {
decoded.push(x.as_mut_ptr());
}
unsafe { codec.decode(len, &surviving, &mut decoded, &erasures); }
// Finally, compare
for i in 0..data_errs {
assert_eq!(&data[erasures_vec[i] as usize], &reconstructed[i],
"miscompare for m={:?}, f={:?}, erasures={:?}",
m, f, erasures_vec);
}
}
}
}
// Test basic RAID functionality using a small chunksize
#[test]
pub fn encode_decode() {
let len = 8;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, encode
let mut d0 = vec![0u8;len];
let mut d1 = vec![0u8;len];
let mut p0 = vec![0u8;len];
for i in 0..len {
d0[i] = rng.gen();
d1[i] = rng.gen();
}
unsafe {
codec.encode(len, &[d0.as_ptr(), d1.as_ptr()], &mut [p0.as_mut_ptr()]);
}
// Now delete column 0 and rebuild
let mut r0 = vec![0u8;len];
let mut erasures = FixedBitSet::with_capacity(3);
erasures.insert(0);
unsafe {
codec.decode(len, &[d1.as_ptr(), p0.as_ptr()], &mut [r0.as_mut_ptr()],
&erasures);
}
// Verify that column was reconstructed correctly
assert_eq!(d0, r0);
}
// Test encoding from discontiguous data columns
#[test]
pub fn encodev() {
let len = 16;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, make the reference parity using contiguous encode
let mut da0 = vec![0u8;len];
let mut da1 = vec![0u8;len];
let mut pa0 = vec![0u8;len];
for i in 0..len {
da0[i] = rng.gen();
da1[i] = rng.gen();
}
unsafe {
codec.encode(len, &[da0.as_ptr(), da1.as_ptr()],
&mut [pa0.as_mut_ptr()]);
}
// Next, split the same data into discontiguous SGLists
// First segments are identically sized
let db0p0 = DivBufShared::from(Vec::from(&da0[0..4]));
let db1p0 = DivBufShared::from(Vec::from(&da1[0..4]));
// db0 has longer 2nd segment
let db0p1 = DivBufShared::from(Vec::from(&da0[4..9]));
let db1p1 = DivBufShared::from(Vec::from(&da1[4..8]));
// db1 has longer 3rd segment
let db0p2 = DivBufShared::from(Vec::from(&da0[9..14]));
let db1p2 = DivBufShared::from(Vec::from(&da1[8..14]));
// final segments are identically sized
let db0p3 = DivBufShared::from(Vec::from(&da0[14..len]));
let db1p3 = DivBufShared::from(Vec::from(&da1[14..len]));
let sgb0 = vec![db0p0.try_const().unwrap(),
db0p1.try_const().unwrap(),
db0p2.try_const().unwrap(),
db0p3.try_const().unwrap()];
let sgb1 = vec![db1p0.try_const().unwrap(),
db1p1.try_const().unwrap(),
db1p2.try_const().unwrap(),
db1p3.try_const().unwrap()];
let data = vec![sgb0, sgb1];
let mut pa1 = vec![0u8; len];
let mut pslice = [pa1.as_mut_ptr()];
unsafe { codec.encodev(len, &data, &mut pslice[..]); }
assert_eq!(pa0, pa1);
}
// Test basic RAID update functionality using a small chunksize
#[test]
pub fn encode_update_decode() {
let len = 8;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, encode
let mut d0 = vec![0u8;len];
let mut d1 = vec![0u8;len];
let mut p0 = vec![0u8;len];
for i in 0..len {
d0[i] = rng.gen();
d1[i] = rng.gen();
}
unsafe {
codec.encode_update(len, &d0, &mut [p0.as_mut_ptr()], 0);
codec.encode_update(len, &d1, &mut [p0.as_mut_ptr()], 1);
}
// Now delete column 0 and rebuild
let mut r0 = vec![0u8;len];
let mut erasures = FixedBitSet::with_capacity(3);
erasures.insert(0);
unsafe {
codec.decode(len, &[d1.as_ptr(), p0.as_ptr()], &mut [r0.as_mut_ptr()],
&erasures);
}
// Verify that column was reconstructed correctly
assert_eq!(d0, r0);
}
// If the encoding matrix ever changes, it will change the on-disk format.
// Generate several different encoding matrices and compare them against
// golden masters
#[test]
fn format_stability() {
let testpairs = [
(3, 1, vec![1, 0,
0, 1,
1, 1]),
(5, 1, vec![1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
1, 1, 1, 1]),
];
for (m, f, golden) in &testpairs {
let codec = Codec::new(*m, *f);
assert_eq!(&codec.enc_matrix[..], &golden[..]);
}
}
}
// LCOV_EXCL_STOP
/// Reconstruct missing data from partial surviving columns
///
/// Given a `Codec` with `m` total columns composed of `k` data columns and | random_line_split |
codec.rs | // single parity arrays for compatibility with a faster future codec.
if f == 1 {
isa_l::gf_gen_rs_matrix(&mut enc_matrix, m, k);
} else {
isa_l::gf_gen_cauchy1_matrix(&mut enc_matrix, m, k);
}
// The encoding tables only use the encoding matrix's parity rows (e.g.
// rows k and higher)
isa_l::ec_init_tables(k, f, &enc_matrix[(k*k) as usize..],
&mut enc_tables);
Codec {m, f, enc_matrix, enc_tables}
}
/// Verify parity and identify corrupt columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Data array: `k` columns of `len` bytes each
/// - `parity`: Parity array: `f` columns of `len` bytes each
///
/// # Returns
///
/// A bitset identifies which columns are corrupt. A 1 indicates a corrupt
/// column and a 0 indicates a healthy column. If the parity does not
/// verify successfully but it cannot be determined which column(s) are
/// corrupt, then all bits will be set. All bits set indicates that the row
/// is irrecoverable without additional information. Note that when the
/// number of corrupt columns equals `f` the row will be considered
/// irrecoverable even though the original data can still be recovered via
/// combinatorial reconstruction.
pub unsafe fn check(&self, _len: usize, _data: &[*const u8],
_parity: &[*const u8]) -> FixedBitSet {
panic!("Unimplemented");
}
/// Reconstruct missing data from partial surviving columns
///
/// Given a `Codec` with `m` total columns composed of `k` data columns and
/// `f` parity columns, where one or more columns is missing, reconstruct
/// the data from the missing columns. Takes as a parameter exactly `k`
/// surviving columns, even if more than `k` columns survive. These *must*
/// be the lowest `k` surviving columns. For example, in a 5+3 array where
/// the columns 0 and 3 are missing, Provide columns 1, 2, 4, 5, and 6 (data
/// columns 1, 2, and 4 and parity columns 0 and 1).
///
/// This method cannot reconstruct missing parity columns. In order to
/// reconstruct missing parity columns, you must first use this method to
/// regenerate all data columns, *and then* use `encode` to recreate the
/// parity.
///
/// # Parameters
///
/// - `len`: Size of each column, in bytes
/// - `surviving`: Exactly `k` columns of surviving data and parity,
/// sorted in order of the original column index, with
/// data columns preceding parity columns.
/// - `missing`: Reconstructed data (not parity!) columns. The
/// number should be no more than the ones count of
/// `erasures`. Upon return, they will be populated
/// with the original data of the missing columns.
/// - `erasures`: Bitmap of the column indices of the missing columns.
pub unsafe fn decode(&self, len: usize, surviving: &[*const u8],
missing: &mut [*mut u8], erasures: &FixedBitSet) {
let k = self.m - self.f;
let errs = erasures.count_ones(..k as usize) as u32;
assert!(errs > 0, "Only a fool would reconstruct an undamaged array!");
assert_eq!(errs as usize, missing.len());
let dec_tables = self.mk_decode_tables(erasures);
isa_l::ec_encode_data(len, k, errs, &dec_tables, surviving, missing);
}
/// Generate parity columns from a complete set of data columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encode(&self, len: usize, data: &[*const u8],
parity: &mut [*mut u8])
{
let k = self.m - self.f;
isa_l::ec_encode_data(len, k, self.f, &self.enc_tables, data, parity);
}
/// Encode parity, using vectored input
///
/// Like `encode`, but with discontiguous the data columns.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each. They may be
/// discontiguous, and each may have a different structure.
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encodev(&self, len: usize, data: &[SGList],
parity: &mut [*mut u8])
{
let mut cursors : Vec<SGCursor> =
data.iter()
.map(SGCursor::from)
.collect();
let mut l = 0;
while l < len {
let ncl =
cursors.iter()
.map(SGCursor::peek_len)
.min().unwrap();
let (refs, _iovecs) : (Vec<_>, Vec<_>) =
cursors.iter_mut()
.map(|sg| {
let iovec = sg.next(ncl).unwrap();
(iovec.as_ptr(), iovec)
})
.unzip();
let mut prefs: Vec<*mut u8> = parity.iter_mut()
.map(|iov| unsafe{iov.add(l)})
.collect();
self.encode(ncl, &refs, &mut prefs);
l += ncl;
}
}
/// Update parity columns from a single data column.
///
/// This method can be used to progressively update a set of parity columns
/// by feeding in one data column at a time.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: a single column of `len` bytes
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be updated upon return.
/// - `data_idx`: Column index of the supplied data column. Must lie in
/// the range `[0, k)`.
///
/// # Safety
///
/// Caller must ensure that the `parity` field is of sufficient size and
/// points to allocated memory. It need not be initialized.
pub unsafe fn encode_update(&self, len: usize, data: &[u8],
parity: &mut [*mut u8], data_idx: u32)
{
let k = self.m - self.f;
isa_l::ec_encode_data_update(len, k, self.f, data_idx, &self.enc_tables,
data, parity);
}
// Generate tables for RAID decoding
// Loosely based on erasure_code_perf.c from ISA-L's internal test suite
// NB: For reasonably small values of m and f, it should be possible to
// cache all possible decode tables.
fn mk_decode_tables(&self, erasures: &FixedBitSet) -> Box<[u8]> {
let k : usize = (self.m - self.f) as usize;
// Exclude missing parity columns from the list
let errs : usize = erasures.count_ones(..k);
let mut dec_tables = vec![0u8; 32 * k * errs].into_boxed_slice();
// To generate the decoding matrix, first select k healthy rows from the
// encoding matrix.
let mut dec_matrix_inv = vec![0u8; k * k].into_boxed_slice();
let mut skips = 0;
for i in 0..k {
while erasures.contains(i + skips) {
skips += 1;
}
let row = i + skips;
for j in 0..k {
dec_matrix_inv[k * i + j] =
self.enc_matrix[k * row + j];
}
}
// Then invert the result
let mut dec_matrix = vec![0u8; k * k].into_boxed_slice();
isa_l::gf_invert_matrix(&dec_matrix_inv, &mut dec_matrix, k as u32)
.unwrap();
// Finally, select the rows corresponding to missing data
let mut dec_rows = vec![0u8; k * errs].into_boxed_slice();
for (i, r) in erasures.ones().enumerate() {
if r >= k |
for j in 0..k {
dec_rows[k * i + j] =
dec_matrix[k * r + j];
}
}
// Finally generate the fast encoding tables
isa_l::ec_init_tables(k as u32, errs as u32, &dec_rows, &mut dec_tables);
dec_tables
}
/// Return the degree of redundancy
pub fn protection(&self) -> i16 {
self.f as i16
}
/// Return the total number of disks in the raid stripe
pub fn stripesize(&self) -> i16 {
self.m as i16
}
}
// LCOV_EXCL_START
#[cfg(test)]
mod tests {
use divbuf::DivBufShared;
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::{self, Rng};
use std::ops::Deref;
use super::*;
// Roundtrip data through the codec for various array sizes and erasure sets
#[test]
pub fn comprehensive() {
let cfgs = [
(3, 1), (9, 1),
(4, 2), (10, 2),
(6, 3), (19, 3),
(8, 4), (20, 4)
];
let len = 64;
let maxdata = 28;
let maxparity = 4;
let mut rng = rand::thread_rng();
let mut data = Vec::<Vec<u8>>::new();
let mut parity = Vec::<Vec<u8>>::new();
let mut reconstructed = Vec::<Vec<u8>>::new();
for _ in 0..maxdata {
let mut column = Vec::<u8>::with_capacity(len);
for _ in 0..len {
column.push(rng.gen());
}
data.push(column);
}
for _ in 0..maxparity {
let column = vec![0u8; len];
parity.push(column);
}
for _ in 0..(maxparity as usize) {
reconstructed.push(vec![0u8; len]);
}
for cfg in &cfgs {
let m = cfg.0;
let f = cfg.1;
let k = m - f;
let codec = Codec::new(m, f);
// First encode
let mut input = Vec::<*const u8>::with_capacity(m as usize);
for x in data.iter().take(k as usize) {
input.push(x.as_ptr());
}
let mut output = Vec::<*mut u8>::with_capacity(f as usize);
for x in parity.iter_mut().take(f as usize) {
output.push(x.as_mut_ptr());
}
unsafe{ codec.encode(len, &input, &mut output); }
// Iterate over all possible failure combinations
for erasures_vec in (0..m).combinations(f as usize) {
// Don't attempt to decode if the only missing columns are parity
if erasures_vec[0] >= k {
continue;
}
// Decode
let mut surviving = Vec::<*const u8>::with_capacity(m as usize);
let mut erasures = FixedBitSet::with_capacity(m as usize);
for b in &erasures_vec {
erasures.insert(*b as usize);
}
let mut skips = 0;
for i in 0..(k as usize) {
while erasures.contains(i + skips) {
skips += 1;
}
let r = i + skips;
if r < k as usize {
surviving.push(data[r].as_ptr());
} else {
surviving.push(parity[r - k as usize].as_ptr());
}
}
let data_errs = erasures.count_ones(..k as usize);
let mut decoded = Vec::<*mut u8>::with_capacity(data_errs);
for x in reconstructed.iter_mut().take(data_errs) {
decoded.push(x.as_mut_ptr());
}
unsafe { codec.decode(len, &surviving, &mut decoded, &erasures); }
// Finally, compare
for i in 0..data_errs {
assert_eq!(&data[erasures_vec[i] as usize], &reconstructed[i],
"miscompare for m={:?}, f={:?}, erasures={:?}",
m, f, erasures_vec);
}
}
}
}
// Test basic RAID functionality using a small chunksize
#[test]
pub fn encode_decode() {
let len = 8;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, encode
let mut d0 = vec![0u8;len];
let mut d1 = vec![0u8;len];
let mut p0 = vec![0u8;len];
for i in 0..len {
d0[i] = rng.gen();
d1[i] = rng.gen();
}
unsafe {
codec.encode(len, &[d0.as_ptr(), d1.as_ptr()], &mut [p0.as_mut_ptr()]);
}
// Now delete column 0 and rebuild
let mut r0 = vec![0u8;len];
let mut erasures = FixedBitSet::with_capacity(3);
erasures.insert(0);
unsafe {
codec.decode(len, &[d1.as_ptr(), p0.as_ptr()], &mut [r0.as_mut_ptr()],
&erasures);
}
// Verify that column was reconstructed correctly
assert_eq!(d0, r0);
}
// Test encoding from discontiguous data columns
#[test]
pub fn encodev() {
let len = 16;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, make the reference parity using contiguous encode
let mut da0 = vec![0u8;len];
let mut da1 = vec![0u8;len];
let mut pa0 = vec![0u8;len];
for i in 0..len {
da0[i] = rng.gen();
da1[i] = rng.gen();
}
unsafe {
codec.encode(len, &[da0.as_ptr(), da1.as_ptr()],
&mut [pa0.as_mut_ptr()]);
}
// Next, split the same data into discontiguous SGLists
// First segments are identically sized
let db0p0 = DivBufShared::from(Vec::from(&da0[0..4]));
let db1p0 = DivBufShared::from(Vec::from(&da1[0..4]));
// db0 has longer 2nd segment
let db0p1 = DivBufShared::from(Vec::from(&da0[4..9]));
let db1p1 = DivBufShared::from(Vec::from(&da1[4..8]));
// db1 has longer 3rd segment
let db0p2 = DivBufShared::from(Vec::from(&da0[9..14]));
let db1p2 = DivBufShared::from(Vec::from(&da1[8..14]));
// final segments are identically sized
let db0p3 = DivBufShared::from(Vec::from(&da0[14..len]));
// final segments are identically sized
let db1p3 = DivBufShared::from(Vec::from(&da1[14..len]));
let sgb0 = vec![db0p0.try_const().unwrap(),
db0p1.try_const().unwrap(),
db0p2.try_const().unwrap(),
db0p3.try_const().unwrap()];
let sgb1 = vec![db1p0.try_const().unwrap(),
db1p1.try_const().unwrap(),
db1p2.try_const().unwrap(),
db1p3.try_const().unwrap()];
let data = vec![sgb0, sgb1];
let mut pa1 = vec![0u8; len];
let mut pslice = [pa1.as_mut_ptr()];
unsafe { codec.encodev(len, &data, &mut pslice[..]); }
assert_eq!(pa0, pa1);
}
// Test basic RAID update functionality using a small chunksize
#[test]
pub fn encode_update_decode() {
let len = 8;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, encode
let mut d0 = vec![0u8;len];
let mut d1 = vec![0u8;len];
let mut p0 = vec![0u8;len];
for i in 0..len {
d0[i] = rng.gen();
d1[i] = rng.gen();
}
unsafe {
codec.encode_update(len, &d0, &mut [p0.as_mut_ptr()], 0);
codec.encode_update(len, &d1, &mut [p0.as_mut_ptr()], 1);
}
// Now delete column 0 and rebuild
let mut r0 = vec![0u8;len];
let mut erasures = FixedBitSet::with_capacity(3);
erasures.insert(0);
unsafe {
codec.decode(len, &[d1.as_ptr(), p0.as_ptr()], &mut [r0.as_mut_ptr()],
&erasures);
}
// Verify that column was reconstructed correctly
assert_eq!(d0, r0);
}
// If the encoding matrix ever changes, it will change the on-disk format.
// Generate several different encoding matrices and compare them against
// golden masters
#[test]
fn format_stability() {
let testpairs = [
(3, 1, vec![1, 0,
0, 1,
1, 1]),
(5, 1, vec![1, 0, 0 | {
break; // Exclude missing parity columns
} | conditional_block |
codec.rs | // single parity arrays for compatibility with a faster future codec.
if f == 1 {
isa_l::gf_gen_rs_matrix(&mut enc_matrix, m, k);
} else {
isa_l::gf_gen_cauchy1_matrix(&mut enc_matrix, m, k);
}
// The encoding tables only use the encoding matrix's parity rows (e.g.
// rows k and higher)
isa_l::ec_init_tables(k, f, &enc_matrix[(k*k) as usize..],
&mut enc_tables);
Codec {m, f, enc_matrix, enc_tables}
}
/// Verify parity and identify corrupt columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Data array: `k` columns of `len` bytes each
/// - `parity`: Parity array: `f` columns of `len` bytes each
///
/// # Returns
///
/// A bitset identifies which columns are corrupt. A 1 indicates a corrupt
/// column and a 0 indicates a healthy column. If the parity does not
/// verify successfully but it cannot be determined which column(s) are
/// corrupt, then all bits will be set. All bits set indicates that the row
/// is irrecoverable without additional information. Note that when the
/// number of corrupt columns equals `f` the row will be considered
/// irrecoverable even though the original data can still be recovered via
/// combinatorial reconstruction.
pub unsafe fn | (&self, _len: usize, _data: &[*const u8],
_parity: &[*const u8]) -> FixedBitSet {
panic!("Unimplemented");
}
/// Reconstruct missing data from partial surviving columns
///
/// Given a `Codec` with `m` total columns composed of `k` data columns and
/// `f` parity columns, where one or more columns is missing, reconstruct
/// the data from the missing columns. Takes as a parameter exactly `k`
/// surviving columns, even if more than `k` columns survive. These *must*
/// be the lowest `k` surviving columns. For example, in a 5+3 array where
/// the columns 0 and 3 are missing, Provide columns 1, 2, 4, 5, and 6 (data
/// columns 1, 2, and 4 and parity columns 0 and 1).
///
/// This method cannot reconstruct missing parity columns. In order to
/// reconstruct missing parity columns, you must first use this method to
/// regenerate all data columns, *and then* use `encode` to recreate the
/// parity.
///
/// # Parameters
///
/// - `len`: Size of each column, in bytes
/// - `surviving`: Exactly `k` columns of surviving data and parity,
/// sorted in order of the original column index, with
/// data columns preceding parity columns.
/// - `missing`: Reconstructed data (not parity!) columns. The
/// number should be no more than the ones count of
/// `erasures`. Upon return, they will be populated
/// with the original data of the missing columns.
/// - `erasures`: Bitmap of the column indices of the missing columns.
pub unsafe fn decode(&self, len: usize, surviving: &[*const u8],
missing: &mut [*mut u8], erasures: &FixedBitSet) {
let k = self.m - self.f;
let errs = erasures.count_ones(..k as usize) as u32;
assert!(errs > 0, "Only a fool would reconstruct an undamaged array!");
assert_eq!(errs as usize, missing.len());
let dec_tables = self.mk_decode_tables(erasures);
isa_l::ec_encode_data(len, k, errs, &dec_tables, surviving, missing);
}
/// Generate parity columns from a complete set of data columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encode(&self, len: usize, data: &[*const u8],
parity: &mut [*mut u8])
{
let k = self.m - self.f;
isa_l::ec_encode_data(len, k, self.f, &self.enc_tables, data, parity);
}
/// Encode parity, using vectored input
///
/// Like `encode`, but with discontiguous the data columns.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each. They may be
/// discontiguous, and each may have a different structure.
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encodev(&self, len: usize, data: &[SGList],
parity: &mut [*mut u8])
{
let mut cursors : Vec<SGCursor> =
data.iter()
.map(SGCursor::from)
.collect();
let mut l = 0;
while l < len {
let ncl =
cursors.iter()
.map(SGCursor::peek_len)
.min().unwrap();
let (refs, _iovecs) : (Vec<_>, Vec<_>) =
cursors.iter_mut()
.map(|sg| {
let iovec = sg.next(ncl).unwrap();
(iovec.as_ptr(), iovec)
})
.unzip();
let mut prefs: Vec<*mut u8> = parity.iter_mut()
.map(|iov| unsafe{iov.add(l)})
.collect();
self.encode(ncl, &refs, &mut prefs);
l += ncl;
}
}
/// Update parity columns from a single data column.
///
/// This method can be used to progressively update a set of parity columns
/// by feeding in one data column at a time.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: a single column of `len` bytes
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be updated upon return.
/// - `data_idx`: Column index of the supplied data column. Must lie in
/// the range `[0, k)`.
///
/// # Safety
///
/// Caller must ensure that the `parity` field is of sufficient size and
/// points to allocated memory. It need not be initialized.
pub unsafe fn encode_update(&self, len: usize, data: &[u8],
parity: &mut [*mut u8], data_idx: u32)
{
let k = self.m - self.f;
isa_l::ec_encode_data_update(len, k, self.f, data_idx, &self.enc_tables,
data, parity);
}
// Generate tables for RAID decoding
// Loosely based on erasure_code_perf.c from ISA-L's internal test suite
// NB: For reasonably small values of m and f, it should be possible to
// cache all possible decode tables.
fn mk_decode_tables(&self, erasures: &FixedBitSet) -> Box<[u8]> {
let k : usize = (self.m - self.f) as usize;
// Exclude missing parity columns from the list
let errs : usize = erasures.count_ones(..k);
let mut dec_tables = vec![0u8; 32 * k * errs].into_boxed_slice();
// To generate the decoding matrix, first select k healthy rows from the
// encoding matrix.
let mut dec_matrix_inv = vec![0u8; k * k].into_boxed_slice();
let mut skips = 0;
for i in 0..k {
while erasures.contains(i + skips) {
skips += 1;
}
let row = i + skips;
for j in 0..k {
dec_matrix_inv[k * i + j] =
self.enc_matrix[k * row + j];
}
}
// Then invert the result
let mut dec_matrix = vec![0u8; k * k].into_boxed_slice();
isa_l::gf_invert_matrix(&dec_matrix_inv, &mut dec_matrix, k as u32)
.unwrap();
// Finally, select the rows corresponding to missing data
let mut dec_rows = vec![0u8; k * errs].into_boxed_slice();
for (i, r) in erasures.ones().enumerate() {
if r >= k {
break; // Exclude missing parity columns
}
for j in 0..k {
dec_rows[k * i + j] =
dec_matrix[k * r + j];
}
}
// Finally generate the fast encoding tables
isa_l::ec_init_tables(k as u32, errs as u32, &dec_rows, &mut dec_tables);
dec_tables
}
/// Return the degree of redundancy
pub fn protection(&self) -> i16 {
self.f as i16
}
/// Return the total number of disks in the raid stripe
pub fn stripesize(&self) -> i16 {
self.m as i16
}
}
// LCOV_EXCL_START
#[cfg(test)]
mod tests {
use divbuf::DivBufShared;
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::{self, Rng};
use std::ops::Deref;
use super::*;
// Roundtrip data through the codec for various array sizes and erasure sets
#[test]
pub fn comprehensive() {
let cfgs = [
(3, 1), (9, 1),
(4, 2), (10, 2),
(6, 3), (19, 3),
(8, 4), (20, 4)
];
let len = 64;
let maxdata = 28;
let maxparity = 4;
let mut rng = rand::thread_rng();
let mut data = Vec::<Vec<u8>>::new();
let mut parity = Vec::<Vec<u8>>::new();
let mut reconstructed = Vec::<Vec<u8>>::new();
for _ in 0..maxdata {
let mut column = Vec::<u8>::with_capacity(len);
for _ in 0..len {
column.push(rng.gen());
}
data.push(column);
}
for _ in 0..maxparity {
let column = vec![0u8; len];
parity.push(column);
}
for _ in 0..(maxparity as usize) {
reconstructed.push(vec![0u8; len]);
}
for cfg in &cfgs {
let m = cfg.0;
let f = cfg.1;
let k = m - f;
let codec = Codec::new(m, f);
// First encode
let mut input = Vec::<*const u8>::with_capacity(m as usize);
for x in data.iter().take(k as usize) {
input.push(x.as_ptr());
}
let mut output = Vec::<*mut u8>::with_capacity(f as usize);
for x in parity.iter_mut().take(f as usize) {
output.push(x.as_mut_ptr());
}
unsafe{ codec.encode(len, &input, &mut output); }
// Iterate over all possible failure combinations
for erasures_vec in (0..m).combinations(f as usize) {
// Don't attempt to decode if the only missing columns are parity
if erasures_vec[0] >= k {
continue;
}
// Decode
let mut surviving = Vec::<*const u8>::with_capacity(m as usize);
let mut erasures = FixedBitSet::with_capacity(m as usize);
for b in &erasures_vec {
erasures.insert(*b as usize);
}
let mut skips = 0;
for i in 0..(k as usize) {
while erasures.contains(i + skips) {
skips += 1;
}
let r = i + skips;
if r < k as usize {
surviving.push(data[r].as_ptr());
} else {
surviving.push(parity[r - k as usize].as_ptr());
}
}
let data_errs = erasures.count_ones(..k as usize);
let mut decoded = Vec::<*mut u8>::with_capacity(data_errs);
for x in reconstructed.iter_mut().take(data_errs) {
decoded.push(x.as_mut_ptr());
}
unsafe { codec.decode(len, &surviving, &mut decoded, &erasures); }
// Finally, compare
for i in 0..data_errs {
assert_eq!(&data[erasures_vec[i] as usize], &reconstructed[i],
"miscompare for m={:?}, f={:?}, erasures={:?}",
m, f, erasures_vec);
}
}
}
}
// Test basic RAID functionality using a small chunksize
#[test]
pub fn encode_decode() {
let len = 8;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, encode
let mut d0 = vec![0u8;len];
let mut d1 = vec![0u8;len];
let mut p0 = vec![0u8;len];
for i in 0..len {
d0[i] = rng.gen();
d1[i] = rng.gen();
}
unsafe {
codec.encode(len, &[d0.as_ptr(), d1.as_ptr()], &mut [p0.as_mut_ptr()]);
}
// Now delete column 0 and rebuild
let mut r0 = vec![0u8;len];
let mut erasures = FixedBitSet::with_capacity(3);
erasures.insert(0);
unsafe {
codec.decode(len, &[d1.as_ptr(), p0.as_ptr()], &mut [r0.as_mut_ptr()],
&erasures);
}
// Verify that column was reconstructed correctly
assert_eq!(d0, r0);
}
// Test encoding from discontiguous data columns
#[test]
pub fn encodev() {
let len = 16;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, make the reference parity using contiguous encode
let mut da0 = vec![0u8;len];
let mut da1 = vec![0u8;len];
let mut pa0 = vec![0u8;len];
for i in 0..len {
da0[i] = rng.gen();
da1[i] = rng.gen();
}
unsafe {
codec.encode(len, &[da0.as_ptr(), da1.as_ptr()],
&mut [pa0.as_mut_ptr()]);
}
// Next, split the same data into discontiguous SGLists
// First segments are identically sized
let db0p0 = DivBufShared::from(Vec::from(&da0[0..4]));
let db1p0 = DivBufShared::from(Vec::from(&da1[0..4]));
// db0 has longer 2nd segment
let db0p1 = DivBufShared::from(Vec::from(&da0[4..9]));
let db1p1 = DivBufShared::from(Vec::from(&da1[4..8]));
// db1 has longer 3rd segment
let db0p2 = DivBufShared::from(Vec::from(&da0[9..14]));
let db1p2 = DivBufShared::from(Vec::from(&da1[8..14]));
// final segments are identically sized
let db0p3 = DivBufShared::from(Vec::from(&da0[14..len]));
// final segments are identically sized
let db1p3 = DivBufShared::from(Vec::from(&da1[14..len]));
let sgb0 = vec![db0p0.try_const().unwrap(),
db0p1.try_const().unwrap(),
db0p2.try_const().unwrap(),
db0p3.try_const().unwrap()];
let sgb1 = vec![db1p0.try_const().unwrap(),
db1p1.try_const().unwrap(),
db1p2.try_const().unwrap(),
db1p3.try_const().unwrap()];
let data = vec![sgb0, sgb1];
let mut pa1 = vec![0u8; len];
let mut pslice = [pa1.as_mut_ptr()];
unsafe { codec.encodev(len, &data, &mut pslice[..]); }
assert_eq!(pa0, pa1);
}
// Test basic RAID update functionality using a small chunksize
#[test]
pub fn encode_update_decode() {
let len = 8;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, encode
let mut d0 = vec![0u8;len];
let mut d1 = vec![0u8;len];
let mut p0 = vec![0u8;len];
for i in 0..len {
d0[i] = rng.gen();
d1[i] = rng.gen();
}
unsafe {
codec.encode_update(len, &d0, &mut [p0.as_mut_ptr()], 0);
codec.encode_update(len, &d1, &mut [p0.as_mut_ptr()], 1);
}
// Now delete column 0 and rebuild
let mut r0 = vec![0u8;len];
let mut erasures = FixedBitSet::with_capacity(3);
erasures.insert(0);
unsafe {
codec.decode(len, &[d1.as_ptr(), p0.as_ptr()], &mut [r0.as_mut_ptr()],
&erasures);
}
// Verify that column was reconstructed correctly
assert_eq!(d0, r0);
}
// If the encoding matrix ever changes, it will change the on-disk format.
// Generate several different encoding matrices and compare them against
// golden masters
#[test]
fn format_stability() {
let testpairs = [
(3, 1, vec![1, 0,
0, 1,
1, 1]),
(5, 1, vec![1, 0, 0 | check | identifier_name |
codec.rs | // single parity arrays for compatibility with a faster future codec.
if f == 1 {
isa_l::gf_gen_rs_matrix(&mut enc_matrix, m, k);
} else {
isa_l::gf_gen_cauchy1_matrix(&mut enc_matrix, m, k);
}
// The encoding tables only use the encoding matrix's parity rows (e.g.
// rows k and higher)
isa_l::ec_init_tables(k, f, &enc_matrix[(k*k) as usize..],
&mut enc_tables);
Codec {m, f, enc_matrix, enc_tables}
}
/// Verify parity and identify corrupt columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Data array: `k` columns of `len` bytes each
/// - `parity`: Parity array: `f` columns of `len` bytes each
///
/// # Returns
///
/// A bitset identifies which columns are corrupt. A 1 indicates a corrupt
/// column and a 0 indicates a healthy column. If the parity does not
/// verify successfully but it cannot be determined which column(s) are
/// corrupt, then all bits will be set. All bits set indicates that the row
/// is irrecoverable without additional information. Note that when the
/// number of corrupt columns equals `f` the row will be considered
/// irrecoverable even though the original data can still be recovered via
/// combinatorial reconstruction.
pub unsafe fn check(&self, _len: usize, _data: &[*const u8],
_parity: &[*const u8]) -> FixedBitSet {
panic!("Unimplemented");
}
/// Reconstruct missing data from partial surviving columns
///
/// Given a `Codec` with `m` total columns composed of `k` data columns and
/// `f` parity columns, where one or more columns is missing, reconstruct
/// the data from the missing columns. Takes as a parameter exactly `k`
/// surviving columns, even if more than `k` columns survive. These *must*
/// be the lowest `k` surviving columns. For example, in a 5+3 array where
/// the columns 0 and 3 are missing, Provide columns 1, 2, 4, 5, and 6 (data
/// columns 1, 2, and 4 and parity columns 0 and 1).
///
/// This method cannot reconstruct missing parity columns. In order to
/// reconstruct missing parity columns, you must first use this method to
/// regenerate all data columns, *and then* use `encode` to recreate the
/// parity.
///
/// # Parameters
///
/// - `len`: Size of each column, in bytes
/// - `surviving`: Exactly `k` columns of surviving data and parity,
/// sorted in order of the original column index, with
/// data columns preceding parity columns.
/// - `missing`: Reconstructed data (not parity!) columns. The
/// number should be no more than the ones count of
/// `erasures`. Upon return, they will be populated
/// with the original data of the missing columns.
/// - `erasures`: Bitmap of the column indices of the missing columns.
pub unsafe fn decode(&self, len: usize, surviving: &[*const u8],
missing: &mut [*mut u8], erasures: &FixedBitSet) {
let k = self.m - self.f;
let errs = erasures.count_ones(..k as usize) as u32;
assert!(errs > 0, "Only a fool would reconstruct an undamaged array!");
assert_eq!(errs as usize, missing.len());
let dec_tables = self.mk_decode_tables(erasures);
isa_l::ec_encode_data(len, k, errs, &dec_tables, surviving, missing);
}
/// Generate parity columns from a complete set of data columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encode(&self, len: usize, data: &[*const u8],
parity: &mut [*mut u8])
{
let k = self.m - self.f;
isa_l::ec_encode_data(len, k, self.f, &self.enc_tables, data, parity);
}
/// Encode parity, using vectored input
///
/// Like `encode`, but with discontiguous the data columns.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each. They may be
/// discontiguous, and each may have a different structure.
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encodev(&self, len: usize, data: &[SGList],
parity: &mut [*mut u8])
{
let mut cursors : Vec<SGCursor> =
data.iter()
.map(SGCursor::from)
.collect();
let mut l = 0;
while l < len {
let ncl =
cursors.iter()
.map(SGCursor::peek_len)
.min().unwrap();
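// Bind the iovecs alongside the raw pointers so the buffers backing
// `refs` stay alive until `encode` has consumed them.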
let (refs, _iovecs) : (Vec<_>, Vec<_>) =
cursors.iter_mut()
.map(|sg| {
let iovec = sg.next(ncl).unwrap();
(iovec.as_ptr(), iovec)
})
.unzip();
let mut prefs: Vec<*mut u8> = parity.iter_mut()
.map(|iov| unsafe{iov.add(l)})
.collect();
self.encode(ncl, &refs, &mut prefs);
l += ncl;
}
}
/// Update parity columns from a single data column.
///
/// This method can be used to progressively update a set of parity columns
/// by feeding in one data column at a time.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: a single column of `len` bytes
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be updated upon return.
/// - `data_idx`: Column index of the supplied data column. Must lie in
/// the range `[0, k)`.
///
/// # Safety
///
/// Caller must ensure that the `parity` field is of sufficient size and
/// points to allocated memory. It need not be initialized.
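///
/// # Example
///
/// A sketch of building the same parity progressively, one data column at
/// a time (variables as in the `encode_update_decode` test below):
///
/// ```ignore
/// unsafe {
///     codec.encode_update(len, &d0, &mut [p0.as_mut_ptr()], 0);
///     codec.encode_update(len, &d1, &mut [p0.as_mut_ptr()], 1);
/// }
/// ```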
pub unsafe fn encode_update(&self, len: usize, data: &[u8],
parity: &mut [*mut u8], data_idx: u32)
{
let k = self.m - self.f;
isa_l::ec_encode_data_update(len, k, self.f, data_idx, &self.enc_tables,
data, parity);
}
// Generate tables for RAID decoding
// Loosely based on erasure_code_perf.c from ISA-L's internal test suite
// NB: For reasonably small values of m and f, it should be possible to
// cache all possible decode tables.
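// The construction: take the k healthy rows of the (m x k) encoding
// matrix, invert that k x k submatrix, and the rows of the inverse that
// correspond to the erased data columns form the decoding matrix.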
fn mk_decode_tables(&self, erasures: &FixedBitSet) -> Box<[u8]> {
let k : usize = (self.m - self.f) as usize;
// Exclude missing parity columns from the list
let errs : usize = erasures.count_ones(..k);
let mut dec_tables = vec![0u8; 32 * k * errs].into_boxed_slice();
// To generate the decoding matrix, first select k healthy rows from the
// encoding matrix.
let mut dec_matrix_inv = vec![0u8; k * k].into_boxed_slice();
let mut skips = 0;
for i in 0..k {
while erasures.contains(i + skips) {
skips += 1;
}
let row = i + skips;
for j in 0..k {
dec_matrix_inv[k * i + j] =
self.enc_matrix[k * row + j];
}
}
// Then invert the result
let mut dec_matrix = vec![0u8; k * k].into_boxed_slice();
isa_l::gf_invert_matrix(&dec_matrix_inv, &mut dec_matrix, k as u32)
.unwrap();
// Next, select the rows corresponding to missing data
let mut dec_rows = vec![0u8; k * errs].into_boxed_slice();
for (i, r) in erasures.ones().enumerate() {
if r >= k {
break; // Exclude missing parity columns
}
for j in 0..k {
dec_rows[k * i + j] =
dec_matrix[k * r + j];
}
}
// Finally, generate the fast tables used to apply the decoding matrix
isa_l::ec_init_tables(k as u32, errs as u32, &dec_rows, &mut dec_tables);
dec_tables
}
/// Return the degree of redundancy
pub fn protection(&self) -> i16 {
self.f as i16
}
/// Return the total number of disks in the raid stripe
pub fn stripesize(&self) -> i16 {
self.m as i16
}
}
// LCOV_EXCL_START
#[cfg(test)]
mod tests {
use divbuf::DivBufShared;
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::{self, Rng};
use std::ops::Deref;
use super::*;
// Roundtrip data through the codec for various array sizes and erasure sets
#[test]
pub fn comprehensive() {
let cfgs = [
(3, 1), (9, 1),
(4, 2), (10, 2),
(6, 3), (19, 3),
(8, 4), (20, 4)
];
let len = 64;
let maxdata = 28;
let maxparity = 4;
let mut rng = rand::thread_rng();
let mut data = Vec::<Vec<u8>>::new();
let mut parity = Vec::<Vec<u8>>::new();
let mut reconstructed = Vec::<Vec<u8>>::new();
for _ in 0..maxdata {
let mut column = Vec::<u8>::with_capacity(len);
for _ in 0..len {
column.push(rng.gen());
}
data.push(column);
}
for _ in 0..maxparity {
let column = vec![0u8; len];
parity.push(column);
}
for _ in 0..(maxparity as usize) {
reconstructed.push(vec![0u8; len]);
}
for cfg in &cfgs {
let m = cfg.0;
let f = cfg.1;
let k = m - f;
let codec = Codec::new(m, f);
// First encode
let mut input = Vec::<*const u8>::with_capacity(m as usize);
for x in data.iter().take(k as usize) {
input.push(x.as_ptr());
}
let mut output = Vec::<*mut u8>::with_capacity(f as usize);
for x in parity.iter_mut().take(f as usize) {
output.push(x.as_mut_ptr());
}
unsafe{ codec.encode(len, &input, &mut output); }
// Iterate over all possible failure combinations
for erasures_vec in (0..m).combinations(f as usize) {
// Don't attempt to decode if the only missing columns are parity
if erasures_vec[0] >= k {
continue;
}
// Decode
let mut surviving = Vec::<*const u8>::with_capacity(m as usize);
let mut erasures = FixedBitSet::with_capacity(m as usize);
for b in &erasures_vec {
erasures.insert(*b as usize);
}
let mut skips = 0;
for i in 0..(k as usize) {
while erasures.contains(i + skips) {
skips += 1;
}
let r = i + skips;
if r < k as usize {
surviving.push(data[r].as_ptr());
} else {
surviving.push(parity[r - k as usize].as_ptr());
}
}
let data_errs = erasures.count_ones(..k as usize);
let mut decoded = Vec::<*mut u8>::with_capacity(data_errs);
for x in reconstructed.iter_mut().take(data_errs) {
decoded.push(x.as_mut_ptr());
}
unsafe { codec.decode(len, &surviving, &mut decoded, &erasures); }
// Finally, compare
for i in 0..data_errs {
assert_eq!(&data[erasures_vec[i] as usize], &reconstructed[i],
"miscompare for m={:?}, f={:?}, erasures={:?}",
m, f, erasures_vec);
}
}
}
}
// Test basic RAID functionality using a small chunksize
#[test]
pub fn encode_decode() {
let len = 8;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, encode
let mut d0 = vec![0u8;len];
let mut d1 = vec![0u8;len];
let mut p0 = vec![0u8;len];
for i in 0..len {
d0[i] = rng.gen();
d1[i] = rng.gen();
}
unsafe {
codec.encode(len, &[d0.as_ptr(), d1.as_ptr()], &mut [p0.as_mut_ptr()]);
}
// Now delete column 0 and rebuild
let mut r0 = vec![0u8;len];
let mut erasures = FixedBitSet::with_capacity(3);
erasures.insert(0);
unsafe {
codec.decode(len, &[d1.as_ptr(), p0.as_ptr()], &mut [r0.as_mut_ptr()],
&erasures);
}
// Verify that column was reconstructed correctly
assert_eq!(d0, r0);
}
// Test encoding from discontiguous data columns
#[test]
pub fn encodev() {
let len = 16;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, make the reference parity using contiguous encode
let mut da0 = vec![0u8;len];
let mut da1 = vec![0u8;len];
let mut pa0 = vec![0u8;len];
for i in 0..len {
da0[i] = rng.gen();
da1[i] = rng.gen();
}
unsafe {
codec.encode(len, &[da0.as_ptr(), da1.as_ptr()],
&mut [pa0.as_mut_ptr()]);
}
// Next, split the same data into discontiguous SGLists
// First segments are identically sized
let db0p0 = DivBufShared::from(Vec::from(&da0[0..4]));
let db1p0 = DivBufShared::from(Vec::from(&da1[0..4]));
// db0 has longer 2nd segment
let db0p1 = DivBufShared::from(Vec::from(&da0[4..9]));
let db1p1 = DivBufShared::from(Vec::from(&da1[4..8]));
// db1 has longer 3rd segment
let db0p2 = DivBufShared::from(Vec::from(&da0[9..14]));
let db1p2 = DivBufShared::from(Vec::from(&da1[8..14]));
// final segments are identically sized
let db0p3 = DivBufShared::from(Vec::from(&da0[14..len]));
let db1p3 = DivBufShared::from(Vec::from(&da1[14..len]));
let sgb0 = vec![db0p0.try_const().unwrap(),
db0p1.try_const().unwrap(),
db0p2.try_const().unwrap(),
db0p3.try_const().unwrap()];
let sgb1 = vec![db1p0.try_const().unwrap(),
db1p1.try_const().unwrap(),
db1p2.try_const().unwrap(),
db1p3.try_const().unwrap()];
let data = vec![sgb0, sgb1];
let mut pa1 = vec![0u8; len];
let mut pslice = [pa1.as_mut_ptr()];
unsafe { codec.encodev(len, &data, &mut pslice[..]); }
assert_eq!(pa0, pa1);
}
// Test basic RAID update functionality using a small chunksize
#[test]
pub fn encode_update_decode() {
let len = 8;
let codec = Codec::new(3, 1);
let mut rng = rand::thread_rng();
// First, encode
let mut d0 = vec![0u8;len];
let mut d1 = vec![0u8;len];
let mut p0 = vec![0u8;len];
for i in 0..len {
d0[i] = rng.gen();
d1[i] = rng.gen();
}
unsafe {
codec.encode_update(len, &d0, &mut [p0.as_mut_ptr()], 0);
codec.encode_update(len, &d1, &mut [p0.as_mut_ptr()], 1);
}
// Now delete column 0 and rebuild
let mut r0 = vec![0u8;len];
let mut erasures = FixedBitSet::with_capacity(3);
erasures.insert(0);
unsafe {
codec.decode(len, &[d1.as_ptr(), p0.as_ptr()], &mut [r0.as_mut_ptr()],
&erasures);
}
// Verify that column was reconstructed correctly
assert_eq!(d0, r0);
}
// If the encoding matrix ever changes, it will change the on-disk format.
// Generate several different encoding matrices and compare them against
// golden masters
#[test]
fn format_stability() {
let testpairs = [
(3, 1, vec![1, 0,
0, 1,
1, 1]),
(5, 1, vec![1, 0, 0
level.rs
use ggez;
use ggez::graphics;
use ggez_goodies::scene;
use ggez_goodies::tilemap::tiled as tiled;
use ggez_goodies::tilemap::Map as Map;
use log::*;
use specs::{self, Join};
use specs::world::Builder;
use warmy;
// use std::path;
use ggez::nalgebra as na;
use ncollide2d as nc;
use crate::components as c;
use crate::util;
use crate::input;
use crate::resources;
use crate::scenes;
use crate::systems::*;
use crate::world::World;
// use euclid;
const MIN_VELOCITY: f32 = -0.5;
const MAX_VELOCITY: f32 = 2.0;
const TAU: f32 = std::f32::consts::PI * 2.0;
pub struct LevelScene {
done: bool,
car: warmy::Res<resources::Image>,
map: Map,
player_entity: specs::Entity,
dispatcher: specs::Dispatcher<'static,'static>,
}
impl LevelScene {
pub fn new(ctx: &mut ggez::Context, world: &mut World) -> Self {
let done = false;
let car = world
.resources
.get::<resources::Image>(&resources::Key::from_path("/images/cars-spritesheet.png"), ctx)
.unwrap();
let mut load_image = |ctx: &mut ggez::Context, path: &str| -> graphics::Image {
let mut path_in_resources = String::from("/");
path_in_resources.push_str(path);
graphics::Image::new(ctx, path_in_resources).unwrap()
};
let tiled_map = tiled::parse_file(&std::path::Path::new("resources/map1.tmx")).unwrap();
let map = ggez_goodies::tilemap::Map::from_tiled(
ctx,
tiled_map,
&mut load_image
);
let half_height = 76.0 / 2.0;
let half_width = 76.0 / 4.0;
// shared collision properties
let car_shape = nc::shape::Cuboid::new(na::Vector2::new(half_width, half_height));
let car_collision_group = nc::pipeline::object::CollisionGroups::new();
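// Contacts(0.0, 0.0): report contact events only when the shapes
// actually touch (no linear or angular prediction margin).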
let contacts_query = nc::pipeline::object::GeometricQueryType::Contacts(0.0, 0.0);
// player 1
let player_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 300.0), rotation: 0.0 })
.with(c::Motion { velocity: util::vec2(0.0, 0.0), acceleration: util::vec2(0.0, 0.0), is_blocked: false, orientation: 0.0})
.with(c::Sprite { clip: graphics::Rect { x: 0.0, y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// other car
let car_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 100.0), rotation: 0.0 })
.with(c::Sprite { clip: graphics::Rect { x: (76.0 / 384.0), y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// collisions
{
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
let (player_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 300.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
player_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(player_entity, c::Collider { handle: player_collider_handle }).expect("couldn't insert Collider");
let (car_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 100.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
car_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(car_entity, c::Collider { handle: car_collider_handle }).expect("couldn't insert Collider");
}
let mut dispatcher = Self::register_systems();
dispatcher.setup(&mut world.specs_world.res);
LevelScene {
done,
car,
map,
player_entity,
dispatcher,
}
}
fn register_systems() -> specs::Dispatcher<'static,'static> {
let builder = specs::DispatcherBuilder::new()
.with(MovementSystem, "sys_movement", &[])
.with(CollisionSystem, "sys_collision", &[]);
// builder.add_thread_local(RenderSystem);
builder.build()
}
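// Step the ncollide world, then react to its contact events: a started
// contact marks both entities as blocked and flips their y-acceleration;
// a stopped contact clears the blocked flag again.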
fn update_collisions(&mut self, world: &mut World) {
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
collide_world.update();
let mut motions = world.specs_world.write_storage::<c::Motion>();
// gameworld.collide_world.update();
for e in collide_world.contact_events() {
match e {
ncollide2d::pipeline::narrow_phase::ContactEvent::Started(handle1, handle2) =>
{
println!("contact started!");
// look up collision object
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = true;
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update();
}
let obj2 = collide_world.collision_object(*handle2).expect("missing coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = true;
// want to use reflect here.
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update();
// motion.velocity = rotation.transform_vector(&player_motion.acceleration);
}
}
ncollide2d::pipeline::narrow_phase::ContactEvent::Stopped(handle1, handle2) =>
{
println!("contact ended");
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = false;
}
let obj2 = collide_world.collision_object(*handle2).expect("missing coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = false;
}
}
}
}
}
}
// fn camera_draw(ctx: &mut ggez::Context, drawable: &graphics::Drawable, params: graphics::DrawParam) -> ggez::GameResult<()> {
// Ok(())
// }
impl scene::Scene<World, input::Event> for LevelScene {
fn update(&mut self, gameworld: &mut World, _ctx: &mut ggez::Context) -> scenes::Switch {
self.dispatcher.dispatch(&mut gameworld.specs_world.res);
self.update_collisions(gameworld);
if self.done {
scene::SceneSwitch::Pop
} else {
scene::SceneSwitch::None
}
}
fn draw(&mut self, gameworld: &mut World, ctx: &mut ggez::Context) -> ggez::GameResult<()> {
let pos = gameworld.specs_world.read_storage::<c::Position>();
let mut camera_offset = na::Point2::new(0.0, 0.0);
// todo: update this elsewhere
let player_point = pos.get(self.player_entity).unwrap().point;
// window is 800 x 600
if player_point.x > 400.0 {
if player_point.x < (self.map.width as f32 - 400.0) {
camera_offset.x = 400.0 - player_point.x;
} else {
camera_offset.x = self.map.width as f32 + 400.0;
}
}
if player_point.y > 300.0 {
camera_offset.y = 300.0 - player_point.y;
}
// map
graphics::draw(ctx, &self.map, graphics::DrawParam::default().dest(camera_offset)).unwrap();
// camera_draw(ctx, &self.map, graphics::DrawParam::default()).unwrap();
// sprites
let sprite = gameworld.specs_world.read_storage::<c::Sprite>();
let offset_x: f32 = 0.5;
let offset_y: f32 = 0.5;
for (p, s) in (&pos, &sprite).join() {
let mut params = graphics::DrawParam::default();
params.src = s.clip;
params.rotation = p.rotation;
params.scale = s.scale;
params.offset = na::Point2::new(offset_x, offset_y).into();
params.dest = na::Point2::new(camera_offset.x + p.point.x, camera_offset.y + p.point.y).into();
graphics::draw(
ctx,
&(self.car.borrow().0),
params,
)?;
}
// ui
let motions = gameworld.specs_world.read_storage::<c::Motion>();
let pm = motions.get(self.player_entity).expect("Player w/o motion?");
let text = graphics::Text::new(format!("o = {}, v = {}, x = {}, y = {}", pm.orientation, pm.acceleration.y, pm.velocity.x, pm.velocity.y));
graphics::draw(ctx, &text, graphics::DrawParam::default().dest(na::Point2::new(0.0, 0.0))).unwrap();
Ok(())
}
fn name(&self) -> &str {
"LevelScene"
}
fn input(&mut self, gameworld: &mut World, ev: input::Event, _started: bool) {
debug!("Input: {:?}", ev);
if gameworld.input.get_button_pressed(input::Button::Menu) {
self.done = true;
}
let mut motions = gameworld.specs_world.write_storage::<c::Motion>();
let motion = motions.get_mut(self.player_entity).expect("Player w/o motion?");
if !motion.is_blocked {
// update steering, wrapping orientation into the range [0, TAU)
let steering_input = gameworld.input.get_axis(input::Axis::Horz);
motion.orientation += steering_input / 4.0;
if motion.orientation < 0.0 {
motion.orientation += TAU;
} else if motion.orientation > TAU {
motion.orientation -= TAU;
}
// this is y-velocity, not acceleration
let accel_input = gameworld.input.get_axis(input::Axis::Vert);
motion.acceleration.y += accel_input;
motion.acceleration.y = motion.acceleration.y.max(MIN_VELOCITY).min(MAX_VELOCITY);
}
// calculate new velocity
motion.update();
}
}
lib.rs
.0, 0.0)
}
}
impl Default for Settings {
fn default() -> Settings {
Settings {
text_width: 8.0,
text_height: 16.0,
}
}
}
enum Anchor{
Start,
Middle,
End
}
#[derive(Debug)]
struct Body{
memes: Vec<Meme>,
rest_str: Vec<(usize, String)>
}
impl Body {
fn has_memes(&self) -> bool{
!self.memes.is_empty()
}
fn get_svg_elements(&self, y: usize, settings: &Settings) -> Vec<Box<Node>>{
let mut svg:Vec<Box<Node>> = vec![];
for meme in &self.memes{
svg.extend(meme.get_svg_elements(y, settings));
}
svg
}
// Rebuild the non-meme text as one string, padding with spaces so each
// fragment lands at its original column.
fn unify_rest_text(&self) -> String{
let mut unify = String::new();
for &(sx, ref word) in &self.rest_str{
let lacks = sx - unify.width();
if lacks > 0{
for _ in 0..lacks{
unify.push(' ')
}
}
unify.push_str(word);
}
unify
}
}
/// The whole meme body
///
#[derive(Clone,Debug)]
struct Meme{
/// location to the left until a space is encountered
start_position: usize,
head: Head,
/// location to the right until a space is encountered
end_position: usize,
/// string at the left
left_side: String,
/// string at the right
right_side: String
}
impl Meme{
fn get_svg_elements(&self, y: usize, settings: &Settings) -> Vec<Box<Node>>{
let mut elements:Vec<Box<Node>> = vec![];
let left_text = to_svg_text(&self.left_side, self.head.startx, y, settings, Anchor::End);
elements.push(Box::new(left_text));
elements.extend(self.head.get_svg_elements(y, settings));
let right_text = to_svg_text(&self.right_side, self.head.endx, y, settings, Anchor::Start);
elements.push(Box::new(right_text));
elements
}
}
fn to_svg_text(s: &str, x: usize, y: usize, settings: &Settings, anchor: Anchor) -> SvgText {
let px = x as f32 * settings.text_width;
let py = y as f32 * settings.text_height;
to_svg_text_pixel(s, px, py, settings, anchor)
}
fn to_svg_text_pixel(s: &str, x: f32, y: f32, settings: &Settings, anchor: Anchor) -> SvgText {
to_svg_text_pixel_escaped(&escape_str(s), x, y, settings, anchor)
}
fn to_svg_text_pixel_escaped(s: &str, x: f32, y: f32, settings: &Settings, anchor: Anchor) -> SvgText {
let (offsetx, offsety) = settings.offset();
let sx = x + offsetx;
let sy = y + settings.text_height * 3.0 / 4.0 + offsety;
let mut svg_text = SvgText::new()
.set("x", sx)
.set("y", sy);
match anchor{
Anchor::Start => {
svg_text.assign("text-anchor", "start");
}
Anchor::Middle => {
svg_text.assign("text-anchor", "middle");
}
Anchor::End => {
svg_text.assign("text-anchor", "end");
}
};
let text_node = TextNode::new(s);
svg_text.append(text_node);
svg_text
}
/// The head of the meme.
/// The face is the string between the parentheses; it is used to detect
/// whether this is a valid meme or not.
#[derive(Clone,Debug)]
struct Head{
// character position
start_position: usize,
// left x location x1
startx: usize,
face: String,
// right x location x2
endx: usize,
// end position
end_position: usize
}
impl Head{
fn distance(&self) -> usize {
self.endx - self.startx
}
fn get_svg_elements(&self, y: usize, settings:&Settings) -> Vec<Box<Node>> {
let mut elements: Vec<Box<Node>> = vec![];
elements.push(Box::new(self.get_circle(y, settings)));
elements.push(Box::new(self.get_face_text(y, settings)));
elements
}
fn get_face_text(&self, y:usize, settings: &Settings) -> SvgText{
let c = self.calc_circle(y, settings);
let sy = y as f32 * settings.text_height;
let face = format!("<tspan class='head'>(</tspan>{}<tspan class='head'>)</tspan>", escape_str(&self.face));
to_svg_text_pixel_escaped(&face, c.cx, sy, settings, Anchor::Middle)
}
fn calc_circle(&self, y:usize, settings: &Settings) -> Circle {
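// The circle spans the parenthesized face: its center is the midpoint of
// [startx, endx] and its radius half that span, measured in character
// cells and scaled to pixels by the cell metrics.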
let text_width = settings.text_width;
let text_height = settings.text_height;
let radius = self.distance() as f32 / 2.0;
let center = self.startx as f32 + radius;
let cx = center * text_width;
let cy = y as f32 * text_height + text_height / 2.0;
let cr = radius * text_width;
Circle{
cx: cx,
cy: cy,
r: cr
}
}
fn get_circle(&self, y: usize, settings: &Settings)-> SvgCircle{
let c = self.calc_circle(y, settings);
let (offsetx, offsety) = settings.offset();
SvgCircle::new()
.set("cx",c.cx + offsetx)
.set("cy", c.cy + offsety)
.set("class", "donger")
.set("r", c.r)
}
}
#[derive(Debug)]
struct Circle{
cx: f32,
cy: f32,
r: f32,
}
/// Detect whether a string could be a meme face. The face must be at
/// most 10 columns wide and must look non-trivial in at least one way:
/// has a full-width character (width = 2), a zero-width character,
/// a character wider than 1 byte, a character with a high unicode
/// value (>= 1000), or is not a plain alphanumeric/operator expression.
fn is_meme(ch: &str) -> bool{
let total_bytes = ch.len();
let total_width = ch.width();
let mut gte_bytes2 = 0;
let mut gte_width2 = 0;
let mut zero_width = 0;
let mut gte_unicode_1k = 0;
for c in ch.chars(){
if c as u32 >= 1000{
gte_unicode_1k += 1;
}
if c.len_utf8() >= 2{
gte_bytes2 += 1;
}
if let Some(uw) = c.width(){
if uw >= 2 {
gte_width2 += 1;
}
if uw == 0 {
zero_width += 1;
}
}
}
/*
println!("total_bytes: {}", total_bytes);
println!("gte_bytes2: {}", gte_bytes2);
println!("gte_width2: {}", gte_width2);
println!("zero_width: {}", zero_width);
println!("gte_unicode_1k {}", gte_unicode_1k);
println!("total_width: {}", total_width);
println!("");
*/
total_width <= 10 && // must be at most 10 character face
(gte_bytes2 > 0 || gte_width2 > 0
|| zero_width > 0 || gte_unicode_1k > 0
|| total_bytes > total_width
||!is_expression(ch)
)
}
fn calc_dimension(s: &str) -> (usize, usize) {
let mut longest = 0;
for line in s.lines(){
let line_width = line.width();
if line_width > longest{
longest = line_width
}
}
let line_count = s.lines().count();
(longest, line_count)
}
/// Return an SVG document built from the input text string
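///
/// # Example
///
/// A minimal sketch using the default cell metrics from
/// `Settings::default()` (8.0 x 16.0 pixels per character cell):
///
/// ```ignore
/// let doc = to_svg("ヘ( ^_^)ノ", 8.0, 16.0);
/// println!("{}", doc);
/// ```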
pub fn to_svg(s: &str, text_width: f32, text_height: f32) -> SVG {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg = SVG::new()
.set("font-size", 14)
.set("font-family", "arial");
svg.append(get_styles());
let nodes = to_svg_lines(s,settings);
for elm in nodes{
let text_node = TextNode::new(elm.to_string());
svg.append(text_node);
}
let (offsetx, offsety) = settings.offset();
let (wide, high) = calc_dimension(s);
let width = wide as f32 * text_width + offsetx;
let height = (high + 2 ) as f32 * text_height + offsety;
svg.assign("width", width);
svg.assign("height", height);
svg
}
fn get_styles() -> Style {
let style = r#"
line, path {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
}
circle {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
fill:white;
}
circle.donger{
stroke-width: 1;
fill: white;
}
tspan.head{
fill: none;
stroke: none;
}
"#;
Style::new(style)
}
/// Process and parse each line
fn to_svg_lines(s: &str, settings: &Settings) -> Vec<Box<Node>> {
let mut elements = vec![];
let mut y = 0;
for line in s.lines(){
let line_elm = get_svg_elements(y, line, settings);
elements.extend(line_elm);
y += 1;
}
elements
}
/// process only 1 line
fn get_svg_elements(y: usize, s: &str, settings: &Settings) -> Vec<Box<Node>> {
let body = parse_memes(s);
body.get_svg_elements(y, &settings)
}
/// Return the SVG nodes for every line, together with the reassembled
/// text that was not part of any meme, and the stylesheet
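///
/// # Example
///
/// A sketch of separating memes from prose; `rest` is the input with the
/// meme bodies removed, `nodes` are the rendered heads and side texts:
///
/// ```ignore
/// let (nodes, rest, style) = get_meme_svg("hi ( ^_^)ノ there", 8.0, 16.0);
/// assert!(rest.contains("hi"));
/// ```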
pub fn get_meme_svg(input: &str, text_width: f32, text_height: f32) -> (Vec<Box<Node>>, String, Style) {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg_elements: Vec<Box<Node + 'static>> = vec![];
let mut relines = String::new();
let mut y = 0;
for line in input.lines(){
match line_to_svg_with_excess_str(y, line, settings){
Some((svg_elm, rest_text)) => {
relines.push_str(&rest_text);
relines.push('\n');
svg_elements.extend(svg_elm);
},
None => {
relines.push_str(line);
relines.push('\n');
}
}
y += 1;
}
(svg_elements, relines, get_styles())
}
/// parse the memes and return the svg together with the unmatched strings
fn line_to_svg_with_excess_str(y: usize, s: &str, settings:&Settings) -> Option<(Vec<Box<Node>>, String)>{
let body = parse_memes(s);
if body.has_memes(){
let nodes = body.get_svg_elements(y, settings);
Some((nodes, body.unify_rest_text()))
}else{
None
}
}
#[test]
fn test_1line(){
let meme = "";
let nodes = get_svg_elements(0, meme, &Settings::default());
assert_eq!(nodes.len(), 0);
}
/// TODO: include parsing the rest of the unused text
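/// Single pass over the characters: accumulate the current word, open a
/// candidate head at `(`, validate the face with `is_meme` at `)`, and
/// close the whole meme at the next space. Text that never becomes part
/// of a meme is collected into `rest_str` along with its column offset.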
fn parse_memes(s: &str) -> Body{
let mut memes = vec![];
let mut paren_opened = false;
let mut meme_face = String::new();
let mut index = 0;
let mut total_width = 0;
let mut face_markers:Vec<Head> = vec![];
let mut startx = 0;
let mut start_position = 0;
let mut meme_start = 0;
let mut meme_body = String::new();
let mut meme_left_side = String::new();
let mut meme_right_side = String::new();
let mut meme_head = None;
let total_chars = s.chars().count();
let mut rest_text:Vec<(usize, String)> = vec![];
for ch in s.chars(){
let last_char = index == total_chars - 1;
if meme_head.is_some(){
meme_right_side.push(ch);
}
if paren_opened && ch == ')'{ //if paren_opened and encountered a closing
paren_opened = false;
if is_meme(&meme_face){
let head = Head{
start_position: start_position,
startx: startx,
face: meme_face.clone(),
end_position: index,
endx: total_width,
};
meme_head = Some(head.clone());
face_markers.push(head);
meme_face.clear();
}
}
if paren_opened{
meme_face.push(ch);
}
if ch == '('{
paren_opened = true;
startx = total_width;
start_position = index;
meme_left_side = meme_body.clone();
meme_face.clear();
}
if meme_head.is_none() && (ch == ' ' || last_char) {
meme_start = index + 1;
if !paren_opened {
let mut rest_word = meme_body.clone();
let rest_start = total_width - rest_word.width();
if last_char{
rest_word.push(ch);
rest_word.push_str(&meme_face);//the head is unmatched
}
rest_text.push((rest_start, rest_word));
}
meme_body.clear();
}
if meme_head.is_some() && (ch == ' ' || last_char) {
let meme = Meme{
start_position: meme_start,
head: meme_head.clone().unwrap(),
end_position: index,
left_side: meme_left_side.clone(),
right_side: meme_right_side.clone(),
};
memes.push(meme);
meme_right_side.clear();
meme_left_side.clear();
meme_body.clear();
meme_head = None;
}
meme_body.push(ch);
if let Some(uw) = ch.width(){
total_width += uw;
}
index += 1;
}
Body{
memes: memes,
rest_str: regroup_rest_text(&rest_text)
}
}
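/// Merge adjacent rest-text fragments whose column spans touch, so each
/// entry in the result is one maximal run of non-meme text.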
fn regroup_rest_text(rest_text: &Vec<(usize, String)>)->Vec<(usize, String)>{
let mut new_group = vec![];
//println!("regrouping text..");
for &(start,ref rest) in rest_text{
if new_group.is_empty(){
new_group.push((start, rest.clone()));
}else{
if let Some((lastx, last_rest)) = new_group.pop(){
if lastx + last_rest.width() == start{
let mut merged = String::new();
merged.push_str(&last_rest);
merged.push_str(rest);
new_group.push((lastx, merged));
}else{
new_group.push((lastx, last_rest));
new_group.push((start, rest.clone()));
}
}
}
}
//println!("new_group: {:#?}", new_group);
new_group
}
#[test]
fn test_meme() {
assert!(is_meme(" ͡° ͜ʖ ͡°"));
assert!(is_meme("⌐■_■"));
assert!(is_meme("ツ"));
assert!(!is_meme("svgbobg"));
assert!(!is_meme("not a meme in space"));
assert!(is_meme(" -_- "));
assert!(is_meme("-_-"));
assert!(!is_meme(" "));
}
#[test]
fn test_expression(){
assert!(is_meme("^_^"));
assert!(is_meme("x_x"));
assert!(!is_meme("+"));
assert!(!is_meme("x+y"));
assert!(!is_meme("x^2*y^2"));
assert!(!is_meme("x^2 * y^2"));
}
#[test]
fn test_bound(){
let meme = "(♥_♥)";
let memes = parse_memes(meme);
for m in memes.memes{
let b = m.head;
println!("bound {:?} d:{} ", b, b.distance());
assert_eq!(4, b.distance());
}
}
fn escape_str(s: &str) -> String{
let mut escaped = String::new();
for c in s.chars(){
escaped.push_str(&escape_char(&c));
}
escaped
}
fn escape_char(ch: &char) -> String {
let escs = [('"', "&quot;"), ('\'', "&#39;"), ('<', "&lt;"), ('>', "&gt;"), ('&', "&amp;")];
let quote_match: Option<&(char, &str)> = escs.iter()
.find(|pair| {
let &(e, _) = *pair;
e == *ch
});
let quoted: String = match quote_match {
Some(&(_, quoted)) => String::from(quoted),
None => {
let mut s = String::new();
s.push(*ch);
s
}
};
quoted
}
fn is_operator(c: char) -> bool{
c == '+' || c == '-' || c == '*' || c == '/'
|| c == '^' || c == '%' || c == '!' || c == ','
|| c == '.' || c == '=' || c == '|' || c == '&'
}
#[test]
fn test_operator(){
assert!(!is_expression("^_^"));
assert!(is_operator('+'));
assert!(is_expression("+"));
assert!(is_expression("x+y"));
assert!(is_expression("x^2*y^2"));
assert!(is_expression("x^2 * y^2"));
}
//TODO: alternate alphanumeric_space and operator
fn is_expression(ch: &str) -> bool{
is_alphanumeric_space_operator(ch)
}
fn is_alphanumeric_space_operator(ch:&str) -> bool{
ch.chars().all(|c| c.is_alphanumeric() || c == ' ' || c == '_' || is_operator(c))
}
#[test]
fn test_body(){
let meme = "( ^_^)ノ";
println!("{}", meme);
let bodies = parse_memes(meme);
for b in &bodies.memes{
println!("{:#?}",b);
}
assert_eq!(1, bodies.memes.len());
}
#[test]
fn test_body2(){
let meme = "ヘ( ^_^)ノ \(^_^ )Gimme Five";
println!("{}", meme);
let bodies = parse_memes(meme);
for b in &bodies.memes{
println!("{:#?}",b);
}
assert_eq!(2, bodies.memes.len());
}
#[test]
fn test_rest_of_text(){
let meme = r#"The rest of 凸(•̀_•́)凸❤️ ( ͡° ͜ʖ ͡°) \(°□°)/层∀ the text is here"#;
println!("{}", meme);
let bodies = parse_memes(meme);
println!("{:#?}",bodies);
assert_eq!(3, bodies.memes.len());
assert_eq!(2, bodies.rest_str.len());
}
#[test]
fn test_unify_rest_of_text(){
let meme = r#"The rest of 凸(•̀_•́)凸❤️ ( ͡° ͜ʖ ͡°) \(°□°)/层∀ the text is here"#;
let resi = r#"The rest of the text is here"#;
println!("{}", meme);
let bodies = parse_memes(meme);
println!("{:#?}",bodies);
assert_eq!(3, bodies.memes.len());
assert_eq!(2, bodies.rest_str.len());
assert_eq!(meme.width(), bodies.unify_rest_text().width());
println!("residue: {}
lib.rs | .0, 0.0)
}
}
impl Default for Settings {
fn default() -> Settings {
Settings {
text_width: 8.0,
text_height: 16.0,
}
}
}
enum Anchor{
Start,
Middle,
End
}
#[derive(Debug)]
struct Body{
memes: Vec<Meme>,
rest_str: Vec<(usize, String)>
}
impl Body {
fn has_memes(&self) -> bool{
!self.memes.is_empty()
}
fn get_svg_elements(&self, y: usize, settings: &Settings) -> Vec<Box<Node>>{
let mut svg:Vec<Box<Node>> = vec![];
for meme in &self.memes{
svg.extend(meme.get_svg_elements(y, settings));
}
svg
}
// Build the non-meme rest text as one string, space-padded back to its original columns
fn unify_rest_text(&self) -> String{
let mut unify = String::new();
for &(sx, ref word) in &self.rest_str{
let lacks = sx - unify.width();
if lacks > 0{
for _ in 0..lacks{
unify.push(' ')
}
}
unify.push_str(word);
}
unify
}
}
/// The whole meme body
///
#[derive(Clone,Debug)]
struct Meme{
/// location to the left until a space is encountered
start_position: usize,
head: Head,
/// location to the right until a space is encountered
end_position: usize,
/// string at the left
left_side: String,
/// string at the right
right_side: String
}
impl Meme{
fn get_svg_elements(&self, y: usize, settings: &Settings) -> Vec<Box<Node>>{
let mut elements:Vec<Box<Node>> = vec![];
let left_text = to_svg_text(&self.left_side, self.head.startx, y, settings, Anchor::End);
elements.push(Box::new(left_text));
elements.extend(self.head.get_svg_elements(y, settings));
let right_text = to_svg_text(&self.right_side, self.head.endx, y, settings, Anchor::Start);
elements.push(Box::new(right_text));
elements
}
}
fn to_svg_text(s: &str, x: usize, y: usize, settings: &Settings, anchor: Anchor) -> SvgText {
let px = x as f32 * settings.text_width;
let py = y as f32 * settings.text_height;
to_svg_text_pixel(s, px, py, settings, anchor)
}
fn to_svg_text_pixel(s: &str, x: f32, y: f32, settings: &Settings, anchor: Anchor) -> SvgText {
to_svg_text_pixel_escaped(&escape_str(s), x, y, settings, anchor)
}
fn to_svg_text_pixel_escaped(s: &str, x: f32, y: f32, settings: &Settings, anchor: Anchor) -> SvgText {
let (offsetx, offsety) = settings.offset();
let sx = x + offsetx;
let sy = y + settings.text_height * 3.0 / 4.0 + offsety;
let mut svg_text = SvgText::new()
.set("x", sx)
.set("y", sy);
match anchor{
Anchor::Start => {
svg_text.assign("text-anchor", "start");
}
Anchor::Middle => {
svg_text.assign("text-anchor", "middle");
}
Anchor::End => {
svg_text.assign("text-anchor", "end");
}
};
let text_node = TextNode::new(s);
svg_text.append(text_node);
svg_text
}
/// The head of the meme.
/// The face is the string between the parentheses; it is
/// what `is_meme` inspects to decide whether this is a valid meme.
#[derive(Clone,Debug)]
struct Head{
// character position
start_position: usize,
// left x location x1
startx: usize,
face: String,
// right x location x2
endx: usize,
// end position
end_position: usize
}
impl Head{
fn distance(&self) -> usize {
self.endx - self.startx
}
fn get_svg_elements(&self, y: usize, settings:&Settings) -> Vec<Box<Node>> {
let mut elements: Vec<Box<Node>> = vec![];
elements.push(Box::new(self.get_circle(y, settings)));
elements.push(Box::new(self.get_face_text(y, settings)));
elements
}
fn get_face_text(&self, y:usize, settings: &Settings) -> SvgText{
let c = self.calc_circle(y, settings);
let sy = y as f32 * settings.text_height;
let face = format!("<tspan class='head'>(</tspan>{}<tspan class='head'>)</tspan>", escape_str(&self.face));
to_svg_text_pixel_escaped(&face, c.cx, sy, settings, Anchor::Middle)
}
fn calc_circle(&self, y:usize, settings: &Settings) -> Circle {
let text_width = settings.text_width;
let text_height = settings.text_height;
let radius = self.distance() as f32 / 2.0;
let center = self.startx as f32 + radius;
let cx = center * text_width;
let cy = y as f32 * text_height + text_height / 2.0;
let cr = radius * text_width;
Circle{
cx: cx,
cy: cy,
r: cr
}
}
fn get_circle(&self, y: usize, settings: &Settings)-> SvgCircle{
let c = self.calc_circle(y, settings);
let (offsetx, offsety) = settings.offset();
SvgCircle::new()
.set("cx",c.cx + offsetx)
.set("cy", c.cy + offsety)
.set("class", "donger")
.set("r", c.r)
}
}
#[derive(Debug)]
struct Circle{
cx: f32,
cy: f32,
r: f32,
}
/// Detect whether the string could be a meme face. A face qualifies when it
/// is at most 10 columns wide and has at least one of:
/// - a full-width character (display width >= 2)
/// - a zero-width character
/// - a character that takes more than 1 byte in UTF-8
/// - a character with a Unicode codepoint >= 1000
/// or when it is not a plain alphanumeric/operator expression.
fn is_meme(ch: &str) -> bool{
let total_bytes = ch.len();
let total_width = ch.width();
let mut gte_bytes2 = 0;
let mut gte_width2 = 0;
let mut zero_width = 0;
let mut gte_unicode_1k = 0;
for c in ch.chars(){
if c as u32 >= 1000{
gte_unicode_1k += 1;
}
if c.len_utf8() >= 2{
gte_bytes2 += 1;
}
if let Some(uw) = c.width(){
if uw >= 2 {
gte_width2 += 1;
}
if uw == 0 {
zero_width += 1;
}
}
}
/*
println!("total_bytes: {}", total_bytes);
println!("gte_bytes2: {}", gte_bytes2);
println!("gte_width2: {}", gte_width2);
println!("zero_width: {}", zero_width);
println!("gte_unicode_1k {}", gte_unicode_1k);
println!("total_width: {}", total_width);
println!("");
*/
total_width <= 10 && // the face must be at most 10 columns wide
(gte_bytes2 > 0 || gte_width2 > 0
|| zero_width > 0 || gte_unicode_1k > 0
|| total_bytes > total_width
|| !is_expression(ch)
)
}
fn calc_dimension(s: &str) -> (usize, usize) {
let mut longest = 0;
for line in s.lines(){
let line_width = line.width();
if line_width > longest{
longest = line_width
}
}
let line_count = s.lines().count();
(longest, line_count)
}
/// Return an SVG document built from the input text string.
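/// # Example (illustrative sketch; paths and crate layout are assumptions)
/// ```ignore
/// let svg = to_svg("ヘ( ^_^)ノ Gimme Five", 8.0, 16.0);
/// // the returned `SVG` element implements `Display`, so it can be
/// // written straight to a file or stdout
/// println!("{}", svg);
/// ```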
pub fn to_svg(s: &str, text_width: f32, text_height: f32) -> SVG {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg = SVG::new()
.set("font-size", 14)
.set("font-family", "arial");
svg.append(get_styles());
let nodes = to_svg_lines(s,settings);
for elm in nodes{
let text_node = TextNode::new(elm.to_string());
svg.append(text_node);
}
let (offsetx, offsety) = settings.offset();
let (wide, high) = calc_dimension(s);
let width = wide as f32 * text_width + offsetx;
let height = (high + 2 ) as f32 * text_height + offsety;
svg.assign("width", width);
svg.assign("height", height);
svg
}
fn get_styles() -> Style {
let style = r#"
line, path {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
}
circle {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
fill:white;
}
circle.donger{
stroke-width: 1;
fill: white;
}
tspan.head{
fill: none;
stroke: none;
}
"#;
Style::new(style)
}
/// process and parses each line
fn to_svg_lines(s: &str, settings: &Settings) -> Vec<Box<Node>> {
let mut elements = vec![];
let mut y = 0;
for line in s.lines(){
let line_elm = get_svg_elements(y, line, settings);
elements.extend(line_elm);
y += 1;
}
elements
}
/// process only 1 line
fn get_svg_elements(y: usize, s: &str, settings: &Settings) -> Vec<Box<Node>> {
let body = parse_memes(s);
body.get_svg_elements(y, settings)
}
/// Return the per-line SVG nodes, the reassembled text that is not part of any meme, and the stylesheet.
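/// # Example (illustrative sketch)
/// ```ignore
/// let (nodes, rest, style) = get_meme_svg("ヘ( ^_^)ノ hello", 8.0, 16.0);
/// assert!(!nodes.is_empty());     // the meme was rendered
/// assert!(rest.contains("hello")); // the plain text is passed through
/// ```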
pub fn get_meme_svg(input: &str, text_width: f32, text_height: f32) -> (Vec<Box<Node>>, String, Style) {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg_elements: Vec<Box<Node + 'static>> = vec![];
let mut relines = String::new();
let mut y = 0;
for line in input.lines(){
match line_to_svg_with_excess_str(y, line, settings){
Some((svg_elm, rest_text)) => {
relines.push_str(&rest_text);
relines.push('\n');
svg_elements.extend(svg_elm);
},
None => {
relines.push_str(line);
relines.push('\n');
}
}
y += 1;
}
(svg_elements, relines, get_styles())
}
/// parse the memes and return the svg together with the unmatched strings
fn line_to_svg_with_excess_str(y: usize, s: &str, settings:&Settings) -> Option<(Vec<Box<Node>>, String)>{
let body = parse_memes(s);
if body.has_memes(){
let nodes = body.get_svg_elements(y, settings);
Some((nodes, body.unify_rest_text()))
}else{
None
}
}
#[test]
fn test_1line(){
let meme = "";
let nodes = get_svg_elements(0, meme, &Settings::default());
assert_eq!(nodes.len(), 0);
}
/// TODO: include parsing the rest of the unused text
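/// How the scan works (a single forward pass over the characters):
/// - `(` opens a candidate face and records its display column (`startx`);
/// - `)` closes it, and if the collected face passes `is_meme` it becomes a `Head`;
/// - a space (or the last character) flushes either a finished `Meme`, when a
///   head was found, or a plain rest-text fragment otherwise.
/// Columns are tracked in display width (not char count) so full-width
/// characters line up correctly in the rendered SVG.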
fn parse_memes(s: &str) -> Body{
let mut memes = vec![];
let mut paren_opened = false;
let mut meme_face = String::new();
let mut index = 0;
let mut total_width = 0;
let mut face_markers:Vec<Head> = vec![];
let mut startx = 0;
let mut start_position = 0;
let mut meme_start = 0;
let mut meme_body = String::new();
let mut meme_left_side = String::new();
let mut meme_right_side = String::new();
let mut meme_head = None;
let total_chars = s.chars().count();
let mut rest_text:Vec<(usize, String)> = vec![];
for ch in s.chars(){
let last_char = index == total_chars - 1;
if meme_head.is_some(){
meme_right_side.push(ch);
}
if paren_opened && ch == ')'{ //if paren_opened and encountered a closing
paren_opened = false;
if is_meme(&meme_face){
let head = Head{
start_position: start_position,
startx: startx,
face: meme_face.clone(),
end_position: index,
endx: total_width,
};
meme_head = Some(head.clone());
face_markers.push(head);
meme_face.clear();
}
}
if paren_opened{
meme_face.push(ch);
}
if ch == '('{
paren_opened = true;
startx = total_width;
start_position = index;
meme_left_side = meme_body.clone();
meme_face.clear();
}
if meme_head.is_none() && (ch == ' ' || last_char){
meme_start = index + 1;
if !paren_opened{
let mut rest_word = meme_body.clone();
let rest_start = total_width - rest_word.width();
if last_char{
rest_word.push(ch);
rest_word.push_str(&meme_face); // the head is unmatched
}
rest_text.push((rest_start, rest_word));
}
meme_body.clear();
}
if meme_head.is_some() && (ch == ' ' || last_char){
let meme = Meme{
start_position: meme_start,
head: meme_head.clone().unwrap(),
end_position: index,
left_side: meme_left_side.clone(),
right_side: meme_right_side.clone(),
};
memes.push(meme);
meme_right_side.clear();
meme_left_side.clear();
meme_body.clear();
meme_head = None;
}
meme_body.push(ch);
if let Some(uw) = ch.width(){
total_width += uw;
}
index += 1;
}
Body{
memes: memes,
rest_str: regroup_rest_text(&rest_text)
}
}
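/// Merge adjacent rest-text fragments whose display spans touch,
/// e.g. `[(0, "The "), (4, "rest")]` collapses to `[(0, "The rest")]`.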
fn regroup_rest_text(rest_text: &Vec<(usize, String)>)->Vec<(usize, String)>{
let mut new_group = vec![];
//println!("regrouping text..");
for &(start,ref rest) in rest_text{
if new_group.is_empty(){
new_group.push((start, rest.clone()));
}else{
if let Some((lastx, last_rest)) = new_group.pop(){
if lastx + last_rest.width() == start{
let mut merged = String::new();
merged.push_str(&last_rest);
merged.push_str(rest);
new_group.push((lastx, merged));
}else{
new_group.push((lastx, last_rest));
new_group.push((start, rest.clone()));
}
}
}
}
//println!("new_group: {:#?}", new_group);
new_group
}
#[test]
fn test_meme() {
assert!(is_meme(" ͡° ͜ʖ ͡°"));
assert!(is_meme("⌐■_■"));
assert!(is_meme("ツ"));
assert!(!is_meme("svgbobg"));
assert!(!is_meme("not a meme in space"));
assert!(is_meme(" -_- "));
assert!(is_meme("-_-"));
assert!(!is_meme(" "));
}
#[test]
fn test_expression(){
assert!(is_meme("^_^"));
assert!(is_meme("x_x"));
assert!(!is_meme("+"));
assert!(!is_meme("x+y"));
assert!(!is_meme("x^2*y^2"));
assert!(!is_meme("x^2 * y^2"));
}
#[test]
fn test_bound(){
let meme = "(♥_♥)";
let memes = parse_memes(meme);
for m in memes.memes{
let b = m.head;
println!("bound {:?} d:{} ", b, b.distance());
assert_eq!(4, b.distance());
}
}
fn escape_str(s: &str) -> String{
let mut escaped = String::new();
for c in s.chars(){
escaped.push_str(&escape_char(&c));
}
escaped
}
fn escape_char(ch: &char) -> String {
// XML entity escapes. The entity strings were mangled into bare characters
// in the source; they are restored here (`&apos;` is assumed for the single
// quote, the usual choice for SVG/XML output).
let escs = [('"', "&quot;"), ('\'', "&apos;"), ('<', "&lt;"), ('>', "&gt;"), ('&', "&amp;")];
let quote_match: Option<&(char, &str)> = escs.iter()
.find(|pair| {
let &(e, _) = *pair;
e == *ch
});
let quoted: String = match quote_match {
Some(&(_, quoted)) => String::from(quoted),
None => {
let mut s = String::new();
s.push(*ch);
s
}
};
quoted
}
fn is_operator(c: char) -> bool{
c == '+' || c == '-' || c == '*' || c == '/'
|| c == '^' || c == '%' || c == '!' || c == ','
|| c == '.' || c == '=' || c == '|' || c == '&'
}
#[test]
fn test_operator(){
assert!(!is_expression("^_^"));
assert!(is_operator('+'));
assert!(is_expression("+"));
assert!(is_expression("x+y"));
assert!(is_expression("x^2*y^2"));
assert!(is_expression("x^2 * y^2"));
}
//TODO: alternate alphanumeric_space and operator
fn is_expression(ch: &str) -> bool{
is_alphanumeric_space_operator(ch)
}
fn is_alphanumeric_space_operator(ch: &str) -> bool{
ch.chars().all(|c| c.is_alphanumeric() || c == ' ' || c == '_' || is_operator(c))
}
#[test]
fn test_body(){
let meme = "( ^_^)ノ";
println!("{}", meme);
let bodies = parse_memes(meme);
for b in &bodies.memes{
println!("{:#?}",b);
}
assert_eq!(1, bodies.memes.len());
}
#[test]
fn test_body2(){
let meme = r#"ヘ( ^_^)ノ \(^_^ )Gimme Five"#; // raw string: `\(` is not a valid escape in a normal string literal
println!("{}", meme);
let bodies = parse_memes(meme);
for b in &bodies.memes{
println!("{:#?}",b);
}
assert_eq!(2, bodies.memes.len());
}
#[test]
fn test_rest_of_text(){
let meme = r#"The rest of 凸(•̀_•́)凸❤️ ( ͡° ͜ʖ ͡°) \(°□°)/层∀ the text is here"#;
println!("{}", meme);
let bodies = parse_memes(meme);
println!("{:#?}",bodies);
assert_eq!(3, bodies.memes.len());
assert_eq!(2, bodies.rest_str.len());
}
#[test]
fn test_unify_rest_of_text(){
let meme = r#"The rest of 凸(•̀_•́)凸❤️ ( ͡° ͜ʖ ͡°) \(°□°)/层∀ the text is here"#;
let resi = r#"The rest of the text is here"#;
println!("{}", meme);
let bodies = parse_memes(meme);
println!("{:#?}",bodies);
assert_eq!(3, bodies.memes.len());
assert_eq!(2, bodies.rest_str.len());
assert_eq!(meme.width(), bodies.unify_rest_text().width());
println!("residue: {} vs {}", bodies.unify_rest_text(), resi);
// NOTE: the original file was truncated here in the source; the remainder of
// this test (presumably a final assertion against `resi`) is lost.
}
service.rs

/*
* Copyright 2019 OysterPack Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Message Broker Actor Service
use crate::message;
use sodiumoxide::crypto::box_;
use futures::prelude::*;
use oysterpack_errors::Error;
use std::collections::HashMap;
use std::fmt;
// TODO: provide integration with https://docs.rs/async-bincode/0.4.9/async_bincode
// TODO: schedule a periodic job to clear precomputed keys that have not been used in a while
// TODO: metrics per message type
// TODO: support for replay protection based on the message's InstanceID timestamp - "old" messages are rejected
// TODO: a message is considered "old" if its timestamp is older than the most recent message processed within the client session
// TODO: support for sequential processing based on the message sequence - could be strict or loose
// TODO: support for signed addresses - a request is not accepted unless the address signature is verified
// TODO: all message types must have a deadline to ensure server resources are protected (and potentially from attack)
// TODO: message types must have a max deadline configured, to protect against attacks
/// Messaging actor service
/// - is a sync actor because it needs to perform CPU-bound work for cryptography and compression
/// - the service is assigned a public-key based address
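/// # Construction (illustrative sketch; assumes a running actix `System`)
/// ```ignore
/// let (pk, sk) = sodiumoxide::crypto::box_::gen_keypair();
/// let service = MessageService::new(pk.into(), sk);
/// let addr = actix::Actor::start(service);
/// ```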
pub struct MessageService {
address: message::Address,
private_key: box_::SecretKey,
// sender -> precomputed key
precomputed_keys: HashMap<message::Address, box_::PrecomputedKey>,
message_handlers: HashMap<message::MessageType, actix::Recipient<Request>>,
}
impl MessageService {
/// constructor
pub fn new(address: message::Address, private_key: box_::SecretKey) -> MessageService {
MessageService {
address,
private_key,
precomputed_keys: HashMap::new(),
message_handlers: HashMap::new(),
}
}
}
impl fmt::Debug for MessageService {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let keys: Vec<&message::MessageType> = self.message_handlers.keys().collect();
write!(f, "MessageService(message_type:{:?})", keys)
}
}
impl actix::Actor for MessageService {
type Context = actix::Context<Self>;
}
/// Used to register a message handler
#[derive(Clone)]
pub struct RegisterMessageHandler {
message_type: message::MessageType,
handler: actix::Recipient<Request>,
}
impl RegisterMessageHandler {
/// constructor
pub fn new(
message_type: message::MessageType,
handler: actix::Recipient<Request>,
) -> RegisterMessageHandler {
RegisterMessageHandler {
message_type,
handler,
}
}
}
impl fmt::Debug for RegisterMessageHandler {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RegisterMessageHandler({:?})", self.message_type)
}
}
impl actix::Message for RegisterMessageHandler {
type Result = ();
}
/// Message Request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Request(pub message::EncodedMessage);
impl actix::Message for Request {
type Result = Result<message::EncodedMessage, Error>;
}
impl actix::Handler<RegisterMessageHandler> for MessageService {
type Result = actix::MessageResult<RegisterMessageHandler>;
fn handle(&mut self, msg: RegisterMessageHandler, _: &mut Self::Context) -> Self::Result {
self.message_handlers.insert(msg.message_type, msg.handler);
actix::MessageResult(())
}
}
/// Get the list of registered message types
#[derive(Debug, Copy, Clone)]
pub struct GetRegisteredMessageTypes;
impl actix::Message for GetRegisteredMessageTypes {
type Result = Vec<message::MessageType>;
}
impl actix::Handler<GetRegisteredMessageTypes> for MessageService {
type Result = actix::MessageResult<GetRegisteredMessageTypes>;
fn handle(&mut self, _: GetRegisteredMessageTypes, _: &mut Self::Context) -> Self::Result {
let message_types: Vec<message::MessageType> =
self.message_handlers.keys().cloned().collect();
actix::MessageResult(message_types)
}
}
/// Message that indicates that a client has disconnected.
/// - the server should clean up any client related resources
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct ClientDisconnect(message::Address);
impl actix::Message for ClientDisconnect {
type Result = ();
}
impl actix::Handler<ClientDisconnect> for MessageService {
type Result = actix::MessageResult<ClientDisconnect>;
fn handle(&mut self, msg: ClientDisconnect, _: &mut Self::Context) -> Self::Result {
self.precomputed_keys.remove(&msg.0);
actix::MessageResult(())
}
}
// TODO: add an optional token, which is used to pay for the request
/// Process the SealedEnvelope request
#[derive(Debug, Clone)]
pub struct SealedEnvelopeRequest(pub message::SealedEnvelope);
impl actix::Message for SealedEnvelopeRequest {
type Result = Result<message::SealedEnvelope, Error>;
}
impl actix::Handler<SealedEnvelopeRequest> for MessageService {
type Result = actix::Response<message::SealedEnvelope, Error>;
fn handle(&mut self, req: SealedEnvelopeRequest, _: &mut Self::Context) -> Self::Result {
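// Look up (or lazily compute and cache) the shared key for this sender.
// `box_::precompute` performs the expensive Diffie-Hellman step once, so
// later requests from the same client only pay for the cheap symmetric part.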
let key = {
let private_key = &self.private_key;
self.precomputed_keys
.entry(*req.0.sender())
.or_insert_with(|| box_::precompute(req.0.sender().public_key(), private_key))
.clone()
};
fn process_message(
sender: message::Address,
handler: &actix::Recipient<Request>,
encoded_message: message::EncodedMessage,
key: box_::PrecomputedKey,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let msg_type = encoded_message.metadata().message_type();
let send = {
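// A message deadline is interpreted relative to the message's creation
// time (taken from its ULID instance id) and mapped onto a mailbox send
// timeout; an already-expired deadline degrades to a zero timeout.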
if let Some(deadline) = encoded_message.metadata().deadline() {
let duration = deadline
.duration(encoded_message.metadata().instance_id().ulid().datetime())
.to_std()
.unwrap_or(std::time::Duration::from_millis(0));
handler.send(Request(encoded_message)).timeout(duration)
} else {
handler.send(Request(encoded_message))
}
};
let fut = send
.map_err(move |err| {
op_error!(errors::MailboxDeliveryError::new(&sender, msg_type, err))
})
.and_then(|result| {
let result = match result {
Ok(encoded_message) => encoded_message.open_envelope(),
Err(e) => Err(e),
};
futures::future::result(result)
})
.and_then(move |result| futures::future::ok(result.seal(&key)));
Box::new(fut)
}
fn unsupported_message_type(
sender: message::Address,
msg_type: message::MessageType,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let fut = futures::future::err(op_error!(errors::UnsupportedMessageType::new(
&sender, msg_type
)));
Box::new(fut)
}
fn message_error(
err: Error,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
Box::new(futures::future::err(err))
}
let sender = *req.0.sender();
let result = req
.0
.open(&key)
.and_then(|open_envelope| open_envelope.encoded_message())
.and_then(|encoded_message| {
let message_type = encoded_message.metadata().message_type();
let fut = match self.message_handlers.get(&message_type) {
Some(handler) => process_message(sender, handler, encoded_message, key),
None => {
unsupported_message_type(sender, encoded_message.metadata().message_type())
}
};
Ok(fut)
});
match result {
Ok(fut) => actix::Response::r#async(fut),
Err(err) => actix::Response::r#async(message_error(err)),
}
}
}
/// MessageService errors
pub mod errors {
use crate::message;
use oysterpack_errors::{Id, IsError, Level};
use std::fmt;
/// UnsupportedMessageType
#[derive(Debug)]
pub struct UnsupportedMessageType<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
}
impl UnsupportedMessageType<'_> {
/// Error Id(01CYQ74Q46EAAPHBD95NJAXJGG)
pub const ERROR_ID: Id = Id(1867572772130723204709592385635404304);
/// Level::Alert because receiving a message for a type we do not support should be investigated
/// - this could be an attack
/// - this could be an app config issue
/// - this could be a client config issue - the client should be notified
pub const ERROR_LEVEL: Level = Level::Alert;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
) -> UnsupportedMessageType {
UnsupportedMessageType {
sender,
message_type,
}
}
}
impl IsError for UnsupportedMessageType<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for UnsupportedMessageType<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: unsupported message type ({})",
self.sender, self.message_type
)
}
}
/// MailboxDeliveryError
#[derive(Debug)]
pub struct MailboxDeliveryError<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
}
impl MailboxDeliveryError<'_> {
/// Error Id(01CYQ8F5HQMW5PETXBQAF3KF79)
pub const ERROR_ID: Id = Id(1867574453777009173831417913650298089);
/// Level::Critical
/// - if messages are timing out, then this is higher priority
/// - it could mean performance degradation issues
/// - it could mean clients are submitting requests with timeouts that are too low
/// - if the mailbox is closed, then this could mean a bug if it occurs while the app is
/// running, i.e., not shutting down
/// - because of timing issues, the mailbox may be closed during application shutdown
pub const ERROR_LEVEL: Level = Level::Critical;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
) -> MailboxDeliveryError {
MailboxDeliveryError {
sender,
message_type,
err,
}
}
}
impl IsError for MailboxDeliveryError<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for MailboxDeliveryError<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: mailbox delivery error for message type: {} : {}",
self.sender, self.message_type, self.err,
)
}
}
}
#[allow(warnings)]
#[cfg(test)]
mod tests {
use crate::actor;
use sodiumoxide::crypto::box_;
use futures::prelude::*;
struct EchoService;
impl actix::Actor for EchoService {
type Context = actix::Context<Self>;
}
impl actix::Handler<super::Request> for EchoService {
type Result = actix::MessageResult<super::Request>;
fn handle(&mut self, req: super::Request, _: &mut Self::Context) -> Self::Result {
actix::MessageResult(Ok(req.0))
}
}
fn log_config() -> oysterpack_log::LogConfig {
oysterpack_log::config::LogConfigBuilder::new(oysterpack_log::Level::Info).build()
}
const MESSAGE_SERVICE: actor::arbiters::Name = actor::arbiters::Name("MESSAGE_SERVICE");
#[test]
fn message_service() {
let (client_pub_key, client_priv_key) = box_::gen_keypair();
let (server_pub_key, server_priv_key) = box_::gen_keypair();
let addresses =
crate::message::Addresses::new(client_pub_key.into(), server_pub_key.into());
let server_address = addresses.recipient().clone();
use crate::message::IsMessage;
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)]
struct Foo(String);
impl IsMessage for Foo {
const MESSAGE_TYPE_ID: crate::message::MessageTypeId =
crate::message::MessageTypeId(1867384532653698871582487715619812439);
}
let sealed_envelope = {
let metadata = crate::message::Metadata::new(
Foo::MESSAGE_TYPE_ID.message_type(),
crate::message::Encoding::Bincode(Some(crate::message::Compression::Snappy)),
Some(crate::message::Deadline::ProcessingTimeoutMillis(10)),
);
let msg = crate::message::Message::new(
metadata,
Foo("cryptocurrency is changing the world through decentralization".to_string()),
);
let msg = msg
.encoded_message(addresses.sender().clone(), addresses.recipient().clone())
.unwrap();
let msg = msg.open_envelope().unwrap();
let msg = msg.seal(
&addresses
.recipient()
.precompute_sealing_key(&client_priv_key),
);
msg
};
struct FooActor;
impl actix::Actor for FooActor {
type Context = actix::Context<Self>;
}
impl actix::Handler<super::Request> for FooActor {
type Result = actix::MessageResult<super::Request>;
fn handle(&mut self, req: super::Request, _: &mut Self::Context) -> Self::Result {
actix::MessageResult(Ok(req.0))
}
}
const FOO: crate::actor::arbiters::Name = crate::actor::arbiters::Name("FOO");
actor::app::App::run(
crate::build::get(),
log_config(),
futures::future::lazy(move || {
actor::arbiters::start_actor(MESSAGE_SERVICE, move |_| {
super::MessageService::new(server_address, server_priv_key)
})
.and_then(|addr| {
let register_foo_actor = crate::actor::arbiters::start_actor(FOO, |_| FooActor);
let register_message_type = {
let addr = addr.clone();
register_foo_actor.and_then(move |foo| {
let foo = foo.recipient();
addr.send(super::RegisterMessageHandler::new(
Foo::MESSAGE_TYPE_ID.message_type(),
foo,
))
})
};
register_message_type
.and_then(move |_| addr.send(super::SealedEnvelopeRequest(sealed_envelope)))
})
.then(|result| {
let result = result.unwrap();
match result {
Ok(msg) => info!("result: {}", msg),
Err(e) => panic!("{}", e),
}
futures::future::ok::<(), ()>(())
})
}),
);
}
}
service.rs | /*
* Copyright 2019 OysterPack Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Message Broker Actor Service
use crate::message;
use sodiumoxide::crypto::box_;
use futures::prelude::*;
use oysterpack_errors::Error;
use std::collections::HashMap;
use std::fmt;
// TODO: provide integration with https://docs.rs/async-bincode/0.4.9/async_bincode
// TODO: schedule a periodic job to clear precomputed keys that have not been used in a while
// TODO: metrics per message type
// TODO: support for replay protection based on the message's InstanceID timestamp - "old" messages are rejected
// TODO: a message is considered "old" if its timestamp is older than the most recent message processed within the client session
// TODO: support for seqential processing based on the message sequence - could be strict or loose
// TODO: support for signed addresses - a request is not accepted unless the address signature is verified
// TODO: all message types must have a deadline to ensure server resources are protected (and potentially from attack)
// TODO: message types must have a max deadline configured, to protect against attacks
/// Messaging actor service
/// - is a sync actor because it needs to perform CPU bound load for cryptography and compression
/// - the service is assigned a public-key based address
pub struct MessageService {
address: message::Address,
private_key: box_::SecretKey,
// sender -> precomputed key
precomputed_keys: HashMap<message::Address, box_::PrecomputedKey>,
message_handlers: HashMap<message::MessageType, actix::Recipient<Request>>,
}
impl MessageService {
/// constructor
pub fn new(address: message::Address, private_key: box_::SecretKey) -> MessageService {
MessageService {
address,
private_key,
precomputed_keys: HashMap::new(),
message_handlers: HashMap::new(),
}
}
}
impl fmt::Debug for MessageService {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let keys: Vec<&message::MessageType> = self.message_handlers.keys().collect();
write!(f, "MessageService(message_type:{:?})", keys)
}
}
impl actix::Actor for MessageService {
type Context = actix::Context<Self>;
}
/// Used to register a message handler
#[derive(Clone)]
pub struct RegisterMessageHandler {
message_type: message::MessageType,
handler: actix::Recipient<Request>,
}
impl RegisterMessageHandler {
/// constructor
pub fn new(
message_type: message::MessageType,
handler: actix::Recipient<Request>,
) -> RegisterMessageHandler {
RegisterMessageHandler {
message_type,
handler,
}
}
}
impl fmt::Debug for RegisterMessageHandler {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RegisterMessageHandler({:?})", self.message_type)
}
}
impl actix::Message for RegisterMessageHandler {
type Result = ();
}
/// Message Request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Request(pub message::EncodedMessage);
impl actix::Message for Request {
type Result = Result<message::EncodedMessage, Error>;
}
impl actix::Handler<RegisterMessageHandler> for MessageService {
type Result = actix::MessageResult<RegisterMessageHandler>;
fn handle(&mut self, msg: RegisterMessageHandler, _: &mut Self::Context) -> Self::Result {
self.message_handlers.insert(msg.message_type, msg.handler);
actix::MessageResult(())
}
}
/// Get the list of registered message types
#[derive(Debug, Copy, Clone)]
pub struct GetRegisteredMessageTypes;
impl actix::Message for GetRegisteredMessageTypes {
type Result = Vec<message::MessageType>;
}
impl actix::Handler<GetRegisteredMessageTypes> for MessageService {
type Result = actix::MessageResult<GetRegisteredMessageTypes>;
fn handle(&mut self, _: GetRegisteredMessageTypes, _: &mut Self::Context) -> Self::Result {
let message_types: Vec<message::MessageType> =
self.message_handlers.keys().cloned().collect();
actix::MessageResult(message_types)
}
}
/// Message that indicates that a client has disconnected.
/// - the server should clean up any client related resources
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct ClientDisconnect(message::Address);
impl actix::Message for ClientDisconnect {
type Result = ();
}
impl actix::Handler<ClientDisconnect> for MessageService {
type Result = actix::MessageResult<ClientDisconnect>;
fn handle(&mut self, msg: ClientDisconnect, _: &mut Self::Context) -> Self::Result {
self.precomputed_keys.remove(&msg.0);
actix::MessageResult(())
}
}
// TODO: add an optional token, which is used to pay for the request
/// Process the SealedEnvelope request
#[derive(Debug, Clone)]
pub struct SealedEnvelopeRequest(pub message::SealedEnvelope);
impl actix::Message for SealedEnvelopeRequest {
type Result = Result<message::SealedEnvelope, Error>;
}
impl actix::Handler<SealedEnvelopeRequest> for MessageService {
type Result = actix::Response<message::SealedEnvelope, Error>;
fn handle(&mut self, req: SealedEnvelopeRequest, _: &mut Self::Context) -> Self::Result {
let key = {
let private_key = &self.private_key;
self.precomputed_keys
.entry(*req.0.sender())
.or_insert_with(|| box_::precompute(req.0.sender().public_key(), private_key))
.clone()
};
fn process_message(
sender: message::Address,
handler: &actix::Recipient<Request>,
encoded_message: message::EncodedMessage,
key: box_::PrecomputedKey,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let msg_type = encoded_message.metadata().message_type();
let send = {
if let Some(deadline) = encoded_message.metadata().deadline() {
let duration = deadline
.duration(encoded_message.metadata().instance_id().ulid().datetime())
.to_std()
.or(Ok(std::time::Duration::from_millis(0))
as Result<std::time::Duration, ()>)
.unwrap();
handler.send(Request(encoded_message)).timeout(duration)
} else {
handler.send(Request(encoded_message))
}
};
let fut = send
.map_err(move |err| {
op_error!(errors::MailboxDeliveryError::new(&sender, msg_type, err))
})
.and_then(|result| {
let result = match result {
Ok(encoded_message) => encoded_message.open_envelope(),
Err(e) => Err(e),
};
futures::future::result(result)
})
.and_then(move |result| futures::future::ok(result.seal(&key)));
Box::new(fut)
}
fn unsupported_message_type(
sender: message::Address,
msg_type: message::MessageType,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let fut = futures::future::err(op_error!(errors::UnsupportedMessageType::new(
&sender, msg_type
)));
Box::new(fut)
}
fn message_error(
err: Error,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
Box::new(futures::future::err(err))
}
let sender = *req.0.sender();
let result = req
.0
.open(&key)
.and_then(|open_envelope| open_envelope.encoded_message())
.and_then(|encoded_message| {
let message_type = encoded_message.metadata().message_type();
let fut = match self.message_handlers.get(&message_type) {
Some(handler) => process_message(sender, handler, encoded_message, key),
None => {
unsupported_message_type(sender, encoded_message.metadata().message_type())
}
};
Ok(fut)
});
match result {
Ok(fut) => actix::Response::r#async(fut),
Err(err) => actix::Response::r#async(message_error(err)),
}
}
}
/// MessageService errors
pub mod errors {
use crate::message;
use oysterpack_errors::{Id, IsError, Level};
use std::fmt;
/// UnsupportedMessageType
#[derive(Debug)]
pub struct UnsupportedMessageType<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
}
impl UnsupportedMessageType<'_> {
/// Error Id(01CYQ74Q46EAAPHBD95NJAXJGG)
pub const ERROR_ID: Id = Id(1867572772130723204709592385635404304);
/// Level::Alert because receiving a message for a type we do not support should be investigated
/// - this could be an attack
/// - this could be an app config issue
/// - this could be a client config issue - the client should be notified
pub const ERROR_LEVEL: Level = Level::Alert;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
) -> UnsupportedMessageType {
UnsupportedMessageType {
sender,
message_type,
}
}
}
impl IsError for UnsupportedMessageType<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for UnsupportedMessageType<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: unsupported message type ({})",
self.sender, self.message_type
)
}
}
/// MailboxDeliveryError
#[derive(Debug)]
pub struct MailboxDeliveryError<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
}
impl MailboxDeliveryError<'_> {
/// Error Id(01CYQ8F5HQMW5PETXBQAF3KF79)
pub const ERROR_ID: Id = Id(1867574453777009173831417913650298089);
/// Level::Critical
/// - if messages are timing out, then this is higher priority
/// - it could mean performance degradation issues
/// - it could mean clients are submitting requests with timeouts that are too low
/// - if the mailbox is closed, then this could mean a bug if it occurrs while the app is
/// running, i.e., not shutting down
/// - because of timing issues, the mailbox may be closed during application shutdown
pub const ERROR_LEVEL: Level = Level::Critical;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
) -> MailboxDeliveryError {
MailboxDeliveryError {
sender,
message_type,
err,
}
}
}
impl IsError for MailboxDeliveryError<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for MailboxDeliveryError<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: mailbox delivery error for message type: {} : {}",
self.sender, self.message_type, self.err,
)
}
}
}
#[allow(warnings)]
#[cfg(test)]
mod tests {
use crate::actor;
use sodiumoxide::crypto::box_;
use futures::prelude::*;
struct EchoService;
impl actix::Actor for EchoService {
type Context = actix::Context<Self>;
}
impl actix::Handler<super::Request> for EchoService {
type Result = actix::MessageResult<super::Request>;
fn handle(&mut self, req: super::Request, _: &mut Self::Context) -> Self::Result { | }
}
fn log_config() -> oysterpack_log::LogConfig {
oysterpack_log::config::LogConfigBuilder::new(oysterpack_log::Level::Info).build()
}
const MESSAGE_SERVICE: actor::arbiters::Name = actor::arbiters::Name("MESSAGE_SERVICE");
#[test]
fn message_service() {
let (client_pub_key, client_priv_key) = box_::gen_keypair();
let (server_pub_key, server_priv_key) = box_::gen_keypair();
let addresses =
crate::message::Addresses::new(client_pub_key.into(), server_pub_key.into());
let server_address = addresses.recipient().clone();
use crate::message::IsMessage;
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)]
struct Foo(String);
impl IsMessage for Foo {
const MESSAGE_TYPE_ID: crate::message::MessageTypeId =
crate::message::MessageTypeId(1867384532653698871582487715619812439);
}
let sealed_envelope = {
let metadata = crate::message::Metadata::new(
Foo::MESSAGE_TYPE_ID.message_type(),
crate::message::Encoding::Bincode(Some(crate::message::Compression::Snappy)),
Some(crate::message::Deadline::ProcessingTimeoutMillis(10)),
);
let msg = crate::message::Message::new(
metadata,
Foo("cryptocurrency is changing the world through decentralization".to_string()),
);
let msg = msg
.encoded_message(addresses.sender().clone(), addresses.recipient().clone())
.unwrap();
let msg = msg.open_envelope().unwrap();
let msg = msg.seal(
&addresses
.recipient()
.precompute_sealing_key(&client_priv_key),
);
msg
};
struct FooActor;
impl actix::Actor for FooActor {
type Context = actix::Context<Self>;
}
impl actix::Handler<super::Request> for FooActor {
type Result = actix::MessageResult<super::Request>;
fn handle(&mut self, req: super::Request, _: &mut Self::Context) -> Self::Result {
actix::MessageResult(Ok(req.0))
}
}
const FOO: crate::actor::arbiters::Name = crate::actor::arbiters::Name("FOO");
actor::app::App::run(
crate::build::get(),
log_config(),
futures::future::lazy(move || {
actor::arbiters::start_actor(MESSAGE_SERVICE, move |_| {
super::MessageService::new(server_address, server_priv_key)
})
.and_then(|addr| {
let register_foo_actor = crate::actor::arbiters::start_actor(FOO, |_| FooActor);
let register_message_type = {
let addr = addr.clone();
register_foo_actor.and_then(move |foo| {
let foo = foo.recipient();
addr.send(super::RegisterMessageHandler::new(
Foo::MESSAGE_TYPE_ID.message_type(),
foo,
))
})
};
register_message_type
.and_then(move |_| addr.send(super::SealedEnvelopeRequest(sealed_envelope)))
})
.then(|result| {
let result = result.unwrap();
match result {
Ok(msg) => info!("result: {}", msg),
Err(e) => panic!("{}", e),
}
futures::future::ok::<(), ()>(())
})
}),
);
}
} | actix::MessageResult(Ok(req.0)) | random_line_split |
service.rs | /*
* Copyright 2019 OysterPack Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Message Broker Actor Service
use crate::message;
use sodiumoxide::crypto::box_;
use futures::prelude::*;
use oysterpack_errors::Error;
use std::collections::HashMap;
use std::fmt;
// TODO: provide integration with https://docs.rs/async-bincode/0.4.9/async_bincode
// TODO: schedule a periodic job to clear precomputed keys that have not been used in a while
// TODO: metrics per message type
// TODO: support for replay protection based on the message's InstanceID timestamp - "old" messages are rejected
// TODO: a message is considered "old" if its timestamp is older than the most recent message processed within the client session
// TODO: support for seqential processing based on the message sequence - could be strict or loose
// TODO: support for signed addresses - a request is not accepted unless the address signature is verified
// TODO: all message types must have a deadline to ensure server resources are protected (and potentially from attack)
// TODO: message types must have a max deadline configured, to protect against attacks
/// Messaging actor service
/// - is a sync actor because it needs to perform CPU bound load for cryptography and compression
/// - the service is assigned a public-key based address
pub struct MessageService {
address: message::Address,
private_key: box_::SecretKey,
// sender -> precomputed key
precomputed_keys: HashMap<message::Address, box_::PrecomputedKey>,
message_handlers: HashMap<message::MessageType, actix::Recipient<Request>>,
}
impl MessageService {
/// constructor
pub fn new(address: message::Address, private_key: box_::SecretKey) -> MessageService {
MessageService {
address,
private_key,
precomputed_keys: HashMap::new(),
message_handlers: HashMap::new(),
}
}
}
impl fmt::Debug for MessageService {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let keys: Vec<&message::MessageType> = self.message_handlers.keys().collect();
write!(f, "MessageService(message_type:{:?})", keys)
}
}
impl actix::Actor for MessageService {
type Context = actix::Context<Self>;
}
/// Used to register a message handler
#[derive(Clone)]
pub struct RegisterMessageHandler {
message_type: message::MessageType,
handler: actix::Recipient<Request>,
}
impl RegisterMessageHandler {
/// constructor
pub fn new(
message_type: message::MessageType,
handler: actix::Recipient<Request>,
) -> RegisterMessageHandler {
RegisterMessageHandler {
message_type,
handler,
}
}
}
impl fmt::Debug for RegisterMessageHandler {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RegisterMessageHandler({:?})", self.message_type)
}
}
impl actix::Message for RegisterMessageHandler {
type Result = ();
}
/// Message Request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Request(pub message::EncodedMessage);
impl actix::Message for Request {
type Result = Result<message::EncodedMessage, Error>;
}
impl actix::Handler<RegisterMessageHandler> for MessageService {
type Result = actix::MessageResult<RegisterMessageHandler>;
fn handle(&mut self, msg: RegisterMessageHandler, _: &mut Self::Context) -> Self::Result {
self.message_handlers.insert(msg.message_type, msg.handler);
actix::MessageResult(())
}
}
/// Get the list of registered message types
#[derive(Debug, Copy, Clone)]
pub struct GetRegisteredMessageTypes;
impl actix::Message for GetRegisteredMessageTypes {
type Result = Vec<message::MessageType>;
}
impl actix::Handler<GetRegisteredMessageTypes> for MessageService {
type Result = actix::MessageResult<GetRegisteredMessageTypes>;
fn handle(&mut self, _: GetRegisteredMessageTypes, _: &mut Self::Context) -> Self::Result {
let message_types: Vec<message::MessageType> =
self.message_handlers.keys().cloned().collect();
actix::MessageResult(message_types)
}
}
/// Message that indicates that a client has disconnected.
/// - the server should clean up any client related resources
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct ClientDisconnect(message::Address);
impl actix::Message for ClientDisconnect {
type Result = ();
}
impl actix::Handler<ClientDisconnect> for MessageService {
type Result = actix::MessageResult<ClientDisconnect>;
fn handle(&mut self, msg: ClientDisconnect, _: &mut Self::Context) -> Self::Result {
self.precomputed_keys.remove(&msg.0);
actix::MessageResult(())
}
}
// TODO: add an optional token, which is used to pay for the request
/// Process the SealedEnvelope request
#[derive(Debug, Clone)]
pub struct SealedEnvelopeRequest(pub message::SealedEnvelope);
impl actix::Message for SealedEnvelopeRequest {
type Result = Result<message::SealedEnvelope, Error>;
}
impl actix::Handler<SealedEnvelopeRequest> for MessageService {
type Result = actix::Response<message::SealedEnvelope, Error>;
fn handle(&mut self, req: SealedEnvelopeRequest, _: &mut Self::Context) -> Self::Result {
let key = {
let private_key = &self.private_key;
self.precomputed_keys
.entry(*req.0.sender())
.or_insert_with(|| box_::precompute(req.0.sender().public_key(), private_key))
.clone()
};
fn process_message(
sender: message::Address,
handler: &actix::Recipient<Request>,
encoded_message: message::EncodedMessage,
key: box_::PrecomputedKey,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let msg_type = encoded_message.metadata().message_type();
let send = {
if let Some(deadline) = encoded_message.metadata().deadline() {
// fall back to a zero timeout when the deadline has already passed
let duration = deadline
.duration(encoded_message.metadata().instance_id().ulid().datetime())
.to_std()
.unwrap_or_else(|_| std::time::Duration::from_millis(0));
handler.send(Request(encoded_message)).timeout(duration)
} else {
handler.send(Request(encoded_message))
}
};
let fut = send
.map_err(move |err| {
op_error!(errors::MailboxDeliveryError::new(&sender, msg_type, err))
})
.and_then(|result| {
let result = match result {
Ok(encoded_message) => encoded_message.open_envelope(),
Err(e) => Err(e),
};
futures::future::result(result)
})
.and_then(move |result| futures::future::ok(result.seal(&key)));
Box::new(fut)
}
fn unsupported_message_type(
sender: message::Address,
msg_type: message::MessageType,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let fut = futures::future::err(op_error!(errors::UnsupportedMessageType::new(
&sender, msg_type
)));
Box::new(fut)
}
fn message_error(
err: Error,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
Box::new(futures::future::err(err))
}
let sender = *req.0.sender();
let result = req
.0
.open(&key)
.and_then(|open_envelope| open_envelope.encoded_message())
.and_then(|encoded_message| {
let message_type = encoded_message.metadata().message_type();
let fut = match self.message_handlers.get(&message_type) {
Some(handler) => process_message(sender, handler, encoded_message, key),
None => {
unsupported_message_type(sender, encoded_message.metadata().message_type())
}
};
Ok(fut)
});
match result {
Ok(fut) => actix::Response::r#async(fut),
Err(err) => actix::Response::r#async(message_error(err)),
}
}
}
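// --- Example (not part of the original source) ---
// A hypothetical, self-contained sketch of why the handler above caches
// precomputed keys: `box_::precompute` performs the expensive Curve25519
// shared-secret derivation once per peer, after which sealing and opening
// individual messages with the `PrecomputedKey` is comparatively cheap.
#[cfg(test)]
mod precomputed_key_sketch {
use sodiumoxide::crypto::box_;
#[test]
fn seal_and_open_with_precomputed_key() {
sodiumoxide::init().unwrap();
let (client_pub_key, client_priv_key) = box_::gen_keypair();
let (server_pub_key, server_priv_key) = box_::gen_keypair();
// both sides derive the same shared key once per peer
let server_side = box_::precompute(&client_pub_key, &server_priv_key);
let client_side = box_::precompute(&server_pub_key, &client_priv_key);
let nonce = box_::gen_nonce();
let sealed = box_::seal_precomputed(b"ping", &nonce, &client_side);
let opened = box_::open_precomputed(&sealed, &nonce, &server_side).unwrap();
assert_eq!(opened, b"ping");
}
}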
/// MessageService errors
pub mod errors {
use crate::message;
use oysterpack_errors::{Id, IsError, Level};
use std::fmt;
/// UnsupportedMessageType
#[derive(Debug)]
pub struct UnsupportedMessageType<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
}
impl UnsupportedMessageType<'_> {
/// Error Id(01CYQ74Q46EAAPHBD95NJAXJGG)
pub const ERROR_ID: Id = Id(1867572772130723204709592385635404304);
/// Level::Alert because receiving a message for a type we do not support should be investigated
/// - this could be an attack
/// - this could be an app config issue
/// - this could be a client config issue - the client should be notified
pub const ERROR_LEVEL: Level = Level::Alert;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
) -> UnsupportedMessageType {
UnsupportedMessageType {
sender,
message_type,
}
}
}
impl IsError for UnsupportedMessageType<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for UnsupportedMessageType<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: unsupported message type ({})",
self.sender, self.message_type
)
}
}
/// MailboxDeliveryError
#[derive(Debug)]
pub struct MailboxDeliveryError<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
}
impl MailboxDeliveryError<'_> {
/// Error Id(01CYQ8F5HQMW5PETXBQAF3KF79)
pub const ERROR_ID: Id = Id(1867574453777009173831417913650298089);
/// Level::Critical
/// - if messages are timing out, then this is higher priority
/// - it could mean performance degradation issues
/// - it could mean clients are submitting requests with timeouts that are too low
/// - if the mailbox is closed, then this could mean a bug if it occurs while the app is
/// running, i.e., not shutting down
/// - because of timing issues, the mailbox may be closed during application shutdown
pub const ERROR_LEVEL: Level = Level::Critical;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
) -> MailboxDeliveryError {
MailboxDeliveryError {
sender,
message_type,
err,
}
}
}
impl IsError for MailboxDeliveryError<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for MailboxDeliveryError<'_> {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: mailbox delivery error for message type: {} : {}",
self.sender, self.message_type, self.err,
)
}
}
}
#[allow(warnings)]
#[cfg(test)]
mod tests {
use crate::actor;
use sodiumoxide::crypto::box_;
use futures::prelude::*;
struct EchoService;
impl actix::Actor for EchoService {
type Context = actix::Context<Self>;
}
impl actix::Handler<super::Request> for EchoService {
type Result = actix::MessageResult<super::Request>;
fn handle(&mut self, req: super::Request, _: &mut Self::Context) -> Self::Result {
actix::MessageResult(Ok(req.0))
}
}
fn log_config() -> oysterpack_log::LogConfig {
oysterpack_log::config::LogConfigBuilder::new(oysterpack_log::Level::Info).build()
}
const MESSAGE_SERVICE: actor::arbiters::Name = actor::arbiters::Name("MESSAGE_SERVICE");
#[test]
fn message_service() {
let (client_pub_key, client_priv_key) = box_::gen_keypair();
let (server_pub_key, server_priv_key) = box_::gen_keypair();
let addresses =
crate::message::Addresses::new(client_pub_key.into(), server_pub_key.into());
let server_address = addresses.recipient().clone();
use crate::message::IsMessage;
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)]
struct Foo(String);
impl IsMessage for Foo {
const MESSAGE_TYPE_ID: crate::message::MessageTypeId =
crate::message::MessageTypeId(1867384532653698871582487715619812439);
}
let sealed_envelope = {
let metadata = crate::message::Metadata::new(
Foo::MESSAGE_TYPE_ID.message_type(),
crate::message::Encoding::Bincode(Some(crate::message::Compression::Snappy)),
Some(crate::message::Deadline::ProcessingTimeoutMillis(10)),
);
let msg = crate::message::Message::new(
metadata,
Foo("cryptocurrency is changing the world through decentralization".to_string()),
);
let msg = msg
.encoded_message(addresses.sender().clone(), addresses.recipient().clone())
.unwrap();
let msg = msg.open_envelope().unwrap();
let msg = msg.seal(
&addresses
.recipient()
.precompute_sealing_key(&client_priv_key),
);
msg
};
struct FooActor;
impl actix::Actor for FooActor {
type Context = actix::Context<Self>;
}
impl actix::Handler<super::Request> for FooActor {
type Result = actix::MessageResult<super::Request>;
fn handle(&mut self, req: super::Request, _: &mut Self::Context) -> Self::Result {
actix::MessageResult(Ok(req.0))
}
}
const FOO: crate::actor::arbiters::Name = crate::actor::arbiters::Name("FOO");
actor::app::App::run(
crate::build::get(),
log_config(),
futures::future::lazy(move || {
actor::arbiters::start_actor(MESSAGE_SERVICE, move |_| {
super::MessageService::new(server_address, server_priv_key)
})
.and_then(|addr| {
let register_foo_actor = crate::actor::arbiters::start_actor(FOO, |_| FooActor);
let register_message_type = {
let addr = addr.clone();
register_foo_actor.and_then(move |foo| {
let foo = foo.recipient();
addr.send(super::RegisterMessageHandler::new(
Foo::MESSAGE_TYPE_ID.message_type(),
foo,
))
})
};
register_message_type
.and_then(move |_| addr.send(super::SealedEnvelopeRequest(sealed_envelope)))
})
.then(|result| {
let result = result.unwrap();
match result {
Ok(msg) => info!("result: {}", msg),
Err(e) => panic!("{}", e),
}
futures::future::ok::<(), ()>(())
})
}),
);
}
}
| fmt | identifier_name |
tar_helper.rs | //! Tar helper is internal to the `/get` implementation. It uses some private parts of the `tar-rs`
//! crate to provide a `BytesMut` writing implementation instead of one using `std::io` interfaces.
//!
//! Code was originally taken and modified from the dependency version of `tar-rs`. The most
//! important copied parts are related to the long file name and long link name support. An issue
//! will be opened on `tar-rs` to discuss whether these could be made public, leaving us only the
//! `Bytes` (copying) code.
use super::GetError;
use bytes::{buf::BufMut, Bytes, BytesMut};
use ipfs::unixfs::ll::Metadata;
use std::borrow::Cow;
use std::path::{Path, PathBuf};
use tar::{EntryType, Header};
/// Tar helper is internal to `get` implementation. It uses some private parts of the `tar-rs`
/// crate to append the headers and the contents to a pair of `bytes::Bytes` operated in a
/// round-robin fashion.
pub(super) struct TarHelper {
bufsize: usize,
bytes: BytesMut,
header: Header,
long_filename_header: Header,
zeroes: Bytes,
}
impl TarHelper {
pub(super) fn with_capacity(n: usize) -> Self {
let bytes = BytesMut::with_capacity(n);
// these headers are 512 bytes apiece
let header = Self::new_default_header();
let long_filename_header = Self::new_long_filename_header();
let mut zeroes = BytesMut::with_capacity(512);
for _ in 0..(512 / 8) {
zeroes.put_u64(0);
}
assert_eq!(zeroes.len(), 512);
let zeroes = zeroes.freeze();
Self {
bufsize: n,
bytes,
header,
long_filename_header,
zeroes,
}
}
fn new_default_header() -> tar::Header {
let mut header = tar::Header::new_gnu();
header.set_mtime(0);
header.set_uid(0);
header.set_gid(0);
header
}
fn new_long_filename_header() -> tar::Header {
let mut long_filename_header = tar::Header::new_gnu();
long_filename_header.set_mode(0o644);
{
let name = b"././@LongLink";
let gnu_header = long_filename_header.as_gnu_mut().unwrap();
// since we are reusing the header, zero out all of the bytes
let written = name
.iter()
.copied()
.chain(std::iter::repeat(0))
.enumerate()
.take(gnu_header.name.len());
// FIXME: could revert back to the slice copying code since we never change this
for (i, b) in written {
gnu_header.name[i] = b;
}
}
long_filename_header.set_mtime(0);
long_filename_header.set_uid(0);
long_filename_header.set_gid(0);
long_filename_header
}
pub(super) fn apply_file(
&mut self,
path: &Path,
metadata: &Metadata,
total_size: u64,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(total_size);
self.header.set_entry_type(EntryType::Regular);
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn buffer_file_contents(&mut self, contents: &[u8]) -> Bytes {
assert!(!contents.is_empty());
let remaining = contents.len();
let taken = self.bufsize.min(remaining);
// we could check the remaining capacity here, but the caller round-robins the
// two buffers, so there is a good chance one of them is already empty at this point
self.bytes.put_slice(&contents[..taken]);
self.bytes.split().freeze()
}
pub(super) fn apply_directory(
&mut self,
path: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(0);
self.header.set_entry_type(EntryType::Directory);
Self::set_metadata(&mut self.header, metadata, 0o0755);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn apply_symlink(
&mut self,
path: &Path,
target: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 5], GetError> {
let mut ret: [Option<Bytes>; 5] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
if self.header.set_link_name(target).is_err() {
let data = path2bytes(target);
if data.len() < self.header.as_old().linkname.len() {
return Err(GetError::InvalidLinkName(data.to_vec()));
}
// this is the long-name trick again, but with a different entry type ('K'):
// the long link name is written as a separate entry with its own header.
self.long_filename_header.set_size(data.len() as u64 + 1);
self.long_filename_header
.set_entry_type(tar::EntryType::new(b'K'));
self.long_filename_header.set_cksum();
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[2] = Some(self.bytes.split().freeze());
ret[3] = self.pad(data.len() as u64 + 1);
}
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_size(0);
self.header.set_entry_type(tar::EntryType::Symlink);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[4] = Some(self.bytes.split().freeze());
Ok(ret)
}
/// Content in a tar archive is padded to 512-byte sectors; the sector size might be configurable in other implementations.
pub(super) fn pad(&self, total_size: u64) -> Option<Bytes> {
let padding = 512 - (total_size % 512);
if padding < 512 {
Some(self.zeroes.slice(..padding as usize))
} else {
None
}
}
fn set_metadata(header: &mut tar::Header, metadata: &Metadata, default_mode: u32) {
header.set_mode(
metadata
.mode()
.map(|mode| mode & 0o7777)
.unwrap_or(default_mode),
);
header.set_mtime(
metadata
.mtime()
.and_then(|(seconds, _)| {
if seconds >= 0 {
Some(seconds as u64)
} else {
None
}
})
.unwrap_or(0),
);
}
}
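// --- Example (not part of the original source) ---
// A small, hypothetical check of the padding arithmetic above: sizes that are
// already sector-aligned need no padding, anything else is padded up to the
// next 512-byte boundary.
#[cfg(test)]
mod pad_tests {
use super::TarHelper;
#[test]
fn pads_to_512_byte_sectors() {
let helper = TarHelper::with_capacity(1024);
// exact multiples of 512 need no padding
assert!(helper.pad(0).is_none());
assert!(helper.pad(512).is_none());
assert!(helper.pad(1024).is_none());
// everything else is padded up to the next sector boundary
assert_eq!(helper.pad(1).unwrap().len(), 511);
assert_eq!(helper.pad(100).unwrap().len(), 412);
assert_eq!(helper.pad(513).unwrap().len(), 511);
}
}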
/// Returns the raw bytes we need to write as a new entry into the tar.
fn prepare_long_header<'a>(
header: &mut tar::Header,
long_filename_header: &mut tar::Header,
path: &'a Path,
_error: std::io::Error,
) -> Result<&'a [u8], GetError> {
#[cfg(unix)]
/// On unix this operation can never fail.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
use std::ffi::{OsStr, OsString};
use std::os::unix::prelude::*;
Ok(match bytes {
Cow::Borrowed(bytes) => Cow::Borrowed(Path::new(OsStr::from_bytes(bytes))),
Cow::Owned(bytes) => Cow::Owned(PathBuf::from(OsString::from_vec(bytes))),
})
}
#[cfg(windows)]
/// On windows we cannot accept non-Unicode bytes because it
/// is impossible to convert it to UTF-16.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
match bytes {
Cow::Borrowed(bytes) => {
let s = std::str::from_utf8(bytes).map_err(|_| not_unicode(bytes))?;
Ok(Cow::Borrowed(Path::new(s)))
}
Cow::Owned(bytes) => {
let s = String::from_utf8(bytes).map_err(|uerr| not_unicode(&uerr.into_bytes()))?;
Ok(Cow::Owned(PathBuf::from(s)))
}
}
}
// Used with windows.
#[allow(dead_code)]
fn not_unicode(v: &[u8]) -> std::io::Error {
use std::io::{Error, ErrorKind};
Error::new(
ErrorKind::Other,
format!(
"only Unicode paths are supported on Windows: {}",
String::from_utf8_lossy(v)
),
)
}
// we **only** have utf8 paths as protobuf has already parsed this file
// name and all of the previous ones as utf8.
let data = path2bytes(path);
let max = header.as_old().name.len();
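// `set_path` can also fail for reasons other than length; if the name would
// actually have fit in the header, the failure was not about length, so
// reject the name instead of writing a long-name entry.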
if data.len() < max {
return Err(GetError::InvalidFileName(data.to_vec()));
}
// the plus one is for GNU tar compliance: the long name entry is
// NUL-terminated, and the size field includes that terminator
long_filename_header.set_size(data.len() as u64 + 1);
long_filename_header.set_entry_type(tar::EntryType::new(b'L'));
long_filename_header.set_cksum();
// we still need to figure out the truncated path we put into the header
let path = bytes2path(Cow::Borrowed(&data[..max]))
.expect("quite certain we have no non-utf8 paths here");
header
.set_path(&path)
.expect("we already made sure the path is of fitting length");
Ok(data)
}
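// --- Example (not part of the original source) ---
// A hypothetical illustration of the stream layout produced for an over-long
// path by the code above: an 'L' header, the name bytes plus a trailing NUL,
// zero padding to the sector boundary, and only then the real entry header
// holding the truncated name.
#[cfg(test)]
mod long_header_layout {
#[test]
fn real_header_starts_on_a_sector_boundary() {
let name_len = 600usize;
let l_entry = 512 /* 'L' header */ + name_len + 1 /* NUL */;
let padded = l_entry + (512 - l_entry % 512) % 512;
assert_eq!(padded % 512, 0);
}
}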
#[cfg(unix)]
fn path2bytes(p: &Path) -> &[u8] {
use std::os::unix::prelude::*;
p.as_os_str().as_bytes()
}
#[cfg(windows)] | fn path2bytes(p: &Path) -> &[u8] {
p.as_os_str()
.to_str()
.expect("we should only have unicode compatible bytes even on windows")
.as_bytes()
} | random_line_split |
|
tar_helper.rs | //! Tar helper is internal to the `/get` implementation. It uses some private parts of the `tar-rs`
//! crate to provide a `BytesMut` writing implementation instead of one using `std::io` interfaces.
//!
//! Code was originally taken and modified from the dependency version of `tar-rs`. The most
//! important copied parts are related to the long file name and long link name support. An issue
//! will be opened on `tar-rs` to discuss whether these could be made public, leaving us only the
//! `Bytes` (copying) code.
use super::GetError;
use bytes::{buf::BufMut, Bytes, BytesMut};
use ipfs::unixfs::ll::Metadata;
use std::borrow::Cow;
use std::path::{Path, PathBuf};
use tar::{EntryType, Header};
/// Tar helper is internal to `get` implementation. It uses some private parts of the `tar-rs`
/// crate to append the headers and the contents to a pair of `bytes::Bytes` operated in a
/// round-robin fashion.
pub(super) struct TarHelper {
bufsize: usize,
bytes: BytesMut,
header: Header,
long_filename_header: Header,
zeroes: Bytes,
}
impl TarHelper {
pub(super) fn with_capacity(n: usize) -> Self | }
fn new_default_header() -> tar::Header {
let mut header = tar::Header::new_gnu();
header.set_mtime(0);
header.set_uid(0);
header.set_gid(0);
header
}
fn new_long_filename_header() -> tar::Header {
let mut long_filename_header = tar::Header::new_gnu();
long_filename_header.set_mode(0o644);
{
let name = b"././@LongLink";
let gnu_header = long_filename_header.as_gnu_mut().unwrap();
// since we are reusing the header, zero out all of the bytes
let written = name
.iter()
.copied()
.chain(std::iter::repeat(0))
.enumerate()
.take(gnu_header.name.len());
// FIXME: could revert back to the slice copying code since we never change this
for (i, b) in written {
gnu_header.name[i] = b;
}
}
long_filename_header.set_mtime(0);
long_filename_header.set_uid(0);
long_filename_header.set_gid(0);
long_filename_header
}
pub(super) fn apply_file(
&mut self,
path: &Path,
metadata: &Metadata,
total_size: u64,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(total_size);
self.header.set_entry_type(EntryType::Regular);
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn buffer_file_contents(&mut self, contents: &[u8]) -> Bytes {
assert!(!contents.is_empty());
let remaining = contents.len();
let taken = self.bufsize.min(remaining);
// we could check the remaining capacity here, but the caller round-robins the
// two buffers, so there is a good chance one of them is already empty at this point
self.bytes.put_slice(&contents[..taken]);
self.bytes.split().freeze()
}
pub(super) fn apply_directory(
&mut self,
path: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(0);
self.header.set_entry_type(EntryType::Directory);
Self::set_metadata(&mut self.header, metadata, 0o0755);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn apply_symlink(
&mut self,
path: &Path,
target: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 5], GetError> {
let mut ret: [Option<Bytes>; 5] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
if self.header.set_link_name(target).is_err() {
let data = path2bytes(target);
if data.len() < self.header.as_old().linkname.len() {
return Err(GetError::InvalidLinkName(data.to_vec()));
}
// this is the long-name trick again, but with a different entry type ('K'):
// the long link name is written as a separate entry with its own header.
self.long_filename_header.set_size(data.len() as u64 + 1);
self.long_filename_header
.set_entry_type(tar::EntryType::new(b'K'));
self.long_filename_header.set_cksum();
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[2] = Some(self.bytes.split().freeze());
ret[3] = self.pad(data.len() as u64 + 1);
}
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_size(0);
self.header.set_entry_type(tar::EntryType::Symlink);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[4] = Some(self.bytes.split().freeze());
Ok(ret)
}
/// Content in a tar archive is padded to 512-byte sectors; the sector size might be configurable in other implementations.
pub(super) fn pad(&self, total_size: u64) -> Option<Bytes> {
let padding = 512 - (total_size % 512);
if padding < 512 {
Some(self.zeroes.slice(..padding as usize))
} else {
None
}
}
fn set_metadata(header: &mut tar::Header, metadata: &Metadata, default_mode: u32) {
header.set_mode(
metadata
.mode()
.map(|mode| mode & 0o7777)
.unwrap_or(default_mode),
);
header.set_mtime(
metadata
.mtime()
.and_then(|(seconds, _)| {
if seconds >= 0 {
Some(seconds as u64)
} else {
None
}
})
.unwrap_or(0),
);
}
}
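// --- Example (not part of the original source) ---
// A tiny, hypothetical illustration of the `mode & 0o7777` masking done in
// `set_metadata` above: the file-type bits of a raw st_mode value are
// dropped, while permission, setuid/setgid, and sticky bits are preserved.
#[cfg(test)]
mod mode_mask_tests {
#[test]
fn mask_strips_file_type_bits() {
let regular_file = 0o100644u32; // S_IFREG | rw-r--r--
assert_eq!(regular_file & 0o7777, 0o644);
let setuid_exec = 0o104755u32; // S_IFREG | setuid | rwxr-xr-x
assert_eq!(setuid_exec & 0o7777, 0o4755);
}
}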
/// Returns the raw bytes we need to write as a new entry into the tar.
fn prepare_long_header<'a>(
header: &mut tar::Header,
long_filename_header: &mut tar::Header,
path: &'a Path,
_error: std::io::Error,
) -> Result<&'a [u8], GetError> {
#[cfg(unix)]
/// On unix this operation can never fail.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
use std::ffi::{OsStr, OsString};
use std::os::unix::prelude::*;
Ok(match bytes {
Cow::Borrowed(bytes) => Cow::Borrowed(Path::new(OsStr::from_bytes(bytes))),
Cow::Owned(bytes) => Cow::Owned(PathBuf::from(OsString::from_vec(bytes))),
})
}
#[cfg(windows)]
/// On windows we cannot accept non-Unicode bytes because it
/// is impossible to convert it to UTF-16.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
match bytes {
Cow::Borrowed(bytes) => {
let s = std::str::from_utf8(bytes).map_err(|_| not_unicode(bytes))?;
Ok(Cow::Borrowed(Path::new(s)))
}
Cow::Owned(bytes) => {
let s = String::from_utf8(bytes).map_err(|uerr| not_unicode(&uerr.into_bytes()))?;
Ok(Cow::Owned(PathBuf::from(s)))
}
}
}
// Used with windows.
#[allow(dead_code)]
fn not_unicode(v: &[u8]) -> std::io::Error {
use std::io::{Error, ErrorKind};
Error::new(
ErrorKind::Other,
format!(
"only Unicode paths are supported on Windows: {}",
String::from_utf8_lossy(v)
),
)
}
// we **only** have utf8 paths as protobuf has already parsed this file
// name and all of the previous ones as utf8.
let data = path2bytes(path);
let max = header.as_old().name.len();
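// `set_path` can also fail for reasons other than length; if the name would
// actually have fit in the header, the failure was not about length, so
// reject the name instead of writing a long-name entry.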
if data.len() < max {
return Err(GetError::InvalidFileName(data.to_vec()));
}
// the plus one is for GNU tar compliance: the long name entry is
// NUL-terminated, and the size field includes that terminator
long_filename_header.set_size(data.len() as u64 + 1);
long_filename_header.set_entry_type(tar::EntryType::new(b'L'));
long_filename_header.set_cksum();
// we still need to figure out the truncated path we put into the header
let path = bytes2path(Cow::Borrowed(&data[..max]))
.expect("quite certain we have no non-utf8 paths here");
header
.set_path(&path)
.expect("we already made sure the path is of fitting length");
Ok(data)
}
#[cfg(unix)]
fn path2bytes(p: &Path) -> &[u8] {
use std::os::unix::prelude::*;
p.as_os_str().as_bytes()
}
#[cfg(windows)]
fn path2bytes(p: &Path) -> &[u8] {
p.as_os_str()
.to_str()
.expect("we should only have unicode compatible bytes even on windows")
.as_bytes()
}
| {
let bytes = BytesMut::with_capacity(n);
// these headers are 512 bytes apiece
let header = Self::new_default_header();
let long_filename_header = Self::new_long_filename_header();
let mut zeroes = BytesMut::with_capacity(512);
for _ in 0..(512 / 8) {
zeroes.put_u64(0);
}
assert_eq!(zeroes.len(), 512);
let zeroes = zeroes.freeze();
Self {
bufsize: n,
bytes,
header,
long_filename_header,
zeroes,
} | identifier_body |
tar_helper.rs | //! Tar helper is internal to the `/get` implementation. It uses some private parts of the `tar-rs`
//! crate to provide a `BytesMut` writing implementation instead of one using `std::io` interfaces.
//!
//! Code was originally taken and modified from the dependency version of `tar-rs`. The most
//! important copied parts are related to the long file name and long link name support. An issue
//! will be opened on `tar-rs` to discuss whether these could be made public, leaving us only the
//! `Bytes` (copying) code.
use super::GetError;
use bytes::{buf::BufMut, Bytes, BytesMut};
use ipfs::unixfs::ll::Metadata;
use std::borrow::Cow;
use std::path::{Path, PathBuf};
use tar::{EntryType, Header};
/// Tar helper is internal to `get` implementation. It uses some private parts of the `tar-rs`
/// crate to append the headers and the contents to a pair of `bytes::Bytes` operated in a
/// round-robin fashion.
pub(super) struct TarHelper {
bufsize: usize,
bytes: BytesMut,
header: Header,
long_filename_header: Header,
zeroes: Bytes,
}
impl TarHelper {
pub(super) fn with_capacity(n: usize) -> Self {
let bytes = BytesMut::with_capacity(n);
// these headers are 512 bytes apiece
let header = Self::new_default_header();
let long_filename_header = Self::new_long_filename_header();
let mut zeroes = BytesMut::with_capacity(512);
for _ in 0..(512 / 8) {
zeroes.put_u64(0);
}
assert_eq!(zeroes.len(), 512);
let zeroes = zeroes.freeze();
Self {
bufsize: n,
bytes,
header,
long_filename_header,
zeroes,
}
}
fn new_default_header() -> tar::Header {
let mut header = tar::Header::new_gnu();
header.set_mtime(0);
header.set_uid(0);
header.set_gid(0);
header
}
fn new_long_filename_header() -> tar::Header {
let mut long_filename_header = tar::Header::new_gnu();
long_filename_header.set_mode(0o644);
{
let name = b"././@LongLink";
let gnu_header = long_filename_header.as_gnu_mut().unwrap();
// since we are reusing the header, zero out all of the bytes
let written = name
.iter()
.copied()
.chain(std::iter::repeat(0))
.enumerate()
.take(gnu_header.name.len());
// FIXME: could revert back to the slice copying code since we never change this
for (i, b) in written {
gnu_header.name[i] = b;
}
}
long_filename_header.set_mtime(0);
long_filename_header.set_uid(0);
long_filename_header.set_gid(0);
long_filename_header
}
pub(super) fn | (
&mut self,
path: &Path,
metadata: &Metadata,
total_size: u64,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(total_size);
self.header.set_entry_type(EntryType::Regular);
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn buffer_file_contents(&mut self, contents: &[u8]) -> Bytes {
assert!(!contents.is_empty());
let remaining = contents.len();
let taken = self.bufsize.min(remaining);
// we could check the remaining capacity here, but the caller round-robins the
// two buffers, so there is a good chance one of them is already empty at this point
self.bytes.put_slice(&contents[..taken]);
self.bytes.split().freeze()
}
pub(super) fn apply_directory(
&mut self,
path: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(0);
self.header.set_entry_type(EntryType::Directory);
Self::set_metadata(&mut self.header, metadata, 0o0755);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn apply_symlink(
&mut self,
path: &Path,
target: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 5], GetError> {
let mut ret: [Option<Bytes>; 5] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
if self.header.set_link_name(target).is_err() {
let data = path2bytes(target);
if data.len() < self.header.as_old().linkname.len() {
return Err(GetError::InvalidLinkName(data.to_vec()));
}
// this is the long-name trick again, but with a different entry type ('K'):
// the long link name is written as a separate entry with its own header.
self.long_filename_header.set_size(data.len() as u64 + 1);
self.long_filename_header
.set_entry_type(tar::EntryType::new(b'K'));
self.long_filename_header.set_cksum();
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[2] = Some(self.bytes.split().freeze());
ret[3] = self.pad(data.len() as u64 + 1);
}
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_size(0);
self.header.set_entry_type(tar::EntryType::Symlink);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[4] = Some(self.bytes.split().freeze());
Ok(ret)
}
/// Content in a tar archive is padded to 512-byte sectors; the sector size might be configurable in other implementations.
pub(super) fn pad(&self, total_size: u64) -> Option<Bytes> {
let padding = 512 - (total_size % 512);
if padding < 512 {
Some(self.zeroes.slice(..padding as usize))
} else {
None
}
}
fn set_metadata(header: &mut tar::Header, metadata: &Metadata, default_mode: u32) {
header.set_mode(
metadata
.mode()
.map(|mode| mode & 0o7777)
.unwrap_or(default_mode),
);
header.set_mtime(
metadata
.mtime()
.and_then(|(seconds, _)| {
if seconds >= 0 {
Some(seconds as u64)
} else {
None
}
})
.unwrap_or(0),
);
}
}
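// --- Example (not part of the original source) ---
// A hypothetical check of the long-name entry size arithmetic used above:
// the payload is the name plus a trailing NUL, and the stream is then padded
// to the next 512-byte sector.
#[cfg(test)]
mod long_name_size_tests {
#[test]
fn payload_is_name_plus_nul_padded_to_sector() {
let name_len = 200usize;
let payload = name_len + 1; // trailing NUL
let padding = (512 - payload % 512) % 512;
assert_eq!(payload + padding, 512);
let long_name_len = 700usize;
let payload = long_name_len + 1;
let padding = (512 - payload % 512) % 512;
assert_eq!(payload + padding, 1024);
}
}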
/// Returns the raw bytes we need to write as a new entry into the tar.
fn prepare_long_header<'a>(
header: &mut tar::Header,
long_filename_header: &mut tar::Header,
path: &'a Path,
_error: std::io::Error,
) -> Result<&'a [u8], GetError> {
#[cfg(unix)]
/// On unix this operation can never fail.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
use std::ffi::{OsStr, OsString};
use std::os::unix::prelude::*;
Ok(match bytes {
Cow::Borrowed(bytes) => Cow::Borrowed(Path::new(OsStr::from_bytes(bytes))),
Cow::Owned(bytes) => Cow::Owned(PathBuf::from(OsString::from_vec(bytes))),
})
}
#[cfg(windows)]
/// On windows we cannot accept non-Unicode bytes because it
/// is impossible to convert it to UTF-16.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
match bytes {
Cow::Borrowed(bytes) => {
let s = std::str::from_utf8(bytes).map_err(|_| not_unicode(bytes))?;
Ok(Cow::Borrowed(Path::new(s)))
}
Cow::Owned(bytes) => {
let s = String::from_utf8(bytes).map_err(|uerr| not_unicode(&uerr.into_bytes()))?;
Ok(Cow::Owned(PathBuf::from(s)))
}
}
}
// Used with windows.
#[allow(dead_code)]
fn not_unicode(v: &[u8]) -> std::io::Error {
use std::io::{Error, ErrorKind};
Error::new(
ErrorKind::Other,
format!(
"only Unicode paths are supported on Windows: {}",
String::from_utf8_lossy(v)
),
)
}
// we **only** have utf8 paths as protobuf has already parsed this file
// name and all of the previous ones as utf8.
let data = path2bytes(path);
let max = header.as_old().name.len();
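// `set_path` can also fail for reasons other than length; if the name would
// actually have fit in the header, the failure was not about length, so
// reject the name instead of writing a long-name entry.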
if data.len() < max {
return Err(GetError::InvalidFileName(data.to_vec()));
}
// the plus one is for GNU tar compliance: the long name entry is
// NUL-terminated, and the size field includes that terminator
long_filename_header.set_size(data.len() as u64 + 1);
long_filename_header.set_entry_type(tar::EntryType::new(b'L'));
long_filename_header.set_cksum();
// we still need to figure out the truncated path we put into the header
let path = bytes2path(Cow::Borrowed(&data[..max]))
.expect("quite certain we have no non-utf8 paths here");
header
.set_path(&path)
.expect("we already made sure the path is of fitting length");
Ok(data)
}
#[cfg(unix)]
fn path2bytes(p: &Path) -> &[u8] {
use std::os::unix::prelude::*;
p.as_os_str().as_bytes()
}
#[cfg(windows)]
fn path2bytes(p: &Path) -> &[u8] {
p.as_os_str()
.to_str()
.expect("we should only have unicode compatible bytes even on windows")
.as_bytes()
}
| apply_file | identifier_name |
root.rs | //! The root task.
use crate::consts::HSI_CLK;
use crate::{
drv::{
exti::{ExtiDrv, ExtiSetup},
flash::Flash,
gpio::GpioHead,
hsi::Hsi,
lse::Lse,
pll::Pll,
rcc::Rcc,
},
drv_gpio_pins,
sys::{gpio_pins::GpioPins, system::System},
thr,
thr::{Thrs, ThrsInit},
Regs,
};
use drone_core::log;
use drone_cortexm::swo;
use drone_cortexm::processor::fpu_init;
use drone_cortexm::{fib, reg::prelude::*, thr::prelude::*};
use drone_stm32_map::periph::exti::periph_exti5;
use drone_stm32_map::periph::exti::Exti5;
use drone_stm32_map::periph::gpio::periph_gpio_b_head;
use drone_stm32_map::periph::sys_tick::{periph_sys_tick, SysTickPeriph};
use futures::prelude::*;
use futures::select_biased;
enum Event {
Tick,
Push,
}
enum ClockMode {
Reset8MHz,
Medium32MHz,
High64MHz,
}
enum Led {
GreenLed = 1,
}
/// An error returned when a receiver has missed too many ticks.
#[derive(Debug)]
pub struct TickOverflow;
/// System Resources
pub struct SystemRes {
pub sys_tick: SysTickPeriph,
pub thr_sys_tick: thr::SysTick,
pub pll: Pll,
pub hsi: Hsi,
pub lse: Lse,
pub rcc: Rcc,
pub flash: Flash,
pub pllmul: u32,
pub clksrc: u32,
pub pllsrc: u32,
pub hpre: u32,
pub prediv: u32,
}
#[allow(unused_labels)]
#[inline(never)]
pub fn handler(reg: Regs, thr_init: ThrsInit) {
let mut clock_mode = ClockMode::High64MHz;
let (thr, scb) = thr::init_extended(thr_init);
thr.hard_fault.add_once(|| panic!("Hard Fault"));
// Allocate the clock control resources.
let mut res = SystemRes {
sys_tick: periph_sys_tick!(reg),
thr_sys_tick: thr.sys_tick,
// ----------------------
// -- Clocks.
// The internal PLLs can be used to multiply the HSI or HSE
// output clock frequency.
pll: Pll::new(periph_pll!(reg)),
// The HSI clock signal is generated from an internal 8 MHz RC Oscillator.
hsi: Hsi::new(periph_hsi!(reg)),
// The LSE clock (32.768K oscillator, not used in this crate.)
lse: Lse::new(periph_lse!(reg)),
// The RCC component.
rcc: Rcc::new(periph_rcc!(reg)),
// The flash component,
flash: Flash::new(periph_flash!(reg)),
// ----------------------
// -- Factors and selectors.
// CAUTION: Setting wrong values may make your system unusable.
// Read the reference manual for detailed information.
//
// PLL multiplication factor.
// Possible values for pllmul:
// Caution: The PLL output frequency must not exceed 72 MHz.
// 0000: PLL input clock x 2
// 0001: PLL input clock x 3
// 0010: PLL input clock x 4
// 0011: PLL input clock x 5
// 0100: PLL input clock x 6
// 0101: PLL input clock x 7
// 0110: PLL input clock x 8
// 0111: PLL input clock x 9
// 1000: PLL input clock x 10
// 1001: PLL input clock x 11
// 1010: PLL input clock x 12
// 1011: PLL input clock x 13
// 1100: PLL input clock x 14
// 1101: PLL input clock x 15
// 1110: PLL input clock x 16
// 1111: Not applicable
pllmul: 0b1110, // Field RCC_CFGR PLLMUL in ref. manual RM0316.
// System clock switch.
// Possible values for clksrc:
// 00: HSI oscillator used as system clock.
// 01: HSE oscillator used as system clock.
// 10: PLL used as system clock
// 11: Not applicable.
clksrc: 0b10, // Field RCC_CFGR SW in ref. manual RM0316.
//
// Possible values for pllsrc:
// Caution: Different values for STM32F303xD/E and STM32F398xE!
// 00: HSI/2 selected as PLL input clock.
// 01: HSE/PREDIV selected as PLL input clock
// 10: Reserved.
// 11: Reserved.
pllsrc: 0b00, // Field RCC_CFGR PLLSRC in ref. manual RM0316.
// Division factor of the AHB clock (AHB prescaler).
// Possible values for hpre:
// 0xxx: SYSCLK not divided
// 1000: SYSCLK divided by 2
// 1001: SYSCLK divided by 4
// 1010: SYSCLK divided by 8
// 1011: SYSCLK divided by 16
// 1100: SYSCLK divided by 64
// 1101: SYSCLK divided by 128
// 1110: SYSCLK divided by 256
// 1111: SYSCLK divided by 512
hpre: 0b0000, // Field RCC_CFGR HPRE in ref. manual RM0316.
// PREDIV division factor.
prediv: 0b000, // Field RCC_CFGR2 PREDIV in ref. manual RM0316.
};
swo::flush();
swo::update_prescaler(HSI_CLK / log::baud_rate!() - 1);
System::delay(100, HSI_CLK, &res).root_wait();
// The on-board user LED is connected to GPIO bank B.
// Create register and pins mapping component.
let gpio_pins = drv_gpio_pins!(reg);
let mut gpio_b = GpioHead::new(periph_gpio_b_head!(reg));
// Enable and initialize.
let gpio_b_en = gpio_b.enable();
gpio_pins.init(gpio_b_en.inventory_token());
scb.scb_ccr_div_0_trp.set_bit();
unsafe {
fpu_init(true);
}
// Enable the system configuration controller clock.
reg.rcc_apb2enr.syscfgen.set_bit();
// Exti configuration for the user button.
// There is no user button on the Nucleo-F303K8,
// but we use the PB5 pin to emulate it.
let exti5 = ExtiDrv::init(ExtiSetup {
exti: periph_exti5!(reg),
exti_int: thr.exti_9_5,
config: 0b001, // PB5 pin.
falling: false, // don't trigger the interrupt on a falling edge.
rising: true, // trigger the interrupt on a rising edge.
});
'user_button_pressed: loop {
// Reset the clock control registers to their default.
System::reset_rcc(&res);
// Apply the current clock tree configuration.
System::apply_clock_config(&res);
// Calculate the configured clock speed.
let hclk = System::calculate_hclk(&res);
swo::flush();
swo::update_prescaler(hclk / log::baud_rate!() - 1);
System::delay(50, hclk, &res).root_wait();
println!("Running at {} MHz", hclk);
listen(&res, &thr, &exti5, &gpio_pins, hclk).root_wait();
// Set different configuration for the clock tree
// depending on current configuration
match clock_mode {
ClockMode::Reset8MHz => {
clock_mode = ClockMode::Medium32MHz; // <- new mode.
res.pllsrc = 0b00; // HSI/2 is the PLL input clock.
res.clksrc = 0b10; // Use PLL output 32 MHz.
res.pllmul = 0b0110;
System::delay(50, 8_000_000, &res).root_wait();
}
ClockMode::Medium32MHz => {
clock_mode = ClockMode::High64MHz; // <- new mode.
res.pllsrc = 0b00; // HSI/2 is the PLL input clock.
res.clksrc = 0b10; // Use PLL output 64 MHz
res.pllmul = 0b1110;
System::delay(50, 32_000_000, &res).root_wait();
}
ClockMode::High64MHz => {
clock_mode = ClockMode::Reset8MHz; // <- new mode.
res.pllsrc = 0b00; // No PLL.
res.clksrc = 0b00; // Use HSI 8MHz.
res.pllmul = 0b0000;
System::delay(20, 64_000_000, &res).root_wait();
}
}
}
}
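// --- Example (not part of the original source) ---
// A hypothetical, self-contained sketch of the HCLK arithmetic implied by the
// tables above, assuming pllsrc == 0b00 (HSI/2 feeds the PLL), HPRE == 0b0000
// (SYSCLK undivided), and a PLLMUL field encoding a multiplier of field + 2.
#[allow(dead_code)]
fn sketch_hclk(clksrc: u32, pllmul: u32, hsi_hz: u32) -> u32 {
match clksrc {
0b00 => hsi_hz, // HSI used directly as SYSCLK
0b10 => (hsi_hz / 2) * (pllmul + 2), // PLL fed by HSI/2
_ => unreachable!("HSE paths are not used in this sketch"),
}
}
// e.g. sketch_hclk(0b10, 0b1110, 8_000_000) == 64_000_000
// and sketch_hclk(0b10, 0b0110, 8_000_000) == 32_000_000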
async fn | (
res: &SystemRes,
thr: &Thrs,
exti5: &ExtiDrv<Exti5, thr::Exti95>,
gpio_pins: &GpioPins,
hclk: u32,
) -> Event {
println!("Enter listen, hclk={}", hclk);
// Attach a listener that will notify us on user button pressed.
let mut button_stream = exti5.create_saturating_stream();
// Attach a listener that will notify us on each SYS_TICK interrupt trigger.
let mut tick_stream = res.thr_sys_tick.add_pulse_try_stream(
// This closure will be called when a receiver no longer can store the
// number of ticks since the last stream poll. If this happens, a
// `TickOverflow` error will be sent over the stream as its final value.
|| Err(TickOverflow),
// A fiber that will be called on each interrupt trigger. It sends a
// single tick over the stream.
fib::new_fn(|| fib::Yielded(Some(1))),
);
// Clear the current value of the timer.
res.sys_tick.stk_val.store(|r| r.write_current(0));
//
// The duration of setting the led ON is inversely proportional to the
// MCU clock speed. It shall be:
// 2.00 seconds when cpu clocks @ 8MHz
// 0.50 seconds when cpu clocks @ 32MHz
// 0.20 seconds when cpu clocks @ 64MHz
// The trigger is set so that it returns twice per interval
// at the highest speed, and proportionally more often per interval
// at lower speeds.
// That way, the Exti interrupt will happen every 100ms at all speeds
// and it can be used for debouncing and double-click control.
let mut trigger = 4_000_000 / 8; // So many systick/sec at 4MHz.
trigger = trigger / 10; // So many in 100ms at 4MHz.
trigger = trigger * (hclk / 4_000_000); // More at higher speed
res.sys_tick.stk_load.store(|r| r.write_reload(trigger));
res.sys_tick.stk_ctrl.store(|r| {
r.set_tickint() // Counting down to 0 triggers the SysTick interrupt
.set_enable() // Start the counter in a multi-shot way
});
let mut green_led_on = true;
gpio_pins.output(Led::GreenLed as u8, true); // Start with the green LED ON.
// Enable the interrupt for the user button.
thr.exti_9_5.enable_int();
// Counters
let mut debounce_protection: i16 = 0;
let mut doubleclick_protection: i16 = 0;
let mut ticks_cnt: u32 = 0;
// Monitored interval lengths (accumulated ticks).
let debounce_ival = 2;
let doubleclick_ival = 4;
// This is dependent on mcu speed:
let ticks_ival: u32 = 40 / (hclk / 4_000_000);
'blinky: loop {
let evt = select_biased! {
_p = button_stream.next().fuse() => Event::Push,
_t = tick_stream.next().fuse() => Event::Tick,
};
match evt {
Event::Tick => {
if debounce_protection > i16::MIN {
debounce_protection = debounce_protection - 1;
};
if doubleclick_protection < i16::MAX {
doubleclick_protection = doubleclick_protection + 1;
};
if debounce_protection == 0 && doubleclick_protection >= doubleclick_ival {
println!("Switch to new speed");
break 'blinky;
}
// The low and the high intervals are 'ticks_ival' ticks each.
ticks_cnt = ticks_cnt + 1;
if ticks_cnt >= ticks_ival {
ticks_cnt = 0;
match green_led_on {
true => {
println!("LED off");
green_led_on = false;
gpio_pins.output(Led::GreenLed as u8, false);
}
_ => {
println!("LED on");
green_led_on = true;
gpio_pins.output(Led::GreenLed as u8, true);
}
}
}
}
Event::Push => {
// After the interrupt has been disabled or re-enabled, the stream
// needs to be flushed so that the logic is protected against
// mechanical contact bouncing and double-clicks during the
// switching period.
if doubleclick_protection > doubleclick_ival {
println!("--");
thr.exti_9_5.disable_int();
debounce_protection = debounce_ival;
} else {
doubleclick_protection = 0;
println!("++");
}
}
}
}
Event::Push
}
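// --- Example (not part of the original source) ---
// A hypothetical check of the reload arithmetic used in `listen` above:
// assuming the SysTick counts at HCLK/8, the computed trigger should fire
// every 100 ms regardless of the configured core clock.
#[allow(dead_code)]
fn sketch_trigger_period_ms(hclk: u32) -> u32 {
let mut trigger = 4_000_000 / 8; // SysTick ticks per second at a 4 MHz core
trigger = trigger / 10; // ticks per 100 ms at 4 MHz
trigger = trigger * (hclk / 4_000_000); // scale up with the core clock
let systick_hz = hclk / 8; // assumed SysTick input clock
trigger * 1_000 / systick_hz // resulting period in milliseconds
}
// sketch_trigger_period_ms(8_000_000) == 100
// sketch_trigger_period_ms(64_000_000) == 100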
| listen | identifier_name |
root.rs | //! The root task.
use crate::consts::HSI_CLK;
use crate::{
drv::{
exti::{ExtiDrv, ExtiSetup},
flash::Flash,
gpio::GpioHead,
hsi::Hsi,
lse::Lse,
pll::Pll,
rcc::Rcc,
},
drv_gpio_pins,
sys::{gpio_pins::GpioPins, system::System},
thr,
thr::{Thrs, ThrsInit},
Regs,
};
use drone_core::log;
use drone_cortexm::swo;
use drone_cortexm::processor::fpu_init;
use drone_cortexm::{fib, reg::prelude::*, thr::prelude::*};
use drone_stm32_map::periph::exti::periph_exti5;
use drone_stm32_map::periph::exti::Exti5;
use drone_stm32_map::periph::gpio::periph_gpio_b_head;
use drone_stm32_map::periph::sys_tick::{periph_sys_tick, SysTickPeriph};
use futures::prelude::*;
use futures::select_biased;
enum Event {
Tick,
Push,
}
enum ClockMode {
Reset8MHz,
Medium32MHz,
High64MHz,
}
enum Led {
GreenLed = 1,
}
/// An error returned when a receiver has missed too many ticks.
#[derive(Debug)]
pub struct TickOverflow;
/// System Resources
pub struct SystemRes {
pub sys_tick: SysTickPeriph,
pub thr_sys_tick: thr::SysTick,
pub pll: Pll,
pub hsi: Hsi,
pub lse: Lse,
pub rcc: Rcc,
pub flash: Flash,
pub pllmul: u32,
pub clksrc: u32,
pub pllsrc: u32,
pub hpre: u32,
pub prediv: u32,
}
#[allow(unused_labels)]
#[inline(never)]
pub fn handler(reg: Regs, thr_init: ThrsInit) {
let mut clock_mode = ClockMode::High64MHz;
let (thr, scb) = thr::init_extended(thr_init);
thr.hard_fault.add_once(|| panic!("Hard Fault"));
// Allocate the clock control resources.
let mut res = SystemRes {
sys_tick: periph_sys_tick!(reg),
thr_sys_tick: thr.sys_tick,
// ----------------------
// -- Clocks.
// The internal PLLs can be used to multiply the HSI or HSE
// output clock frequency.
pll: Pll::new(periph_pll!(reg)),
// The HSI clock signal is generated from an internal 8 MHz RC Oscillator.
hsi: Hsi::new(periph_hsi!(reg)),
// The LSE clock (32.768K oscillator, not used in this crate.)
lse: Lse::new(periph_lse!(reg)),
// The RCC component.
rcc: Rcc::new(periph_rcc!(reg)),
// The flash component,
flash: Flash::new(periph_flash!(reg)),
// ----------------------
// -- Factors and selectors.
// CAUTION: Setting wrong values may make your system unusable.
// Read the reference manual for detailed information.
//
// PLL multiplication factor.
// Possible values for pllmul:
// Caution: The PLL output frequency must not exceed 72 MHz.
// 0000: PLL input clock x 2
// 0001: PLL input clock x 3
// 0010: PLL input clock x 4
// 0011: PLL input clock x 5
// 0100: PLL input clock x 6
// 0101: PLL input clock x 7
// 0110: PLL input clock x 8
// 0111: PLL input clock x 9
// 1000: PLL input clock x 10
// 1001: PLL input clock x 11
// 1010: PLL input clock x 12
// 1011: PLL input clock x 13
// 1100: PLL input clock x 14
// 1101: PLL input clock x 15
// 1110: PLL input clock x 16
// 1111: Not applicable
pllmul: 0b1110, // Field RCC_CFGR PLLMUL in ref. manual RM0316.
// System clock switch.
// Possible values for clksrc:
// 00: HSI oscillator used as system clock.
// 01: HSE oscillator used as system clock.
// 10: PLL used as system clock
// 11: Not applicable.
clksrc: 0b10, // Field RCC_CFGR SW in ref. manual RM0316.
//
// Possible values for pllsrc:
// Caution: Different values for STM32F303xD/E and STM32F398xE!
// 00: HSI/2 selected as PLL input clock.
// 01: HSE/PREDIV selected as PLL input clock
// 10: Reserved.
// 11: Reserved.
pllsrc: 0b00, // Field RCC_CFGR PLLSRC in ref. manual RM0316.
// Division factor of the AHB clock (AHB prescaler).
// Possible values for hpre:
// 0xxx: SYSCLK not divided
// 1000: SYSCLK divided by 2
// 1001: SYSCLK divided by 4
// 1010: SYSCLK divided by 8
// 1011: SYSCLK divided by 16
// 1100: SYSCLK divided by 64
// 1101: SYSCLK divided by 128
// 1110: SYSCLK divided by 256
// 1111: SYSCLK divided by 512
hpre: 0b0000, // Field RCC_CFGR HPRE in ref. manual RM0316.
// PREDIV division factor.
prediv: 0b000, // Field RCC_CFGR2 PREDIV in ref. manual RM0316.
};
swo::flush();
swo::update_prescaler(HSI_CLK / log::baud_rate!() - 1);
System::delay(100, HSI_CLK, &res).root_wait();
// The on-board user LED is connected to GPIO bank B.
// Create register and pins mapping component.
let gpio_pins = drv_gpio_pins!(reg);
let mut gpio_b = GpioHead::new(periph_gpio_b_head!(reg));
// Enable and initialize.
let gpio_b_en = gpio_b.enable();
gpio_pins.init(gpio_b_en.inventory_token());
scb.scb_ccr_div_0_trp.set_bit();
unsafe {
fpu_init(true);
}
// Enable the system configuration controller clock.
reg.rcc_apb2enr.syscfgen.set_bit();
// Exti configuration for the user button.
// There is no user button on the Nucleo-F303K8,
// but we use the PB5 pin to emulate it.
let exti5 = ExtiDrv::init(ExtiSetup {
exti: periph_exti5!(reg),
exti_int: thr.exti_9_5,
config: 0b001, // PB5 pin.
falling: false, // don't trigger the interrupt on a falling edge.
rising: true, // trigger the interrupt on a rising edge.
});
'user_button_pressed: loop {
// Reset the clock control registers to their default.
System::reset_rcc(&res);
// Apply the current clock tree configuration.
System::apply_clock_config(&res);
// Calculate the configured clock speed.
let hclk = System::calculate_hclk(&res);
swo::flush();
swo::update_prescaler(hclk / log::baud_rate!() - 1);
System::delay(50, hclk, &res).root_wait();
println!("Running at {} MHz", hclk);
listen(&res, &thr, &exti5, &gpio_pins, hclk).root_wait();
// Set different configuration for the clock tree
// depending on current configuration
match clock_mode {
ClockMode::Reset8MHz => {
clock_mode = ClockMode::Medium32MHz; // <- new mode.
res.pllsrc = 0b00; // HSI/2 is the PLL input clock.
res.clksrc = 0b10; // Use PLL output 32 MHz.
res.pllmul = 0b0110;
System::delay(50, 8_000_000, &res).root_wait();
}
ClockMode::Medium32MHz => {
clock_mode = ClockMode::High64MHz; // <- new mode.
res.pllsrc = 0b00; // HSI/2 is the PLL input clock.
res.clksrc = 0b10; // Use PLL output 64 MHz
res.pllmul = 0b1110;
System::delay(50, 32_000_000, &res).root_wait();
}
ClockMode::High64MHz => {
clock_mode = ClockMode::Reset8MHz; // <- new mode.
res.pllsrc = 0b00; // No PLL.
res.clksrc = 0b00; // Use HSI 8MHz.
res.pllmul = 0b0000;
System::delay(20, 64_000_000, &res).root_wait();
}
}
}
}
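// --- Example (not part of the original source) ---
// A minimal, hypothetical sketch of the event multiplexing used in `listen`
// below: `select_biased!` polls the button stream first, so a push wins over
// a tick when both are ready on the same poll.
#[allow(dead_code)]
async fn sketch_next_event(
button: &mut (impl futures::Stream<Item = ()> + Unpin),
tick: &mut (impl futures::Stream<Item = u32> + Unpin),
) -> Event {
select_biased! {
_p = button.next().fuse() => Event::Push,
_t = tick.next().fuse() => Event::Tick,
}
}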
async fn listen(
res: &SystemRes,
thr: &Thrs,
exti5: &ExtiDrv<Exti5, thr::Exti95>,
gpio_pins: &GpioPins,
hclk: u32,
) -> Event {
println!("Enter listen, hclk={}", hclk);
// Attach a listener that will notify us on user button pressed.
let mut button_stream = exti5.create_saturating_stream();
// Attach a listener that will notify us on each SYS_TICK interrupt trigger.
let mut tick_stream = res.thr_sys_tick.add_pulse_try_stream(
// This closure will be called when a receiver no longer can store the
// number of ticks since the last stream poll. If this happens, a
// `TickOverflow` error will be sent over the stream as its final value.
|| Err(TickOverflow),
// A fiber that will be called on each interrupt trigger. It sends a
// single tick over the stream.
fib::new_fn(|| fib::Yielded(Some(1))),
);
// Clear the current value of the timer.
res.sys_tick.stk_val.store(|r| r.write_current(0));
//
// The duration of setting the led ON is inversely proportional to the
// MCU clock speed. It shall be:
// 2.00 seconds when cpu clocks @ 8MHz
// 0.50 seconds when cpu clocks @ 32MHz
// 0.20 seconds when cpu clocks @ 64MHz
// The trigger is set so that it returns twice per interval
// at the highest speed, and proportionally more often per interval
// at lower speeds.
// That way, the Exti interrupt will happen every 100ms at all speeds
// and it can be used for debouncing and double-click control.
let mut trigger = 4_000_000 / 8; // So many systick/sec at 4MHz.
trigger = trigger / 10; // So many in 100ms at 4MHz.
trigger = trigger * (hclk / 4_000_000); // More at higher speed
res.sys_tick.stk_load.store(|r| r.write_reload(trigger));
res.sys_tick.stk_ctrl.store(|r| {
r.set_tickint() // Counting down to 0 triggers the SysTick interrupt
.set_enable() // Start the counter in a multi-shot way
});
let mut green_led_on = true;
gpio_pins.output(Led::GreenLed as u8, true); // Start with the green LED ON.
// Enable the interrupt for the user button.
thr.exti_9_5.enable_int();
// Counters
let mut debounce_protection: i16 = 0;
let mut doubleclick_protection: i16 = 0;
let mut ticks_cnt: u32 = 0;
// Monitored interval lengths (accumulated ticks).
let debounce_ival = 2;
let doubleclick_ival = 4;
// This is dependent on mcu speed:
let ticks_ival: u32 = 40 / (hclk / 4_000_000);
'blinky: loop {
let evt = select_biased! {
_p = button_stream.next().fuse() => Event::Push,
_t = tick_stream.next().fuse() => Event::Tick,
};
match evt {
Event::Tick => {
if debounce_protection > i16::MIN {
debounce_protection = debounce_protection - 1;
};
if doubleclick_protection < i16::MAX {
doubleclick_protection = doubleclick_protection + 1;
};
if debounce_protection == 0 && doubleclick_protection >= doubleclick_ival {
println!("Switch to new speed");
break 'blinky;
}
// The low and the high intervals are 'ticks_ival' ticks each.
ticks_cnt = ticks_cnt + 1;
if ticks_cnt >= ticks_ival {
ticks_cnt = 0;
match green_led_on {
true => {
println!("LED off");
green_led_on = false;
gpio_pins.output(Led::GreenLed as u8, false);
}
_ => {
println!("LED on");
green_led_on = true;
gpio_pins.output(Led::GreenLed as u8, true);
}
}
}
}
Event::Push => {
// After the interrupt has been disabled or re-enabled, the stream
// needs to be flushed so that the logic is protected against
// mechanical contact bouncing and double-clicks during the
// switching period.
if doubleclick_protection > doubleclick_ival {
println!("--");
thr.exti_9_5.disable_int();
debounce_protection = debounce_ival;
} else |
}
}
}
Event::Push
}
| {
doubleclick_protection = 0;
println!("++");
} | conditional_block |
root.rs | //! The root task.
use crate::consts::HSI_CLK;
use crate::{
drv::{
exti::{ExtiDrv, ExtiSetup},
flash::Flash,
gpio::GpioHead,
hsi::Hsi,
lse::Lse,
pll::Pll,
rcc::Rcc,
},
drv_gpio_pins,
sys::{gpio_pins::GpioPins, system::System},
thr,
thr::{Thrs, ThrsInit},
Regs,
};
use drone_core::log;
use drone_cortexm::swo;
use drone_cortexm::processor::fpu_init;
use drone_cortexm::{fib, reg::prelude::*, thr::prelude::*};
use drone_stm32_map::periph::exti::periph_exti5;
use drone_stm32_map::periph::exti::Exti5;
use drone_stm32_map::periph::gpio::periph_gpio_b_head;
use drone_stm32_map::periph::sys_tick::{periph_sys_tick, SysTickPeriph};
use futures::prelude::*;
use futures::select_biased;
enum Event {
Tick,
Push,
}
enum ClockMode {
Reset8MHz,
Medium32MHz,
High64MHz,
}
enum Led {
GreenLed = 1,
}
/// An error returned when a receiver has missed too many ticks.
#[derive(Debug)]
pub struct TickOverflow;
/// System Resources
pub struct SystemRes {
pub sys_tick: SysTickPeriph,
pub thr_sys_tick: thr::SysTick,
pub pll: Pll,
pub hsi: Hsi,
pub lse: Lse,
pub rcc: Rcc,
pub flash: Flash,
pub pllmul: u32,
pub clksrc: u32,
pub pllsrc: u32,
pub hpre: u32,
pub prediv: u32,
}
#[allow(unused_labels)]
#[inline(never)]
pub fn handler(reg: Regs, thr_init: ThrsInit) {
let mut clock_mode = ClockMode::High64MHz;
let (thr, scb) = thr::init_extended(thr_init);
thr.hard_fault.add_once(|| panic!("Hard Fault"));
// Allocate the clock control resources.
let mut res = SystemRes {
sys_tick: periph_sys_tick!(reg),
thr_sys_tick: thr.sys_tick,
// ----------------------
// -- Clocks.
// The internal PLLs can be used to multiply the HSI or HSE
// output clock frequency.
pll: Pll::new(periph_pll!(reg)),
// The HSI clock signal is generated from an internal 8 MHz RC Oscillator.
hsi: Hsi::new(periph_hsi!(reg)),
// The LSE clock (32.768 kHz oscillator, not used in this crate).
lse: Lse::new(periph_lse!(reg)),
// The RCC component.
rcc: Rcc::new(periph_rcc!(reg)),
// The flash component,
flash: Flash::new(periph_flash!(reg)),
// ----------------------
// -- Factors and selectors.
// CAUTION: Setting wrong values may make your system unusable.
// Read the reference manual for detailed information.
//
// PLL multiplication factor.
// Possible values for pllmul:
// Caution: The PLL output frequency must not exceed 72 MHz.
// 0000: PLL input clock x 2
// 0001: PLL input clock x 3
// 0010: PLL input clock x 4
// 0011: PLL input clock x 5
// 0100: PLL input clock x 6
// 0101: PLL input clock x 7
// 0110: PLL input clock x 8
// 0111: PLL input clock x 9
// 1000: PLL input clock x 10
// 1001: PLL input clock x 11
// 1010: PLL input clock x 12
// 1011: PLL input clock x 13
// 1100: PLL input clock x 14
// 1101: PLL input clock x 15
// 1110: PLL input clock x 16
// 1111: Not applicable
pllmul: 0b1110, // Field RCC_CFGR PLLMUL in ref. manual RM0316.
// System clock switch.
// Possible values for clksrc:
// 00: HSI oscillator used as system clock.
// 01: HSE oscillator used as system clock.
// 10: PLL used as system clock
// 11: Not applicable.
clksrc: 0b10, // Field RCC_CFGR SW in ref. manual RM0316.
//
// Possible values for pllsrc:
// Caution: Different values for STM32F303xD/E and STM32F398xE!
// 00: HSI/2 selected as PLL input clock.
// 01: HSE/PREDIV selected as PLL input clock
// 10: Reserved.
// 11: Reserved.
pllsrc: 0b00, // Field RCC_CFGR PLLSRC in ref. manual RM0316.
// Division factor of the AHB clock (AHB prescaler).
// Possible values for hpre:
// 0xxx: SYSCLK not divided
// 1000: SYSCLK divided by 2
// 1001: SYSCLK divided by 4
// 1010: SYSCLK divided by 8
// 1011: SYSCLK divided by 16
// 1100: SYSCLK divided by 64
// 1101: SYSCLK divided by 128
// 1110: SYSCLK divided by 256
// 1111: SYSCLK divided by 512
hpre: 0b0000, // Field RCC_CFGR HPRE in ref. manual RM0316.
// PREDIV division factor.
prediv: 0b000, // Field RCC_CFGR2 PREDIV in ref. manual RM0316.
};
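// A minimal sketch (not part of this crate) of how the selector fields above
// combine into an AHB clock frequency; the real `System::calculate_hclk` is
// assumed to follow the same rules spelled out in the comments, and the
// encodings (pllmul 0b0000 => x2 .. 0b1110 => x16, hpre per its table) are
// taken from RM0316 as quoted above.
#[allow(dead_code)]
fn sketch_hclk(pllmul: u32, clksrc: u32, pllsrc: u32, hpre: u32) -> u32 {
const HSI_HZ: u32 = 8_000_000;
let sysclk = match clksrc {
0b00 => HSI_HZ, // HSI used directly as the system clock.
0b10 => {
// pllsrc 0b00 selects HSI/2 as the PLL input.
let pll_in = if pllsrc == 0b00 { HSI_HZ / 2 } else { unimplemented!() };
pll_in * (pllmul + 2) // 0b0000 => x2, ..., 0b1110 => x16.
}
_ => unreachable!("HSE is not configured in this example"),
};
// AHB prescaler: 0xxx leaves SYSCLK undivided, 1xxx divides per the table.
let shift = match hpre {
0b1000 => 1, 0b1001 => 2, 0b1010 => 3, 0b1011 => 4,
0b1100 => 6, 0b1101 => 7, 0b1110 => 8, 0b1111 => 9,
_ => 0,
};
sysclk >> shift
}
// E.g. sketch_hclk(0b1110, 0b10, 0b00, 0b0000) == 64_000_000.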
swo::flush();
swo::update_prescaler(HSI_CLK / log::baud_rate!() - 1);
System::delay(100, HSI_CLK, &res).root_wait();
// The on-board user LED is connected to GPIO bank B.
// Create register and pins mapping component.
let gpio_pins = drv_gpio_pins!(reg);
let mut gpio_b = GpioHead::new(periph_gpio_b_head!(reg));
// Enable and initialize.
let gpio_b_en = gpio_b.enable();
gpio_pins.init(gpio_b_en.inventory_token());
scb.scb_ccr_div_0_trp.set_bit();
unsafe {
fpu_init(true);
}
// Enable the system configuration controller clock.
reg.rcc_apb2enr.syscfgen.set_bit();
// Setup fault handlers.
thr.hard_fault.add_once(|| panic!("Hard Fault"));
// Exti configuration for the user button.
// There is no user button on the Nucleo-F303K8,
// but we use the PB5 pin to emulate it.
let exti5 = ExtiDrv::init(ExtiSetup {
exti: periph_exti5!(reg),
exti_int: thr.exti_9_5,
config: 0b001, // PB5 pin.
falling: false, // don't trigger the interrupt on a falling edge.
rising: true, // trigger the interrupt on a rising edge.
});
'user_button_pressed: loop {
// Reset the clock control registers to their default.
System::reset_rcc(&res);
// Apply the current clock tree configuration.
System::apply_clock_config(&res);
// Calculate the configured clock speed.
let hclk = System::calculate_hclk(&res);
swo::flush();
swo::update_prescaler(hclk / log::baud_rate!() - 1);
System::delay(50, hclk, &res).root_wait();
println!("Running at {} MHz", hclk);
listen(&res, &thr, &exti5, &gpio_pins, hclk).root_wait();
// Set different configuration for the clock tree
// depending on current configuration
match clock_mode {
ClockMode::Reset8MHz => {
clock_mode = ClockMode::Medium32MHz; // <- new mode.
res.pllsrc = 0b00; // HSI is PLL clock input.
res.clksrc = 0b10; // Use PLL output 32 MHz.
res.pllmul = 0b0110;
System::delay(50, 8_000_000, &res).root_wait();
}
ClockMode::Medium32MHz => {
clock_mode = ClockMode::High64MHz; // <- new mode.
res.pllsrc = 0b00; // HSI is PLL clock input.
res.clksrc = 0b10; // Use PLL output 64 MHz
res.pllmul = 0b1110;
System::delay(50, 32_000_000, &res).root_wait();
}
ClockMode::High64MHz => {
clock_mode = ClockMode::Reset8MHz; // <- new mode.
res.pllsrc = 0b00; // No PLL.
res.clksrc = 0b00; // Use HSI 8MHz.
res.pllmul = 0b0000;
System::delay(20, 64_000_000, &res).root_wait();
}
}
}
}
async fn listen(
res: &SystemRes,
thr: &Thrs,
exti5: &ExtiDrv<Exti5, thr::Exti95>,
gpio_pins: &GpioPins,
hclk: u32,
) -> Event {
println!("Enter listen, hclk={}", hclk);
// Attach a listener that will notify us on user button pressed.
let mut button_stream = exti5.create_saturating_stream();
// Attach a listener that will notify us on each SYS_TICK interrupt trigger.
let mut tick_stream = res.thr_sys_tick.add_pulse_try_stream(
// This closure will be called when a receiver no longer can store the
// number of ticks since the last stream poll. If this happens, a
// `TickOverflow` error will be sent over the stream as its final value.
|| Err(TickOverflow),
// A fiber that will be called on each interrupt trigger. It sends a
// single tick over the stream.
fib::new_fn(|| fib::Yielded(Some(1))),
);
// Clear the current value of the timer.
res.sys_tick.stk_val.store(|r| r.write_current(0));
//
// The duration of setting the led ON is inversely proportional to the
// MCU clock speed. It shall be:
// 4.00 seconds when cpu clocks @ 4MHz
// 0.40 seconds when cpu clocks @ 36MHz
// 0.20 seconds when cpu clocks @ 72MHz
// The trigger is set so that it returns twice per interval
// at the highest speed, and proportionally more often per interval
// at lower speeds.
// That way, the Exti interrupt will happen every 100ms at all speeds
// and it can be used for debouncing and doubleclick control.
let mut trigger = 4_000_000 / 8; // So many systick/sec at 4MHz.
trigger = trigger / 10; // So many in 100ms at 4MHz.
trigger = trigger * (hclk / 4_000_000); // More at higher speed
res.sys_tick.stk_load.store(|r| r.write_reload(trigger));
res.sys_tick.stk_ctrl.store(|r| {
r.set_tickint() // Counting down to 0 triggers the SysTick interrupt
.set_enable() // Start the counter in a multi-shot way
});
let mut green_led_on = true;
gpio_pins.output(Led::GreenLed as u8, true); // Start with the green LED ON.
// Enable the interrupt for the user button.
thr.exti_9_5.enable_int();
// Counters
let mut debounce_protection: i16 = 0;
let mut doubleclick_protection: i16 = 0;
let mut ticks_cnt: u32 = 0;
// Monitored interval lengths (accumulated ticks).
let debounce_ival = 2;
let doubleclick_ival = 4;
// This is dependent on mcu speed:
let ticks_ival: u32 = 40 / (hclk / 4_000_000);
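// Worked example of the arithmetic above (illustrative): at hclk = 64 MHz,
// trigger = (4_000_000 / 8 / 10) * (64_000_000 / 4_000_000) = 800_000 SysTick
// counts, i.e. one interrupt every 100ms regardless of core speed, while
// ticks_ival = 40 / 16 = 2, so the LED state flips roughly every 200ms.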
'blinky: loop {
let evt = select_biased! {
_p = button_stream.next().fuse() => Event::Push,
_t = tick_stream.next().fuse() => Event::Tick,
};
match evt {
Event::Tick => {
if debounce_protection > i16::MIN {
debounce_protection = debounce_protection - 1;
};
if doubleclick_protection < i16::MAX {
doubleclick_protection = doubleclick_protection + 1;
};
if debounce_protection == 0 && doubleclick_protection >= doubleclick_ival {
println!("Switch to new speed");
break 'blinky;
}
// The low and the high intervals are each 'ticks_ival' ticks long.
ticks_cnt = ticks_cnt + 1;
if ticks_cnt >= ticks_ival {
ticks_cnt = 0;
match green_led_on {
true => {
println!("LED off");
green_led_on = false;
gpio_pins.output(Led::GreenLed as u8, false);
}
_ => {
println!("LED on");
green_led_on = true;
gpio_pins.output(Led::GreenLed as u8, true);
}
}
}
}
Event::Push => {
// After disabling the interrupt or after re-enabling
// the interrupt, the stream needs to be flushed to protect
// the logic during the switching period against mechanical
// contact bouncing and doubleclicks.
if doubleclick_protection > doubleclick_ival {
println!("--");
thr.exti_9_5.disable_int();
debounce_protection = debounce_ival;
} else {
doubleclick_protection = 0;
println!("++");
}
}
}
}
Event::Push
} | random_line_split |
wait_cell.rs | Subscribe<'a> {
/// The [`WaitCell`] being waited on.
cell: &'a WaitCell,
}
#[derive(Eq, PartialEq, Copy, Clone)]
struct State(usize);
// === impl WaitCell ===
impl WaitCell {
loom_const_fn! {
/// Returns a new `WaitCell`, with no [`Waker`] stored in it.
#[must_use]
pub fn new() -> Self {
Self {
state: CachePadded::new(AtomicUsize::new(State::WAITING.0)),
waker: UnsafeCell::new(None),
}
}
}
}
impl WaitCell {
/// Poll to wait on this `WaitCell`, consuming a stored wakeup or
/// registering the [`Waker`] from the provided [`Context`] to be woken by
/// the next wakeup.
///
/// Once a [`Waker`] has been registered, a subsequent call to [`wake`] will
/// wake that [`Waker`].
///
/// # Returns
///
/// - [`Poll::Pending`] if the [`Waker`] was registered. If this method returns
/// [`Poll::Pending`], then the registered [`Waker`] will be woken by a
/// subsequent call to [`wake`].
/// - [`Poll::Ready`]`(`[`Ok`]`(()))` if the cell was woken by a call to
/// [`wake`] while the [`Waker`] was being registered.
/// - [`Poll::Ready`]`(`[`Err`]`(`[`PollWaitError::Closed`]`))` if the
/// [`WaitCell`] has been closed.
/// - [`Poll::Ready`]`(`[`Err`]`(`[`PollWaitError::Busy`]`))` if another
/// task was concurrently registering its [`Waker`] with this
/// [`WaitCell`].
///
/// [`wake`]: Self::wake
pub fn poll_wait(&self, cx: &mut Context<'_>) -> Poll<Result<(), PollWaitError>> {
enter_test_debug_span!("WaitCell::poll_wait", cell =?fmt::ptr(self));
// this is based on tokio's AtomicWaker synchronization strategy
match test_dbg!(self.compare_exchange(State::WAITING, State::REGISTERING, Acquire)) {
Err(actual) if test_dbg!(actual.contains(State::CLOSED)) => {
return Poll::Ready(Err(PollWaitError::Closed));
}
Err(actual) if test_dbg!(actual.contains(State::WOKEN)) => {
// take the wakeup
self.fetch_and(!State::WOKEN, Release);
return Poll::Ready(Ok(()));
}
// someone else is notifying, so don't wait!
Err(actual) if test_dbg!(actual.contains(State::WAKING)) => {
return Poll::Ready(Ok(()));
}
Err(_) => return Poll::Ready(Err(PollWaitError::Busy)),
Ok(_) => {}
}
let waker = cx.waker();
trace!(wait_cell = ?fmt::ptr(self), ?waker, "registering waker");
let prev_waker = self.waker.with_mut(|old_waker| unsafe {
match &mut *old_waker {
Some(old_waker) if waker.will_wake(old_waker) => None,
old => old.replace(waker.clone()),
}
});
if let Some(prev_waker) = prev_waker {
test_debug!("Replaced an old waker in cell, waking");
prev_waker.wake();
}
if let Err(actual) =
test_dbg!(self.compare_exchange(State::REGISTERING, State::WAITING, AcqRel))
{
// If the `compare_exchange` fails above, this means that we were notified for one of
// two reasons: either the cell was awoken, or the cell was closed.
//
// Bail out of the parking state, and determine what to report to the caller.
test_trace!(state =?actual, "was notified");
let waker = self.waker.with_mut(|waker| unsafe { (*waker).take() });
// Reset to the WAITING state by clearing everything *except*
// the closed bits (which must remain set). This `fetch_and`
// does *not* set the CLOSED bit if it is unset, it just doesn't
// clear it.
let state = test_dbg!(self.fetch_and(State::CLOSED, AcqRel));
// The only valid state transition while we were parking is to
// add the CLOSED bit.
debug_assert!(
state == actual || state == actual | State::CLOSED,
"state changed unexpectedly while parking!"
);
if let Some(waker) = waker {
waker.wake();
}
// Was the `CLOSED` bit set while we were clearing other bits?
// If so, the cell is closed. Otherwise, we must have been notified.
if state.contains(State::CLOSED) {
return Poll::Ready(Err(PollWaitError::Closed));
}
return Poll::Ready(Ok(()));
}
// Waker registered, time to yield!
Poll::Pending
}
/// Wait to be woken up by this cell.
///
/// # Returns
///
/// This future completes with the following values:
///
/// - [`Ok`]`(())` if the future was woken by a call to [`wake`] or another
/// task calling [`poll_wait`] or [`wait`] on this [`WaitCell`].
/// - [`Err`]`(`[`Closed`]`)` if the task was woken by a call to [`close`],
/// or the [`WaitCell`] was already closed.
///
/// **Note**: The calling task's [`Waker`] is not registered until AFTER the
/// first time the returned [`Wait`] future is polled. This means that if a
/// call to [`wake`] occurs between when [`wait`] is called and when the
/// future is first polled, the future will *not* complete. If the caller is
/// responsible for performing an operation which will result in an eventual
/// wakeup, prefer calling [`subscribe`] _before_ performing that operation
/// and `.await`ing the [`Wait`] future returned by [`subscribe`].
///
/// [`wake`]: Self::wake
/// [`poll_wait`]: Self::poll_wait
/// [`wait`]: Self::wait
/// [`close`]: Self::close
/// [`subscribe`]: Self::subscribe
pub fn wait(&self) -> Wait<'_> {
Wait {
cell: self,
presubscribe: Poll::Pending,
}
}
/// Eagerly subscribe to notifications from this `WaitCell`.
///
/// This method returns a [`Subscribe`] [`Future`], which outputs a [`Wait`]
/// [`Future`]. Awaiting the [`Subscribe`] future will eagerly register the
/// calling task to be woken by this [`WaitCell`], so that the returned
/// [`Wait`] future will be woken by any calls to [`wake`] (or [`close`])
/// that occur between when the [`Subscribe`] future completes and when the
/// returned [`Wait`] future is `.await`ed.
///
/// This is primarily intended for scenarios where the task that waits on a
/// [`WaitCell`] is responsible for performing some operation that
/// ultimately results in the [`WaitCell`] being woken. If the task were to
/// simply perform the operation and then call [`wait`] on the [`WaitCell`],
/// a potential race condition could occur where the operation completes and
/// wakes the [`WaitCell`] *before* the [`Wait`] future is first `.await`ed.
/// Using `subscribe`, the task can ensure that it is ready to be woken by
/// the cell *before* performing an operation that could result in it being
/// woken.
///
/// These scenarios occur when a wakeup is triggered by another thread/CPU
/// core in response to an operation performed in the task waiting on the
/// `WaitCell`, or when the wakeup is triggered by a hardware interrupt
/// resulting from operations performed in the task.
///
/// # Examples
///
/// ```
/// use maitake::sync::WaitCell;
///
/// // Perform an operation that results in a concurrent wakeup, such as
/// // unmasking an interrupt.
/// fn do_something_that_causes_a_wakeup() {
/// # WAIT_CELL.wake();
/// //...
/// }
///
/// static WAIT_CELL: WaitCell = WaitCell::new();
///
/// # async fn dox() {
/// // Subscribe to notifications from the cell *before* calling
/// // `do_something_that_causes_a_wakeup()`, to ensure that we are
/// // ready to be woken when the interrupt is unmasked.
/// let wait = WAIT_CELL.subscribe().await;
///
/// // Actually perform the operation.
/// do_something_that_causes_a_wakeup();
///
/// // Wait for the wakeup. If the wakeup occurred *before* the first
/// // poll of the `wait` future had successfully subscribed to the
/// // `WaitCell`, we would still receive the wakeup, because the
/// // `subscribe` future ensured that our waker was registered to be
/// // woken.
/// wait.await.expect("WaitCell is not closed");
/// # }
/// ```
///
/// [`wait`]: Self::wait
/// [`wake`]: Self::wake
/// [`close`]: Self::close
pub fn subscribe(&self) -> Subscribe<'_> {
Subscribe { cell: self }
}
/// Wake the [`Waker`] stored in this cell.
///
/// # Returns
///
/// - `true` if a waiting task was woken.
/// - `false` if no task was woken (no [`Waker`] was stored in the cell)
pub fn wake(&self) -> bool {
enter_test_debug_span!("WaitCell::wake", cell =?fmt::ptr(self));
if let Some(waker) = self.take_waker(false) {
waker.wake();
true
} else {
false
}
}
/// Close the [`WaitCell`].
///
/// This wakes any waiting task with an error indicating the `WaitCell` is
/// closed. Subsequent calls to [`wait`] or [`poll_wait`] will return an
/// error indicating that the cell has been closed.
///
/// [`wait`]: Self::wait
/// [`poll_wait`]: Self::poll_wait
pub fn close(&self) -> bool {
enter_test_debug_span!("WaitCell::close", cell =?fmt::ptr(self));
if let Some(waker) = self.take_waker(true) {
waker.wake();
true
} else {
false
}
}
// TODO(eliza): is this an API we want to have?
/*
/// Returns `true` if this `WaitCell` is [closed](Self::close).
pub(crate) fn is_closed(&self) -> bool {
self.current_state() == State::CLOSED
}
*/
/// Takes this `WaitCell`'s waker.
// TODO(eliza): could probably be made a public API...
pub(crate) fn take_waker(&self, close: bool) -> Option<Waker> {
trace!(wait_cell =?fmt::ptr(self),?close, "notifying");
// Set the WAKING bit (to indicate that we're touching the waker) and
// the WOKEN bit (to indicate that we intend to wake it up).
let state = {
let mut bits = State::WAKING | State::WOKEN;
if close {
bits.0 |= State::CLOSED.0;
}
test_dbg!(self.fetch_or(bits, AcqRel))
};
// Is anyone else touching the waker?
if !test_dbg!(state.contains(State::WAKING | State::REGISTERING | State::CLOSED)) {
// Ladies and gentlemen...we got him (the lock)!
let waker = self.waker.with_mut(|thread| unsafe { (*thread).take() });
// Release the lock.
self.fetch_and(!State::WAKING, Release);
if let Some(waker) = test_dbg!(waker) {
trace!(wait_cell = ?fmt::ptr(self), ?close, ?waker, "notified");
return Some(waker);
}
}
None
}
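// Example interleaving (descriptive only): if `wake` runs while `poll_wait`
// is mid-registration, the `fetch_or` above observes REGISTERING and leaves
// the waker alone; the registering task's closing `compare_exchange` then
// fails, sees WOKEN, and consumes the wakeup itself, so no wakeup is lost.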
}
impl WaitCell {
#[inline(always)]
fn compare_exchange(
&self,
State(curr): State,
State(new): State,
success: Ordering,
) -> Result<State, State> {
self.state
.compare_exchange(curr, new, success, Acquire)
.map(State)
.map_err(State)
}
#[inline(always)]
fn fetch_and(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_and(state, order))
}
#[inline(always)]
fn fetch_or(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_or(state, order))
}
#[inline(always)]
fn current_state(&self) -> State {
State(self.state.load(Acquire))
}
}
unsafe impl Send for WaitCell {}
unsafe impl Sync for WaitCell {}
impl fmt::Debug for WaitCell {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WaitCell")
.field("state", &self.current_state())
.field("waker", &fmt::display(".."))
.finish()
}
}
impl Drop for WaitCell {
fn drop(&mut self) {
self.close();
}
}
// === impl Wait ===
impl Future for Wait<'_> {
type Output = Result<(), Closed>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Wait::poll");
// Did a wakeup occur while we were pre-registering the future?
if test_dbg!(self.presubscribe.is_ready()) {
return self.presubscribe;
}
// Okay, actually poll the cell, then.
match task::ready!(test_dbg!(self.cell.poll_wait(cx))) {
Ok(()) => Poll::Ready(Ok(())),
Err(PollWaitError::Closed) => Poll::Ready(Err(Closed(()))),
Err(PollWaitError::Busy) => {
// If some other task was registering, yield and try to re-register
// our waker when that task is done.
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
}
// === impl Subscribe ===
impl<'cell> Future for Subscribe<'cell> {
type Output = Wait<'cell>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Subscribe::poll");
// Pre-register the waker in the cell.
let presubscribe = match test_dbg!(self.cell.poll_wait(cx)) {
Poll::Ready(Err(PollWaitError::Busy)) => {
// Someone else is in the process of registering. Yield now so we
// can wait until that task is done, and then try again.
cx.waker().wake_by_ref();
return Poll::Pending;
}
Poll::Ready(Err(PollWaitError::Closed)) => Poll::Ready(Err(Closed(()))),
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
Poll::Pending => Poll::Pending,
};
Poll::Ready(Wait {
cell: self.cell,
presubscribe,
})
}
}
// === impl State ===
impl State {
/// /!\ EXTREMELY SERIOUS WARNING! /!\
/// It is LOAD BEARING that the `WAITING` state is represented by zero!
/// This is because we return to the waiting state by `fetch_and`ing out all
/// other bits in a few places. If this state's bit representation is
/// changed to anything other than zero, that code will break! Don't do
/// that!
///
/// YES, FUTURE ELIZA, THIS DOES APPLY TO YOU. YOU ALREADY BROKE IT ONCE.
/// DON'T DO IT AGAIN.
const WAITING: Self = Self(0b0000);
const REGISTERING: Self = Self(0b0001);
const WAKING: Self = Self(0b0010);
const WOKEN: Self = Self(0b0100);
const CLOSED: Self = Self(0b1000);
fn contains(self, Self(state): Self) -> bool {
self.0 & state > 0
}
}
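// A compile-time guard (a sketch, not present in the original) that would
// enforce the load-bearing invariant documented above: WAITING must be zero.
const _: () = assert!(State::WAITING.0 == 0);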
impl ops::BitOr for State {
type Output = Self;
fn bitor(self, Self(rhs): Self) -> Self::Output {
Self(self.0 | rhs)
}
}
impl ops::BitAnd for State {
type Output = Self;
fn bitand(self, Self(rhs): Self) -> Self::Output {
Self(self.0 & rhs)
}
}
impl ops::Not for State {
type Output = Self;
fn not(self) -> Self::Output {
Self(!self.0)
}
}
impl fmt::Debug for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut has_states = false;
fmt_bits!(self, f, has_states, REGISTERING, WAKING, CLOSED, WOKEN);
if !has_states {
if *self == Self::WAITING {
return f.write_str("WAITING");
}
f.debug_tuple("UnknownState")
.field(&format_args!("{:#b}", self.0))
.finish()?;
}
Ok(())
}
}
#[cfg(all(feature = "alloc", not(loom), test))]
mod tests {
use super::*;
use crate::scheduler::Scheduler;
use alloc::sync::Arc;
use tokio_test::{assert_pending, assert_ready, assert_ready_ok, task};
#[test]
fn wait_smoke() {
static COMPLETED: AtomicUsize = AtomicUsize::new(0);
let _trace = crate::util::test::trace_init();
let sched = Scheduler::new();
let wait = Arc::new(WaitCell::new());
let wait2 = wait.clone();
sched.spawn(async move {
wait2.wait().await.unwrap();
COMPLETED.fetch_add(1, Ordering::Relaxed);
});
let tick = sched.tick();
assert_eq!(tick.completed, 0);
assert_eq!(COMPLETED.load(Ordering::Relaxed), 0);
assert!(wait.wake());
let tick = sched.tick();
assert_eq!(tick.completed, 1);
assert_eq!(COMPLETED.load(Ordering::Relaxed), 1);
}
/// Reproduces https://github.com/hawkw/mycelium/issues/449
#[test]
fn wait_spurious_poll() {
let _trace = crate::util::test::trace_init();
let cell = Arc::new(WaitCell::new());
let mut task = task::spawn({
let cell = cell.clone();
async move { cell.wait().await }
});
assert_pending!(task.poll(), "first poll should be pending");
assert_pending!(task.poll(), "second poll should be pending");
cell.wake();
assert_ready_ok!(task.poll(), "should have been woken");
}
#[test]
fn subscribe() {
let _trace = crate::util::test::trace_init();
futures::executor::block_on(async {
let cell = WaitCell::new();
let wait = cell.subscribe().await;
cell.wake();
wait.await.unwrap();
})
}
#[test]
fn wake_before_subscribe() {
let _trace = crate::util::test::trace_init();
let cell = Arc::new(WaitCell::new());
cell.wake();
let mut task = task::spawn({
let cell = cell.clone();
async move {
let wait = cell.subscribe().await;
wait.await.unwrap();
}
});
assert_ready!(task.poll(), "woken task should complete");
let mut task = task::spawn({
let cell = cell.clone();
async move {
let wait = cell.subscribe().await;
wait.await.unwrap();
}
});
assert_pending!(task.poll(), "wait cell hasn't been woken yet");
cell.wake();
assert!(task.is_woken());
assert_ready!(task.poll());
}
#[test]
fn wake_debounce() {
let _trace = crate::util::test::trace_init();
let cell = Arc::new(WaitCell::new());
let mut task = task::spawn({
let cell = cell.clone();
async move {
cell.wait().await.unwrap();
}
});
assert_pending!(task.poll());
cell.wake();
cell.wake();
assert!(task.is_woken());
assert_ready!(task.poll());
let mut task = task::spawn({
let cell = cell.clone();
async move {
cell.wait().await.unwrap();
}
});
assert_pending!(task.poll());
assert!(!task.is_wo | identifier_name |
|
wait_cell.rs | (Eq, PartialEq, Copy, Clone)]
struct State(usize);
// === impl WaitCell ===
impl WaitCell {
loom_const_fn! {
/// Returns a new `WaitCell`, with no [`Waker`] stored in it.
#[must_use]
pub fn new() -> Self {
Self {
state: CachePadded::new(AtomicUsize::new(State::WAITING.0)),
waker: UnsafeCell::new(None),
}
}
}
}
impl WaitCell {
/// Poll to wait on this `WaitCell`, consuming a stored wakeup or
/// registering the [`Waker`] from the provided [`Context`] to be woken by
/// the next wakeup.
///
/// Once a [`Waker`] has been registered, a subsequent call to [`wake`] will
/// wake that [`Waker`].
///
/// # Returns
///
/// - [`Poll::Pending`] if the [`Waker`] was registered. If this method returns
/// [`Poll::Pending`], then the registered [`Waker`] will be woken by a
/// subsequent call to [`wake`].
/// - [`Poll::Ready`]`(`[`Ok`]`(()))` if the cell was woken by a call to
/// [`wake`] while the [`Waker`] was being registered.
/// - [`Poll::Ready`]`(`[`Err`]`(`[`PollWaitError::Closed`]`))` if the
/// [`WaitCell`] has been closed.
/// - [`Poll::Ready`]`(`[`Err`]`(`[`PollWaitError::Busy`]`))` if another
/// task was concurrently registering its [`Waker`] with this
/// [`WaitCell`].
///
/// [`wake`]: Self::wake
pub fn poll_wait(&self, cx: &mut Context<'_>) -> Poll<Result<(), PollWaitError>> {
enter_test_debug_span!("WaitCell::poll_wait", cell =?fmt::ptr(self));
// this is based on tokio's AtomicWaker synchronization strategy
match test_dbg!(self.compare_exchange(State::WAITING, State::REGISTERING, Acquire)) {
Err(actual) if test_dbg!(actual.contains(State::CLOSED)) => {
return Poll::Ready(Err(PollWaitError::Closed));
}
Err(actual) if test_dbg!(actual.contains(State::WOKEN)) => {
// take the wakeup
self.fetch_and(!State::WOKEN, Release);
return Poll::Ready(Ok(()));
}
// someone else is notifying, so don't wait!
Err(actual) if test_dbg!(actual.contains(State::WAKING)) => {
return Poll::Ready(Ok(()));
}
Err(_) => return Poll::Ready(Err(PollWaitError::Busy)),
Ok(_) => {}
}
let waker = cx.waker();
trace!(wait_cell = ?fmt::ptr(self), ?waker, "registering waker");
let prev_waker = self.waker.with_mut(|old_waker| unsafe {
match &mut *old_waker {
Some(old_waker) if waker.will_wake(old_waker) => None,
old => old.replace(waker.clone()),
}
});
if let Some(prev_waker) = prev_waker {
test_debug!("Replaced an old waker in cell, waking");
prev_waker.wake();
}
if let Err(actual) =
test_dbg!(self.compare_exchange(State::REGISTERING, State::WAITING, AcqRel))
{
// If the `compare_exchange` fails above, this means that we were notified for one of
// two reasons: either the cell was awoken, or the cell was closed.
//
// Bail out of the parking state, and determine what to report to the caller.
test_trace!(state =?actual, "was notified");
let waker = self.waker.with_mut(|waker| unsafe { (*waker).take() });
// Reset to the WAITING state by clearing everything *except*
// the closed bits (which must remain set). This `fetch_and`
// does *not* set the CLOSED bit if it is unset, it just doesn't
// clear it.
let state = test_dbg!(self.fetch_and(State::CLOSED, AcqRel));
// The only valid state transition while we were parking is to
// add the CLOSED bit.
debug_assert!(
state == actual || state == actual | State::CLOSED,
"state changed unexpectedly while parking!"
);
if let Some(waker) = waker {
waker.wake();
}
// Was the `CLOSED` bit set while we were clearing other bits?
// If so, the cell is closed. Otherwise, we must have been notified.
if state.contains(State::CLOSED) {
return Poll::Ready(Err(PollWaitError::Closed));
}
return Poll::Ready(Ok(()));
}
// Waker registered, time to yield!
Poll::Pending
}
/// Wait to be woken up by this cell.
///
/// # Returns
///
/// This future completes with the following values:
///
/// - [`Ok`]`(())` if the future was woken by a call to [`wake`] or another
/// task calling [`poll_wait`] or [`wait`] on this [`WaitCell`].
/// - [`Err`]`(`[`Closed`]`)` if the task was woken by a call to [`close`],
/// or the [`WaitCell`] was already closed.
///
/// **Note**: The calling task's [`Waker`] is not registered until AFTER the
/// first time the returned [`Wait`] future is polled. This means that if a
/// call to [`wake`] occurs between when [`wait`] is called and when the
/// future is first polled, the future will *not* complete. If the caller is
/// responsible for performing an operation which will result in an eventual
/// wakeup, prefer calling [`subscribe`] _before_ performing that operation
/// and `.await`ing the [`Wait`] future returned by [`subscribe`].
///
/// [`wake`]: Self::wake
/// [`poll_wait`]: Self::poll_wait
/// [`wait`]: Self::wait
/// [`close`]: Self::close
/// [`subscribe`]: Self::subscribe
pub fn wait(&self) -> Wait<'_> {
Wait {
cell: self,
presubscribe: Poll::Pending,
}
}
/// Eagerly subscribe to notifications from this `WaitCell`.
///
/// This method returns a [`Subscribe`] [`Future`], which outputs a [`Wait`]
/// [`Future`]. Awaiting the [`Subscribe`] future will eagerly register the
/// calling task to be woken by this [`WaitCell`], so that the returned
/// [`Wait`] future will be woken by any calls to [`wake`] (or [`close`])
/// that occur between when the [`Subscribe`] future completes and when the
/// returned [`Wait`] future is `.await`ed.
///
/// This is primarily intended for scenarios where the task that waits on a
/// [`WaitCell`] is responsible for performing some operation that
/// ultimately results in the [`WaitCell`] being woken. If the task were to
/// simply perform the operation and then call [`wait`] on the [`WaitCell`],
/// a potential race condition could occur where the operation completes and
/// wakes the [`WaitCell`] *before* the [`Wait`] future is first `.await`ed.
/// Using `subscribe`, the task can ensure that it is ready to be woken by
/// the cell *before* performing an operation that could result in it being
/// woken.
///
/// These scenarios occur when a wakeup is triggered by another thread/CPU
/// core in response to an operation performed in the task waiting on the
/// `WaitCell`, or when the wakeup is triggered by a hardware interrupt
/// resulting from operations performed in the task.
///
/// # Examples
///
/// ```
/// use maitake::sync::WaitCell;
///
/// // Perform an operation that results in a concurrent wakeup, such as
/// // unmasking an interrupt.
/// fn do_something_that_causes_a_wakeup() {
/// # WAIT_CELL.wake();
/// //...
/// }
///
/// static WAIT_CELL: WaitCell = WaitCell::new();
///
/// # async fn dox() {
/// // Subscribe to notifications from the cell *before* calling
/// // `do_something_that_causes_a_wakeup()`, to ensure that we are
/// // ready to be woken when the interrupt is unmasked.
/// let wait = WAIT_CELL.subscribe().await;
///
/// // Actually perform the operation.
/// do_something_that_causes_a_wakeup();
///
/// // Wait for the wakeup. If the wakeup occurred *before* the first
/// // poll of the `wait` future had successfully subscribed to the
/// // `WaitCell`, we would still receive the wakeup, because the
/// // `subscribe` future ensured that our waker was registered to be
/// // woken.
/// wait.await.expect("WaitCell is not closed");
/// # }
/// ```
///
/// [`wait`]: Self::wait
/// [`wake`]: Self::wake
/// [`close`]: Self::close
pub fn subscribe(&self) -> Subscribe<'_> {
Subscribe { cell: self }
}
/// Wake the [`Waker`] stored in this cell.
///
/// # Returns
///
/// - `true` if a waiting task was woken.
/// - `false` if no task was woken (no [`Waker`] was stored in the cell)
pub fn wake(&self) -> bool {
enter_test_debug_span!("WaitCell::wake", cell =?fmt::ptr(self));
if let Some(waker) = self.take_waker(false) {
waker.wake();
true
} else {
false
}
}
/// Close the [`WaitCell`].
///
/// This wakes any waiting task with an error indicating the `WaitCell` is
/// closed. Subsequent calls to [`wait`] or [`poll_wait`] will return an
/// error indicating that the cell has been closed.
///
/// [`wait`]: Self::wait
/// [`poll_wait`]: Self::poll_wait
pub fn close(&self) -> bool {
enter_test_debug_span!("WaitCell::close", cell =?fmt::ptr(self));
if let Some(waker) = self.take_waker(true) {
waker.wake();
true
} else {
false
}
}
// TODO(eliza): is this an API we want to have?
/*
/// Returns `true` if this `WaitCell` is [closed](Self::close).
pub(crate) fn is_closed(&self) -> bool {
self.current_state() == State::CLOSED
}
*/
/// Takes this `WaitCell`'s waker.
// TODO(eliza): could probably be made a public API...
pub(crate) fn take_waker(&self, close: bool) -> Option<Waker> {
trace!(wait_cell =?fmt::ptr(self),?close, "notifying");
// Set the WAKING bit (to indicate that we're touching the waker) and
// the WOKEN bit (to indicate that we intend to wake it up).
let state = {
let mut bits = State::WAKING | State::WOKEN;
if close {
bits.0 |= State::CLOSED.0;
}
test_dbg!(self.fetch_or(bits, AcqRel))
};
// Is anyone else touching the waker?
if !test_dbg!(state.contains(State::WAKING | State::REGISTERING | State::CLOSED)) {
// Ladies and gentlemen...we got him (the lock)!
let waker = self.waker.with_mut(|thread| unsafe { (*thread).take() });
// Release the lock.
self.fetch_and(!State::WAKING, Release);
if let Some(waker) = test_dbg!(waker) {
trace!(wait_cell = ?fmt::ptr(self), ?close, ?waker, "notified");
return Some(waker);
}
}
None
}
}
impl WaitCell {
#[inline(always)]
fn compare_exchange(
&self,
State(curr): State,
State(new): State,
success: Ordering,
) -> Result<State, State> {
self.state
.compare_exchange(curr, new, success, Acquire)
.map(State)
.map_err(State)
}
#[inline(always)]
fn fetch_and(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_and(state, order))
}
#[inline(always)]
fn fetch_or(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_or(state, order))
}
#[inline(always)]
fn current_state(&self) -> State {
State(self.state.load(Acquire))
}
}
unsafe impl Send for WaitCell {}
unsafe impl Sync for WaitCell {}
impl fmt::Debug for WaitCell {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WaitCell")
.field("state", &self.current_state())
.field("waker", &fmt::display(".."))
.finish()
}
}
impl Drop for WaitCell {
fn drop(&mut self) {
self.close();
}
}
// === impl Wait ===
impl Future for Wait<'_> {
type Output = Result<(), Closed>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Wait::poll");
// Did a wakeup occur while we were pre-registering the future?
if test_dbg!(self.presubscribe.is_ready()) {
return self.presubscribe;
}
// Okay, actually poll the cell, then.
match task::ready!(test_dbg!(self.cell.poll_wait(cx))) {
Ok(()) => Poll::Ready(Ok(())),
Err(PollWaitError::Closed) => Poll::Ready(Err(Closed(()))),
Err(PollWaitError::Busy) => {
// If some other task was registering, yield and try to re-register
// our waker when that task is done.
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
}
// === impl Subscribe ===
impl<'cell> Future for Subscribe<'cell> {
type Output = Wait<'cell>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Subscribe::poll");
// Pre-register the waker in the cell.
let presubscribe = match test_dbg!(self.cell.poll_wait(cx)) {
Poll::Ready(Err(PollWaitError::Busy)) => {
// Someone else is in the process of registering. Yield now so we
// can wait until that task is done, and then try again.
cx.waker().wake_by_ref();
return Poll::Pending;
}
Poll::Ready(Err(PollWaitError::Closed)) => Poll::Ready(Err(Closed(()))),
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
Poll::Pending => Poll::Pending,
};
Poll::Ready(Wait {
cell: self.cell,
presubscribe,
})
}
}
// === impl State ===
impl State {
/// /!\ EXTREMELY SERIOUS WARNING! /!\
/// It is LOAD BEARING that the `WAITING` state is represented by zero!
/// This is because we return to the waiting state by `fetch_and`ing out all
/// other bits in a few places. If this state's bit representation is
/// changed to anything other than zero, that code will break! Don't do
/// that!
///
/// YES, FUTURE ELIZA, THIS DOES APPLY TO YOU. YOU ALREADY BROKE IT ONCE.
/// DON'T DO IT AGAIN.
const WAITING: Self = Self(0b0000);
const REGISTERING: Self = Self(0b0001);
const WAKING: Self = Self(0b0010);
const WOKEN: Self = Self(0b0100);
const CLOSED: Self = Self(0b1000);
fn contains(self, Self(state): Self) -> bool {
self.0 & state > 0
}
}
impl ops::BitOr for State {
type Output = Self;
fn bitor(self, Self(rhs): Self) -> Self::Output {
Self(self.0 | rhs)
}
}
impl ops::BitAnd for State {
type Output = Self;
fn bitand(self, Self(rhs): Self) -> Self::Output {
Self(self.0 & rhs)
}
}
impl ops::Not for State {
type Output = Self;
fn not(self) -> Self::Output {
Self(!self.0)
}
}
impl fmt::Debug for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut has_states = false;
fmt_bits!(self, f, has_states, REGISTERING, WAKING, CLOSED, WOKEN);
if !has_states {
if *self == Self::WAITING {
return f.write_str("WAITING");
}
f.debug_tuple("UnknownState")
.field(&format_args!("{:#b}", self.0))
.finish()?;
}
Ok(())
}
}
#[cfg(all(feature = "alloc", not(loom), test))]
mod tests {
use super::*;
use crate::scheduler::Scheduler;
use alloc::sync::Arc;
use tokio_test::{assert_pending, assert_ready, assert_ready_ok, task};
#[test]
fn wait_smoke() {
static COMPLETED: AtomicUsize = AtomicUsize::new(0);
let _trace = crate::util::test::trace_init();
let sched = Scheduler::new();
let wait = Arc::new(WaitCell::new());
let wait2 = wait.clone();
sched.spawn(async move {
wait2.wait().await.unwrap();
COMPLETED.fetch_add(1, Ordering::Relaxed);
});
let tick = sched.tick();
assert_eq!(tick.completed, 0);
assert_eq!(COMPLETED.load(Ordering::Relaxed), 0);
assert!(wait.wake());
let tick = sched.tick();
assert_eq!(tick.completed, 1);
assert_eq!(COMPLETED.load(Ordering::Relaxed), 1);
}
/// Reproduces https://github.com/hawkw/mycelium/issues/449
#[test]
fn wait_spurious_poll() {
let _trace = crate::util::test::trace_init();
let cell = Arc::new(WaitCell::new());
let mut task = task::spawn({
let cell = cell.clone();
async move { cell.wait().await }
});
assert_pending!(task.poll(), "first poll should be pending");
assert_pending!(task.poll(), "second poll should be pending");
cell.wake();
assert_ready_ok!(task.poll(), "should have been woken");
}
#[test]
fn subscribe() {
let _trace = crate::util::test::trace_init();
futures::executor::block_on(async {
let cell = WaitCell::new();
let wait = cell.subscribe().await;
cell.wake();
wait.await.unwrap();
})
}
#[test]
fn wake_before_subscribe() {
let _trace = crate::util::test::trace_init();
let cell = Arc::new(WaitCell::new());
cell.wake();
let mut task = task::spawn({
let cell = cell.clone();
async move {
let wait = cell.subscribe().await;
wait.await.unwrap();
}
});
assert_ready!(task.poll(), "woken task should complete");
let mut task = task::spawn({
let cell = cell.clone();
async move {
let wait = cell.subscribe().await;
wait.await.unwrap();
}
});
assert_pending!(task.poll(), "wait cell hasn't been woken yet");
cell.wake();
assert!(task.is_woken());
assert_ready!(task.poll());
}
#[test]
fn wake_debounce() {
let _trace = crate::util::test::trace_init();
let cell = Arc::new(WaitCell::new());
let mut task = task::spawn({
let cell = cell.clone();
async move {
cell.wait().await.unwrap();
}
});
assert_pending!(task.poll());
cell.wake();
cell.wake();
assert!(task.is_woken());
assert_ready!(task.poll());
let mut task = task::spawn({
let cell = cell.clone();
async move {
cell.wait().await.unwrap();
}
});
assert_pending!(task.poll());
assert!(!task.is_woken());
cell.wake();
assert!(task.is_woken());
| conditional_block |
wait_cell.rs | derive(Eq, PartialEq, Copy, Clone)]
struct State(usize);
// === impl WaitCell ===
impl WaitCell {
loom_const_fn! {
/// Returns a new `WaitCell`, with no [`Waker`] stored in it.
#[must_use]
pub fn new() -> Self {
Self {
state: CachePadded::new(AtomicUsize::new(State::WAITING.0)),
waker: UnsafeCell::new(None),
}
}
}
}
impl WaitCell {
/// Poll to wait on this `WaitCell`, consuming a stored wakeup or
/// registering the [`Waker`] from the provided [`Context`] to be woken by
/// the next wakeup.
///
/// Once a [`Waker`] has been registered, a subsequent call to [`wake`] will
/// wake that [`Waker`].
///
/// # Returns
///
/// - [`Poll::Pending`] if the [`Waker`] was registered. If this method returns
/// [`Poll::Pending`], then the registered [`Waker`] will be woken by a
/// subsequent call to [`wake`].
/// - [`Poll::Ready`]`(`[`Ok`]`(()))` if the cell was woken by a call to
/// [`wake`] while the [`Waker`] was being registered.
/// - [`Poll::Ready`]`(`[`Err`]`(`[`PollWaitError::Closed`]`))` if the
/// [`WaitCell`] has been closed.
/// - [`Poll::Ready`]`(`[`Err`]`(`[`PollWaitError::Busy`]`))` if another
/// task was concurrently registering its [`Waker`] with this
/// [`WaitCell`].
///
/// [`wake`]: Self::wake
pub fn poll_wait(&self, cx: &mut Context<'_>) -> Poll<Result<(), PollWaitError>> {
enter_test_debug_span!("WaitCell::poll_wait", cell =?fmt::ptr(self));
// this is based on tokio's AtomicWaker synchronization strategy
match test_dbg!(self.compare_exchange(State::WAITING, State::REGISTERING, Acquire)) {
Err(actual) if test_dbg!(actual.contains(State::CLOSED)) => {
return Poll::Ready(Err(PollWaitError::Closed));
}
Err(actual) if test_dbg!(actual.contains(State::WOKEN)) => {
// take the wakeup
self.fetch_and(!State::WOKEN, Release);
return Poll::Ready(Ok(()));
}
// someone else is notifying, so don't wait!
Err(actual) if test_dbg!(actual.contains(State::WAKING)) => {
return Poll::Ready(Ok(()));
}
Err(_) => return Poll::Ready(Err(PollWaitError::Busy)),
Ok(_) => {}
}
let waker = cx.waker();
trace!(wait_cell = ?fmt::ptr(self), ?waker, "registering waker");
let prev_waker = self.waker.with_mut(|old_waker| unsafe {
match &mut *old_waker {
Some(old_waker) if waker.will_wake(old_waker) => None,
old => old.replace(waker.clone()),
}
});
if let Some(prev_waker) = prev_waker {
test_debug!("Replaced an old waker in cell, waking");
prev_waker.wake();
}
if let Err(actual) =
test_dbg!(self.compare_exchange(State::REGISTERING, State::WAITING, AcqRel))
{
// If the `compare_exchange` fails above, this means that we were notified for one of
// two reasons: either the cell was awoken, or the cell was closed.
//
// Bail out of the parking state, and determine what to report to the caller.
test_trace!(state =?actual, "was notified");
let waker = self.waker.with_mut(|waker| unsafe { (*waker).take() });
// Reset to the WAITING state by clearing everything *except*
// the closed bits (which must remain set). This `fetch_and`
// does *not* set the CLOSED bit if it is unset, it just doesn't
// clear it.
let state = test_dbg!(self.fetch_and(State::CLOSED, AcqRel));
// The only valid state transition while we were parking is to
// add the CLOSED bit.
debug_assert!(
state == actual || state == actual | State::CLOSED,
"state changed unexpectedly while parking!"
);
if let Some(waker) = waker {
waker.wake();
}
// Was the `CLOSED` bit set while we were clearing other bits?
// If so, the cell is closed. Otherwise, we must have been notified.
if state.contains(State::CLOSED) {
return Poll::Ready(Err(PollWaitError::Closed));
}
return Poll::Ready(Ok(()));
}
// Waker registered, time to yield!
Poll::Pending
}
/// Wait to be woken up by this cell.
///
/// # Returns
///
/// This future completes with the following values:
///
/// - [`Ok`]`(())` if the future was woken by a call to [`wake`] or another
/// task calling [`poll_wait`] or [`wait`] on this [`WaitCell`].
/// - [`Err`]`(`[`Closed`]`)` if the task was woken by a call to [`close`],
/// or the [`WaitCell`] was already closed.
///
/// **Note**: The calling task's [`Waker`] is not registered until AFTER the
/// first time the returned [`Wait`] future is polled. This means that if a
/// call to [`wake`] occurs between when [`wait`] is called and when the
/// future is first polled, the future will *not* complete. If the caller is
/// responsible for performing an operation which will result in an eventual
/// wakeup, prefer calling [`subscribe`] _before_ performing that operation
/// and `.await`ing the [`Wait`] future returned by [`subscribe`].
///
/// [`wake`]: Self::wake
/// [`poll_wait`]: Self::poll_wait
/// [`wait`]: Self::wait
/// [`close`]: Self::close
/// [`subscribe`]: Self::subscribe
pub fn wait(&self) -> Wait<'_> {
Wait {
cell: self,
presubscribe: Poll::Pending,
}
}
/// Eagerly subscribe to notifications from this `WaitCell`.
///
/// This method returns a [`Subscribe`] [`Future`], which outputs a [`Wait`]
/// [`Future`]. Awaiting the [`Subscribe`] future will eagerly register the
/// calling task to be woken by this [`WaitCell`], so that the returned
/// [`Wait`] future will be woken by any calls to [`wake`] (or [`close`])
/// that occur between when the [`Subscribe`] future completes and when the
/// returned [`Wait`] future is `.await`ed.
///
/// This is primarily intended for scenarios where the task that waits on a
/// [`WaitCell`] is responsible for performing some operation that
/// ultimately results in the [`WaitCell`] being woken. If the task were to
/// simply perform the operation and then call [`wait`] on the [`WaitCell`],
/// a potential race condition could occur where the operation completes and
/// wakes the [`WaitCell`] *before* the [`Wait`] future is first `.await`ed.
/// Using `subscribe`, the task can ensure that it is ready to be woken by
/// the cell *before* performing an operation that could result in it being
/// woken.
///
/// These scenarios occur when a wakeup is triggered by another thread/CPU
/// core in response to an operation performed in the task waiting on the
/// `WaitCell`, or when the wakeup is triggered by a hardware interrupt
/// resulting from operations performed in the task.
///
/// # Examples
///
/// ```
/// use maitake::sync::WaitCell;
///
/// // Perform an operation that results in a concurrent wakeup, such as
/// // unmasking an interrupt.
/// fn do_something_that_causes_a_wakeup() {
/// # WAIT_CELL.wake();
/// //...
/// }
///
/// static WAIT_CELL: WaitCell = WaitCell::new();
///
/// # async fn dox() {
/// // Subscribe to notifications from the cell *before* calling
/// // `do_something_that_causes_a_wakeup()`, to ensure that we are
/// // ready to be woken when the interrupt is unmasked.
/// let wait = WAIT_CELL.subscribe().await;
///
/// // Actually perform the operation.
/// do_something_that_causes_a_wakeup();
///
/// // Wait for the wakeup. If the wakeup occurred *before* the first
/// // poll of the `wait` future had successfully subscribed to the
/// // `WaitCell`, we would still receive the wakeup, because the
/// // `subscribe` future ensured that our waker was registered to be
/// // woken.
/// wait.await.expect("WaitCell is not closed");
/// # }
/// ```
///
/// [`wait`]: Self::wait
/// [`wake`]: Self::wake
/// [`close`]: Self::close
pub fn subscribe(&self) -> Subscribe<'_> {
Subscribe { cell: self }
}
/// Wake the [`Waker`] stored in this cell.
///
/// # Returns
///
/// - `true` if a waiting task was woken.
/// - `false` if no task was woken (no [`Waker`] was stored in the cell)
pub fn wake(&self) -> bool {
enter_test_debug_span!("WaitCell::wake", cell =?fmt::ptr(self));
if let Some(waker) = self.take_waker(false) {
waker.wake();
true
} else {
false
}
}
/// Close the [`WaitCell`].
///
/// This wakes any waiting task with an error indicating the `WaitCell` is
/// closed. Subsequent calls to [`wait`] or [`poll_wait`] will return an
/// error indicating that the cell has been closed.
///
/// [`wait`]: Self::wait
/// [`poll_wait`]: Self::poll_wait
pub fn close(&self) -> bool {
enter_test_debug_span!("WaitCell::close", cell =?fmt::ptr(self));
if let Some(waker) = self.take_waker(true) {
waker.wake();
true
} else {
false
}
}
// TODO(eliza): is this an API we want to have?
/*
/// Returns `true` if this `WaitCell` is [closed](Self::close).
pub(crate) fn is_closed(&self) -> bool {
self.current_state() == State::CLOSED
}
*/
/// Takes this `WaitCell`'s waker.
// TODO(eliza): could probably be made a public API...
pub(crate) fn take_waker(&self, close: bool) -> Option<Waker> {
trace!(wait_cell =?fmt::ptr(self),?close, "notifying");
// Set the WAKING bit (to indicate that we're touching the waker) and
// the WOKEN bit (to indicate that we intend to wake it up).
let state = {
let mut bits = State::WAKING | State::WOKEN;
if close {
bits.0 |= State::CLOSED.0;
}
test_dbg!(self.fetch_or(bits, AcqRel))
};
// Is anyone else touching the waker?
if !test_dbg!(state.contains(State::WAKING | State::REGISTERING | State::CLOSED)) {
// Ladies and gentlemen...we got him (the lock)!
let waker = self.waker.with_mut(|thread| unsafe { (*thread).take() });
// Release the lock.
self.fetch_and(!State::WAKING, Release);
if let Some(waker) = test_dbg!(waker) {
trace!(wait_cell =?fmt::ptr(self),?close,?waker, "notified");
return Some(waker);
}
}
None
}
}
impl WaitCell {
#[inline(always)]
fn compare_exchange(
&self,
State(curr): State,
State(new): State,
success: Ordering,
) -> Result<State, State> {
self.state
.compare_exchange(curr, new, success, Acquire)
.map(State)
.map_err(State)
}
#[inline(always)]
fn fetch_and(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_and(state, order))
}
#[inline(always)]
fn fetch_or(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_or(state, order))
}
#[inline(always)]
fn current_state(&self) -> State {
State(self.state.load(Acquire))
}
}
unsafe impl Send for WaitCell {}
unsafe impl Sync for WaitCell {}
impl fmt::Debug for WaitCell {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WaitCell")
.field("state", &self.current_state())
.field("waker", &fmt::display(".."))
.finish()
}
}
impl Drop for WaitCell {
fn drop(&mut self) {
self.close();
}
}
// === impl Wait ===
impl Future for Wait<'_> {
type Output = Result<(), Closed>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Wait::poll");
// Did a wakeup occur while we were pre-registering the future?
if test_dbg!(self.presubscribe.is_ready()) {
return self.presubscribe;
}
// Okay, actually poll the cell, then.
match task::ready!(test_dbg!(self.cell.poll_wait(cx))) {
Ok(()) => Poll::Ready(Ok(())),
Err(PollWaitError::Closed) => Poll::Ready(Err(Closed(()))),
Err(PollWaitError::Busy) => {
// If some other task was registering, yield and try to re-register
// our waker when that task is done.
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
}
// === impl Subscribe ===
impl<'cell> Future for Subscribe<'cell> {
type Output = Wait<'cell>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Subscribe::poll");
// Pre-register the waker in the cell.
let presubscribe = match test_dbg!(self.cell.poll_wait(cx)) {
Poll::Ready(Err(PollWaitError::Busy)) => {
// Someone else is in the process of registering. Yield now so we
// can wait until that task is done, and then try again.
cx.waker().wake_by_ref();
return Poll::Pending;
}
Poll::Ready(Err(PollWaitError::Closed)) => Poll::Ready(Err(Closed(()))),
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
Poll::Pending => Poll::Pending,
};
Poll::Ready(Wait {
cell: self.cell,
presubscribe,
})
}
}
// === impl State ===
impl State {
/// /!\ EXTREMELY SERIOUS WARNING! /!\
/// It is LOAD BEARING that the `WAITING` state is represented by zero!
/// This is because we return to the waiting state by `fetch_and`ing out all
/// other bits in a few places. If this state's bit representation is
/// changed to anything other than zero, that code will break! Don't do
/// that!
///
/// YES, FUTURE ELIZA, THIS DOES APPLY TO YOU. YOU ALREADY BROKE IT ONCE.
/// DON'T DO IT AGAIN.
const WAITING: Self = Self(0b0000);
const REGISTERING: Self = Self(0b0001);
const WAKING: Self = Self(0b0010);
const WOKEN: Self = Self(0b0100);
const CLOSED: Self = Self(0b1000);
fn contains(self, Self(state): Self) -> bool {
self.0 & state > 0
}
}
impl ops::BitOr for State {
type Output = Self;
fn bitor(self, Self(rhs): Self) -> Self::Output {
Self(self.0 | rhs)
}
}
impl ops::BitAnd for State {
type Output = Self;
fn bitand(self, Self(rhs): Self) -> Self::Output {
Self(self.0 & rhs)
}
}
impl ops::Not for State {
type Output = Self;
fn not(self) -> Self::Output {
Self(!self.0)
}
}
impl fmt::Debug for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut has_states = false;
fmt_bits!(self, f, has_states, REGISTERING, WAKING, CLOSED, WOKEN);
if !has_states {
if *self == Self::WAITING {
return f.write_str("WAITING");
}
f.debug_tuple("UnknownState")
.field(&format_args!("{:#b}", self.0))
.finish()?;
}
Ok(())
}
}
#[cfg(all(feature = "alloc", not(loom), test))]
mod tests {
use super::*;
use crate::scheduler::Scheduler;
use alloc::sync::Arc;
use tokio_test::{assert_pending, assert_ready, assert_ready_ok, task};
#[test]
fn wait_smoke() {
static COMPLETED: AtomicUsize = AtomicUsize::new(0);
let _trace = crate::util::test::trace_init();
let sched = Scheduler::new();
let wait = Arc::new(WaitCell::new());
let wait2 = wait.clone();
sched.spawn(async move {
wait2.wait().await.unwrap();
COMPLETED.fetch_add(1, Ordering::Relaxed);
});
let tick = sched.tick();
assert_eq!(tick.completed, 0);
assert_eq!(COMPLETED.load(Ordering::Relaxed), 0);
assert!(wait.wake());
let tick = sched.tick();
assert_eq!(tick.completed, 1);
assert_eq!(COMPLETED.load(Ordering::Relaxed), 1);
}
/// Reproduces https://github.com/hawkw/mycelium/issues/449
#[test]
fn wait_spurious_poll() {
let _trace = crate::util::test::trace_init();
let cell = Arc::new(WaitCell::new());
let mut task = task::spawn({
let cell = cell.clone();
async move { cell.wait().await }
});
assert_pending!(task.poll(), "first poll should be pending");
assert_pending!(task.poll(), "second poll should be pending");
cell.wake();
assert_ready_ok!(task.poll(), "should have been woken");
}
#[test]
fn subscribe() {
let _trace = crate::util::test::trace_init();
futures::executor::block_on(async {
let cell = WaitCell::new();
let wait = cell.subscribe().await;
cell.wake();
wait.await.unwrap();
})
}
#[test]
fn wake_before_subscribe() {
let _trace = crate::util::test::trace_init();
let cell = Arc::new(WaitCell::new());
cell.wake();
let mut task = task::spawn({
let cell = cell.clone();
async move {
let wait = cell.subscribe().await;
wait.await.unwrap();
}
});
assert_ready!(task.poll(), "woken task should complete");
let mut task = task::spawn({
let cell = cell.clone();
async move {
let wait = cell.subscribe().await;
wait.await.unwrap();
}
});
assert_pending!(task.poll(), "wait cell hasn't been woken yet");
cell.wake();
assert!(task.is_woken());
assert_ready!(task.poll());
}
#[test]
fn wake_debounce() {
let _trace = crate::util::test::trace_init();
let cell = Arc::new(WaitCell::new());
let mut task = task::spawn({
let cell = cell.clone();
async move {
cell.wait().await.unwrap();
}
});
assert_pending!(task.poll());
cell.wake();
cell.wake();
assert!(task.is_woken());
assert_ready!(task.poll());
let mut task = task::spawn({
let cell = cell.clone();
async move {
cell.wait().await.unwrap();
}
});
assert_pending!(task.poll());
assert!(!task.is_woken());
cell.wake();
assert!(task.is_woken());
| random_line_split |
|
text.rs | Normalization](https://www.w3.org/TR/xml11/#AVNormalize):
///
/// Before the value of an attribute is passed to the application or checked for validity, the XML
/// processor must normalize the attribute value by applying the algorithm below, or by using some
/// other method such that the value passed to the application is the same as that produced by the
/// algorithm.
///
/// 1. All line breaks must have been normalized on input to `#xA` as described in 2.11 End-of-Line
/// Handling, so the rest of this algorithm operates on text normalized in this way.
/// 2. Begin with a normalized value consisting of the empty string.
/// 3. For each character, entity reference, or character reference in the unnormalized attribute
/// value, beginning with the first and continuing to the last, do the following:
/// * For a character reference, append the referenced character to the normalized value.
/// * For an entity reference, recursively apply step 3 of this algorithm to the replacement text
/// of the entity.
/// * For a white space character (`#x20`, `#xD`, `#xA`, `#x9`), append a space character (`#x20`)
/// to the normalized value.
/// * For another character, append the character to the normalized value.
///
/// If the attribute type is not CDATA, then the XML processor must further process the normalized
/// attribute value by discarding any leading and trailing space (`#x20`) characters, and by
/// replacing sequences of space (`#x20`) characters by a single space (`#x20`) character.
///
/// Note that if the unnormalized attribute value contains a character reference to a white space
/// character other than space (`#x20`), the normalized value contains the referenced character
/// itself (`#xD`, `#xA` or `#x9`). This contrasts with the case where the unnormalized value
/// contains a white space character (not a reference), which is replaced with a space character
/// (`#x20`) in the normalized value and also contrasts with the case where the unnormalized value
/// contains an entity reference whose replacement text contains a white space character; being
/// recursively processed, the white space character is replaced with a space character (`#x20`) in
/// the normalized value.
///
/// All attributes for which no declaration has been read should be treated by a non-validating
/// processor as if declared CDATA.
///
/// It is an error if an attribute value contains a reference to an entity for which no declaration
/// has been read.
///
pub(crate) fn normalize_attribute_value(
value: &str,
resolver: &dyn EntityResolver,
is_cdata: bool,
) -> String {
let step_1 = normalize_end_of_lines(value);
let step_3 = if step_1.is_empty() {
step_1
} else {
let find = regex::Regex::new(
r"(?P<entity_ref>[&%][\pL_][\pL\.\d_\-]*;)|(?P<char>&#\d+;)|(?P<char_hex>&#x[0-9a-fA-F]+;)|(?P<ws>[\u{09}\u{0A}\u{0D}])",
)
.unwrap();
let mut step_2 = String::new();
let mut last_end = 0;
for capture in find.captures_iter(&step_1) {
let (start, end, replacement) = if let Some(a_match) = capture.name("entity_ref") {
//
// Resolve the reference and recursively apply step 3 of the algorithm
// above to its replacement text.
// TODO: per the specification, an unknown entity reference is an error
// and should be reported rather than cause a panic.
//
let replacement = match resolver.resolve(a_match.as_str()) {
None => panic!("unknown entity reference {}", a_match.as_str()),
Some(replacement) => {
normalize_attribute_value(&replacement, resolver, is_cdata)
}
};
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char_hex") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("ws") {
(a_match.start(), a_match.end(), "\u{20}".to_string())
} else {
panic!("unexpected result");
};
step_2.push_str(&step_1[last_end..start]);
step_2.push_str(&replacement);
last_end = end;
}
if last_end < step_1.len() {
step_2.push_str(&step_1[last_end..]);
}
step_2
};
if is_cdata {
step_3
} else {
// Non-CDATA: drop leading/trailing spaces and collapse interior runs of
// space (#x20) to a single space, as the algorithm above requires.
let spaces = regex::Regex::new(" {2,}").unwrap();
spaces.replace_all(step_3.trim_matches(' '), " ").to_string()
}
}
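// Added illustration (not in the original file): whitespace and character
// references in a CDATA-typed attribute value, using a resolver that knows
// no entities.
#[cfg(test)]
#[test]
fn normalize_cdata_attribute_example() {
struct NoEntities;
impl EntityResolver for NoEntities {
fn resolve(&self, _name: &str) -> Option<String> { None }
}
// The tab becomes #x20 and "&#65;" decodes to 'A'; CDATA keeps the
// result untrimmed.
assert_eq!(normalize_attribute_value("a\tb &#65;", &NoEntities, true), "a b A");
}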
///
/// From XML 1.1 §2.11 [End-of-Line Handling](https://www.w3.org/TR/xml11/#sec-line-ends):
///
/// XML parsed entities are often stored in computer files which, for editing convenience, are
/// organized into lines. These lines are typically separated by some combination of the characters
/// CARRIAGE RETURN `(#xD`) and LINE FEED (`#xA`).
///
/// To simplify the tasks of applications, the XML processor must behave as if it normalized all line
/// breaks in external parsed entities (including the document entity) on input, before parsing, by
/// translating all of the following to a single `#xA` character:
///
/// * the two-character sequence `#xD` `#xA`
/// * the two-character sequence `#xD` `#x85`
/// * the single character `#x85`
/// * the single character `#x2028`
/// * any `#xD` character that is not immediately followed by `#xA` or `#x85`.
///
/// The characters `#x85` and `#x2028` cannot be reliably recognized and translated until an entity's
/// encoding declaration (if present) has been read. Therefore, it is a fatal error to use them
/// within the XML declaration or text declaration.
///
pub(crate) fn normalize_end_of_lines(value: &str) -> String {
if value.is_empty() {
value.to_string()
} else {
let line_ends = regex::Regex::new(r"\u{0D}[\u{0A}\u{85}]?|\u{85}|\u{2028}").unwrap();
line_ends.replace_all(value, "\u{0A}").to_string()
}
}
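// Added illustration (not in the original file): each of the four break
// forms listed above collapses to a single #xA.
#[cfg(test)]
#[test]
fn line_break_forms_collapse() {
assert_eq!(
normalize_end_of_lines("a\r\nb\r\u{85}c\u{85}d\u{2028}e\r"),
"a\nb\nc\nd\ne\n"
);
}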
///
/// Escape character data according to XML 1.1
/// [§2.4 Character Data and Markup](https://www.w3.org/TR/xml11/#dt-chardata). This is the
/// do-everything version, not attempting to separate the rules defined below by node type.
///
/// # Specification
///
/// Text consists of intermingled character data and markup. [Definition: **Markup** takes the form
/// of start-tags, end-tags, empty-element tags, entity references, character references, comments,
/// CDATA section delimiters, document type declarations, processing instructions, XML declarations,
/// text declarations, and any white space that is at the top level of the document entity (that is,
/// outside the document element and not inside any other markup).]
///
/// [Definition: All text that is not markup constitutes the **character data** of the document].
///
/// The ampersand character (&) and the left angle bracket (<) must not appear in their literal
/// form, except when used as markup delimiters, or within a comment, a processing instruction, or
/// a CDATA section. If they are needed elsewhere, they must be escaped using either numeric
/// character references or the strings "&amp;" and "&lt;" respectively. The right angle bracket
/// (>) may be represented using the string "&gt;", and must, for compatibility, be escaped using
/// either "&gt;" or a character reference when it appears in the string "]]>" in content, when that
/// string is not marking the end of a CDATA section.
///
/// In the content of elements, character data is any string of characters which does not contain
/// the start-delimiter of any markup or the CDATA-section-close delimiter, "]]>". In a CDATA
/// section, character data is any string of characters not including the CDATA-section-close
/// delimiter.
///
/// To allow attribute values to contain both single and double quotes, the apostrophe or
/// single-quote character (') may be represented as "&apos;", and the double-quote character (")
/// as "&quot;".
///
pub(crate) fn escape(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
XML_ESC_AMP_CHAR => result.push_str(&to_entity(XML_ESC_AMP_CHAR)),
XML_ESC_APOS_CHAR => result.push_str(&to_entity(XML_ESC_APOS_CHAR)),
XML_ESC_GT_CHAR => result.push_str(&to_entity(XML_ESC_GT_CHAR)),
XML_ESC_LT_CHAR => result.push_str(&to_entity(XML_ESC_LT_CHAR)),
XML_ESC_QUOT_CHAR => result.push_str(&to_entity(XML_ESC_QUOT_CHAR)),
o => result.push(o),
}
}
result
}
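// Added illustration (not in the original file): each markup delimiter is
// rewritten as its numbered character reference; other characters pass
// through untouched. Comparing against `to_entity` keeps the example
// independent of the exact *_ENTITYREF_* constant values.
#[cfg(test)]
#[test]
fn escape_rewrites_delimiters() {
assert_eq!(escape("a < b"), format!("a {} b", to_entity('<')));
assert_eq!(escape("plain"), "plain");
}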
pub(crate) fn to_entity(c: char) -> String {
format!(
"{}{}{}",
XML_NUMBERED_ENTITYREF_START, c as u32, XML_ENTITYREF_END
)
}
#[allow(dead_code)]
pub(crate) fn to_entity_hex(c: char) -> String {
format!(
"{}{:X}{}",
XML_HEX_NUMBERED_ENTITYREF_START, c as u32, XML_ENTITYREF_END
)
}
fn char_from_entity(entity: &str) -> String {
assert!(entity.starts_with("&#"));
assert!(entity.ends_with(';'));
let code_point = if &entity[2..3] == "x" {
let code_point = &entity[3..entity.len() - 1];
u32::from_str_radix(code_point, 16).unwrap()
} else {
let code_point = &entity[2..entity.len() - 1];
u32::from_str_radix(code_point, 10).unwrap()
};
let character = char::try_from(code_point).unwrap();
character.to_string()
}
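// Added illustration (not in the original file): a decode/encode round trip.
// This assumes the *_ENTITYREF_* constants are the usual "&#", "&#x", and
// ";" forms their names suggest.
#[cfg(test)]
#[test]
fn entity_round_trip() {
assert_eq!(char_from_entity(&to_entity('A')), "A");
assert_eq!(char_from_entity(&to_entity_hex('€')), "€");
assert_eq!(char_from_entity("&#x20AC;"), "\u{20AC}");
}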
///
/// From [XML 1.0 §2.2](https://www.w3.org/TR/REC-xml/#charsets)
///
/// Definition: A parsed entity contains **text**, a sequence of characters, which may represent
/// markup or character data. Definition: A **character** is an atomic unit of text as specified by
/// ISO/IEC 10646:2000. Legal characters are tab, carriage return, line feed, and the legal
/// characters of Unicode and ISO/IEC 10646. The versions of these standards cited in A.1 Normative
/// References were current at the time this document was prepared. New characters may be added to
/// these standards by amendments or new editions. Consequently, XML processors must accept any
/// character in the range specified for `Char`.
///
/// ```ebnf
/// Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
/// Document authors are encouraged to avoid "compatibility characters", as defined in section 2.3
/// of Unicode. The characters defined in the following ranges are also discouraged. They are either
/// control characters or permanently undefined Unicode characters:
///
/// ```text
/// [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDEF],
/// [#x1FFFE-#x1FFFF], [#x2FFFE-#x2FFFF], [#x3FFFE-#x3FFFF],
/// [#x4FFFE-#x4FFFF], [#x5FFFE-#x5FFFF], [#x6FFFE-#x6FFFF],
/// [#x7FFFE-#x7FFFF], [#x8FFFE-#x8FFFF], [#x9FFFE-#x9FFFF],
/// [#xAFFFE-#xAFFFF], [#xBFFFE-#xBFFFF], [#xCFFFE-#xCFFFF],
/// [#xDFFFE-#xDFFFF], [#xEFFFE-#xEFFFF], [#xFFFFE-#xFFFFF],
/// [#x10FFFE-#x10FFFF].
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_10_char(c: char) -> bool {
c == '\u{0009}'
|| c == '\u{000A}'
|| c == '\u{000D}'
|| (c >= '\u{0020}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
#[allow(dead_code)]
pub(crate) fn is_xml_10_restricted_char(c: char) -> bool {
c == XML_ESC_AMP_CHAR
|| c == XML_ESC_APOS_CHAR
|| c == XML_ESC_GT_CHAR
|| c == XML_ESC_LT_CHAR
|| c == XML_ESC_QUOT_CHAR
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// Char ::= [#x1-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{0001}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
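// Added illustration (not in the original file): XML 1.1 admits C0 controls
// as Chars but classifies them as restricted; XML 1.0 excludes them.
#[cfg(test)]
#[test]
fn xml_11_controls_are_restricted() {
assert!(is_xml_11_char('\u{01}'));
assert!(is_xml_11_restricted_char('\u{01}'));
assert!(!is_xml_10_char('\u{01}'));
}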
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// RestrictedChar ::= [#x1-#x8] | [#xB-#xC] | [#xE-#x1F] | [#x7F-#x84] | [#x86-#x9F]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_restricted_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{01}' && c <= '\u{08}')
|| (c >= '\u{0B}' && c <= '\u{0C}')
|| (c >= '\u{0E}' && c <= '\u{1F}')
|| (c >= '\u{7F}' && c <= '\u{84}')
|| (c >= '\u{86}' && c <= '\u{9F}')
}
///
/// S (white space) consists of one or more space (#x20) characters, carriage returns, line feeds,
/// or tabs.
///
/// ```ebnf
/// S ::= (#x20 | #x9 | #xD | #xA)+
/// ```
///
/// The presence of #xD in the above production is maintained purely for backward compatibility
/// with the First Edition. As explained in 2.11 End-of-Line Handling, all #xD characters literally
/// present in an XML document are either removed or replaced by #xA characters before any other
/// processing is done. The only way to get a #xD character to match this production is to use a
/// character reference in an entity value literal.
///
#[allow(dead_code)]
pub(crate) fn is_xml_space(c: char) -> bool {
c == '\u{09}' || c == '\u{0A}' || c == '\u{0D}' || c == '\u{20}'
}
///
/// ```ebnf
/// NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] |
/// [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] |
/// [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] |
/// [#x10000-#xEFFFF]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_name_start_char(c: char) -> bool {
c == ':'
|| (c >= 'A' && c <= 'Z') | || (c >= '\u{D8}' && c <= '\u{F6}')
|| (c >= '\u{0F8}' && c <= '\u{2FF}')
|| (c >= '\u{370}' && c <= '\u{37D}')
|| (c >= '\u{037F}' && c <= '\u{1FFF}')
|| (c >= '\u{200C}' && c <= '\u{200D}')
|| (c >= '\u{2070}' && c <= '\u{218F}')
|| (c >= '\u{2C00}' && c <= '\u{2FEF}')
|| (c >= '\u{3001}' && c <= '\u{D7FF}')
|| (c >= '\u{F900}' && c <= '\u{FDCF}')
|| (c >= '\u{FDF0}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{EFFFF}')
}
///
/// ```ebnf
/// NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
/// [#x0300-#x036F] | [#x203F-#x2040]
/// ```
///
pub(crate) fn is_xml_name_char(c: char) -> bool {
is_xml_name_start_char(c)
|| c == '-'
|| c == '.'
|| (c >= '0' && c <= '9')
|| c == '\u{B7}'
|| (c >= '\u{0300}' && c <= '\u{036F}')
|| (c >= '\u{203F}' && c <= '\u{2040}')
}
///
/// ```ebnf
/// Name ::= NameStartChar (NameChar)*
/// ```
///
pub(crate) fn is_xml_name(s: &str) -> bool {
// Iterate by characters; byte-slicing `s[1..]` would panic when the
// leading NameStartChar is multi-byte (e.g. 'À').
let mut chars = s.chars();
matches!(chars.next(), Some(c) if is_xml_name_start_char(c)) && chars.all(is_xml_name_char)
}
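// Added illustration (not in the original file): a Name needs a
// NameStartChar first; digits, '-', and '.' may only appear later.
#[cfg(test)]
#[test]
fn name_start_rules() {
assert!(is_xml_name("xml:space"));
assert!(is_xml_name("_a-1.b"));
assert!(!is_xml_name("-leading-hyphen"));
assert!(!is_xml_name("1abc"));
}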
///
/// ```ebnf
/// Names ::= Name (#x20 Name)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_names(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_name)
}
///
/// ```ebnf
/// Nmtoken ::= (NameChar)+
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtoken(s: &str) -> bool {
!s.is_empty() && s.chars().all(is_xml_name_char)
}
///
/// ```ebnf
/// Nmtokens ::= Nmtoken (#x20 Nmtoken)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtokens(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_nmtoken)
}
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
impl Default for SpaceHandling {
fn default() -> Self {
SpaceHandling::Default
}
}
// ------------------------------------------------------------------------------------------------
impl Display for SpaceHandling {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(
f,
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE,
XML_NS_SEPARATOR,
XML_NS_ATTR_SPACE,
match self {
SpaceHandling::Default => XML_NS_ATTR_SPACE_DEFAULT,
SpaceHandling::Preserve => XML_NS_ATTR_SPACE_PRESERVE,
}
)
}
}
// ------------------------------------------------------------------------------------------------
impl FromStr for SpaceHandling {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == XML_NS_ATTR_SPACE_DEFAULT {
Ok(SpaceHandling::Default)
} else if s == XML_NS_ATTR_SPACE_PRESERVE {
Ok(SpaceHandling::Preserve)
} else {
Err(())
}
}
}
// ------------------------------------------------------------------------------------------------
// Unit Tests
// ------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use std::borrow::Borrow;
use std::collections::HashMap;
#[test]
fn test_space_handling_default() {
let sh = SpaceHandling::default();
assert_eq!(sh, SpaceHandling::Default);
}
#[test]
fn test_space_handling_display() {
assert_eq!(
format!("{}", SpaceHandling::Default),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_DEFAULT
)
);
assert_eq!(
format!("{}", SpaceHandling::Preserve),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_PRESERVE
)
);
}
#[test]
fn test_space_handling_from_str() {
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_DEFAULT).unwrap(),
SpaceHandling::Default
);
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_PRESERVE).unwrap(),
SpaceHandling::Preserve
);
assert!(SpaceHandling::from_str("").is_err());
assert!(SpaceHandling::from_str("other").is_err());
}
#[test]
fn test_end_of_line_handling() {
let input = "one\u{0D}two\u{0D}\u{0A}\u{0A}three\u{0A}\u{0D}\u{85}four\u{85}five\u{2028}";
let output = normalize_end_of_lines(&input.to_string());
assert_eq!(
output,
"one\u{0A}two\u{0A}\u{0A}three\u{0A}\u{0A}four\u{0A}five\u{0A}".to_string()
)
}
struct NoneEntityResolver {}
impl EntityResolver for NoneEntityResolver {
fn resolve(&self, name: &str) -> Option<String> {
let result: Option<String> = None;
println!("EntityResolver::resolve({:?}) -> {:?}", name, result);
result
}
}
pub(crate) fn none_entity_resolver() -> Box<dyn EntityResolver> {
let resolver = NoneEntityResolver {};
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_trim() {
let resolver = none_entity_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value(" abc ", resolver, true),
" abc "
);
assert_eq!(normalize_attribute_value(" abc ", resolver, false), "abc");
}
struct TestResolver {
entity_map: HashMap<String, String>,
}
impl EntityResolver for TestResolver {
fn resolve(&self, entity: &str) -> Option<String> {
self.entity_map.get(entity).cloned()
}
}
impl TestResolver {
pub(crate) fn new() -> Self {
let mut new_self = Self {
entity_map: Default::default(),
};
let _safe_to_ignore = new_self
.entity_map
.insert("£".to_string(), "£".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("¥".to_string(), "¥".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("€".to_string(), "€".to_string());
let _safe_to_ignore = new_self.entity_map.insert(
"¤cy;".to_string(),
"$, £, €, and ¥".to_string(),
);
new | || c == '_'
|| (c >= 'a' && c <= 'z')
|| (c >= '\u{C0}' && c <= '\u{D6}') | random_line_split |
text.rs | ization](https://www.w3.org/TR/xml11/#AVNormalize):
///
/// Before the value of an attribute is passed to the application or checked for validity, the XML
/// processor must normalize the attribute value by applying the algorithm below, or by using some
/// other method such that the value passed to the application is the same as that produced by the
/// algorithm.
///
/// 1. All line breaks must have been normalized on input to `#xA` as described in 2.11 End-of-Line
/// Handling, so the rest of this algorithm operates on text normalized in this way.
/// 2. Begin with a normalized value consisting of the empty string.
/// 3. For each character, entity reference, or character reference in the unnormalized attribute
/// value, beginning with the first and continuing to the last, do the following:
/// * For a character reference, append the referenced character to the normalized value.
/// * For an entity reference, recursively apply step 3 of this algorithm to the replacement text
/// of the entity.
/// * For a white space character (`#x20`, `#xD`, `#xA`, `#x9`), append a space character (`#x20`)
/// to the normalized value.
/// * For another character, append the character to the normalized value.
///
/// If the attribute type is not CDATA, then the XML processor must further process the normalized
/// attribute value by discarding any leading and trailing space (`#x20`) characters, and by
/// replacing sequences of space (`#x20`) characters by a single space (`#x20`) character.
///
/// Note that if the unnormalized attribute value contains a character reference to a white space
/// character other than space (`#x20`), the normalized value contains the referenced character
/// itself (`#xD`, `#xA` or `#x9`). This contrasts with the case where the unnormalized value
/// contains a white space character (not a reference), which is replaced with a space character
/// (`#x20`) in the normalized value and also contrasts with the case where the unnormalized value
/// contains an entity reference whose replacement text contains a white space character; being
/// recursively processed, the white space character is replaced with a space character (`#x20`) in
/// the normalized value.
///
/// All attributes for which no declaration has been read should be treated by a non-validating
/// processor as if declared CDATA.
///
/// It is an error if an attribute value contains a reference to an entity for which no declaration
/// has been read.
///
pub(crate) fn normalize_attribute_value(
value: &str,
resolver: &dyn EntityResolver,
is_cdata: bool,
) -> String {
let step_1 = normalize_end_of_lines(value);
let step_3 = if step_1.is_empty() {
step_1
} else {
let find = regex::Regex::new(
r"(?P<entity_ref>[&%][\pL_][\pL\.\d_\-]*;)|(?P<char>&#\d+;)|(?P<char_hex>&#x[0-9a-fA-F]+;)|(?P<ws>[\u{09}\u{0A}\u{0D}])",
)
.unwrap();
let mut step_2 = String::new();
let mut last_end = 0;
for capture in find.captures_iter(&step_1) {
let (start, end, replacement) = if let Some(a_match) = capture.name("entity_ref") { | else if let Some(a_match) = capture.name("char") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char_hex") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("ws") {
(a_match.start(), a_match.end(), "\u{20}".to_string())
} else {
panic!("unexpected result");
};
step_2.push_str(&step_1[last_end..start]);
step_2.push_str(&replacement);
last_end = end;
}
if last_end < step_1.len() {
step_2.push_str(&step_1[last_end..]);
}
step_2
};
if is_cdata {
step_3
} else {
// Non-CDATA: drop leading/trailing spaces and collapse interior runs of
// space (#x20) to a single space, as the algorithm above requires.
let spaces = regex::Regex::new(" {2,}").unwrap();
spaces.replace_all(step_3.trim_matches(' '), " ").to_string()
}
}
///
/// From XML 1.1 §2.11 [End-of-Line Handling](https://www.w3.org/TR/xml11/#sec-line-ends):
///
/// XML parsed entities are often stored in computer files which, for editing convenience, are
/// organized into lines. These lines are typically separated by some combination of the characters
/// CARRIAGE RETURN `(#xD`) and LINE FEED (`#xA`).
///
/// To simplify the tasks of applications, the XML processor must behave as if it normalized all line
/// breaks in external parsed entities (including the document entity) on input, before parsing, by
/// translating all of the following to a single `#xA` character:
///
/// * the two-character sequence `#xD` `#xA`
/// * the two-character sequence `#xD` `#x85`
/// * the single character `#x85`
/// * the single character `#x2028`
/// * any `#xD` character that is not immediately followed by `#xA` or `#x85`.
///
/// The characters `#x85` and `#x2028` cannot be reliably recognized and translated until an entity's
/// encoding declaration (if present) has been read. Therefore, it is a fatal error to use them
/// within the XML declaration or text declaration.
///
pub(crate) fn normalize_end_of_lines(value: &str) -> String {
if value.is_empty() {
value.to_string()
} else {
let line_ends = regex::Regex::new(r"\u{0D}[\u{0A}\u{85}]?|\u{85}|\u{2028}").unwrap();
line_ends.replace_all(value, "\u{0A}").to_string()
}
}
///
/// Escape character data according to XML 1.1
/// [§2.4 Character Data and Markup](https://www.w3.org/TR/xml11/#dt-chardata). This is the
/// do-everything version, not attempting to separate the rules defined below by node type.
///
/// # Specification
///
/// Text consists of intermingled character data and markup. [Definition: **Markup** takes the form
/// of start-tags, end-tags, empty-element tags, entity references, character references, comments,
/// CDATA section delimiters, document type declarations, processing instructions, XML declarations,
/// text declarations, and any white space that is at the top level of the document entity (that is,
/// outside the document element and not inside any other markup).]
///
/// [Definition: All text that is not markup constitutes the **character data** of the document].
///
/// The ampersand character (&) and the left angle bracket (<) must not appear in their literal
/// form, except when used as markup delimiters, or within a comment, a processing instruction, or
/// a CDATA section. If they are needed elsewhere, they must be escaped using either numeric
/// character references or the strings "&amp;" and "&lt;" respectively. The right angle bracket
/// (>) may be represented using the string "&gt;", and must, for compatibility, be escaped using
/// either "&gt;" or a character reference when it appears in the string "]]>" in content, when that
/// string is not marking the end of a CDATA section.
///
/// In the content of elements, character data is any string of characters which does not contain
/// the start-delimiter of any markup or the CDATA-section-close delimiter, "]]>". In a CDATA
/// section, character data is any string of characters not including the CDATA-section-close
/// delimiter.
///
/// To allow attribute values to contain both single and double quotes, the apostrophe or
/// single-quote character (') may be represented as "&apos;", and the double-quote character (")
/// as "&quot;".
///
pub(crate) fn escape(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
XML_ESC_AMP_CHAR => result.push_str(&to_entity(XML_ESC_AMP_CHAR)),
XML_ESC_APOS_CHAR => result.push_str(&to_entity(XML_ESC_APOS_CHAR)),
XML_ESC_GT_CHAR => result.push_str(&to_entity(XML_ESC_GT_CHAR)),
XML_ESC_LT_CHAR => result.push_str(&to_entity(XML_ESC_LT_CHAR)),
XML_ESC_QUOT_CHAR => result.push_str(&to_entity(XML_ESC_QUOT_CHAR)),
o => result.push(o),
}
}
result
}
pub(crate) fn to_entity(c: char) -> String {
format!(
"{}{}{}",
XML_NUMBERED_ENTITYREF_START, c as u32, XML_ENTITYREF_END
)
}
#[allow(dead_code)]
pub(crate) fn to_entity_hex(c: char) -> String {
format!(
"{}{:X}{}",
XML_HEX_NUMBERED_ENTITYREF_START, c as u32, XML_ENTITYREF_END
)
}
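// Added illustration (not in the original file): the hex variant prints the
// scalar value in upper-case hex, the decimal variant in base 10. Substring
// checks keep the example independent of the exact constant values.
#[cfg(test)]
#[test]
fn to_entity_hex_uses_uppercase() {
assert!(to_entity_hex('\u{20AC}').contains("20AC"));
assert!(to_entity('\u{20AC}').contains("8364")); // same character, decimal
}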
fn char_from_entity(entity: &str) -> String {
assert!(entity.starts_with("&#"));
assert!(entity.ends_with(';'));
let code_point = if &entity[2..3] == "x" {
let code_point = &entity[3..entity.len() - 1];
u32::from_str_radix(code_point, 16).unwrap()
} else {
let code_point = &entity[2..entity.len() - 1];
u32::from_str_radix(code_point, 10).unwrap()
};
let character = char::try_from(code_point).unwrap();
character.to_string()
}
///
/// From [XML 1.0 §2.2](https://www.w3.org/TR/REC-xml/#charsets)
///
/// Definition: A parsed entity contains **text**, a sequence of characters, which may represent
/// markup or character data. Definition: A **character** is an atomic unit of text as specified by
/// ISO/IEC 10646:2000. Legal characters are tab, carriage return, line feed, and the legal
/// characters of Unicode and ISO/IEC 10646. The versions of these standards cited in A.1 Normative
/// References were current at the time this document was prepared. New characters may be added to
/// these standards by amendments or new editions. Consequently, XML processors must accept any
/// character in the range specified for `Char`.
///
/// ```ebnf
/// Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
/// Document authors are encouraged to avoid "compatibility characters", as defined in section 2.3
/// of Unicode. The characters defined in the following ranges are also discouraged. They are either
/// control characters or permanently undefined Unicode characters:
///
/// ```text
/// [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDEF],
/// [#x1FFFE-#x1FFFF], [#x2FFFE-#x2FFFF], [#x3FFFE-#x3FFFF],
/// [#x4FFFE-#x4FFFF], [#x5FFFE-#x5FFFF], [#x6FFFE-#x6FFFF],
/// [#x7FFFE-#x7FFFF], [#x8FFFE-#x8FFFF], [#x9FFFE-#x9FFFF],
/// [#xAFFFE-#xAFFFF], [#xBFFFE-#xBFFFF], [#xCFFFE-#xCFFFF],
/// [#xDFFFE-#xDFFFF], [#xEFFFE-#xEFFFF], [#xFFFFE-#xFFFFF],
/// [#x10FFFE-#x10FFFF].
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_10_char(c: char) -> bool {
c == '\u{0009}'
|| c == '\u{000A}'
|| c == '\u{000D}'
|| (c >= '\u{0020}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
#[allow(dead_code)]
pub(crate) fn is_xml_10_restricted_char(c: char) -> bool {
c == XML_ESC_AMP_CHAR
|| c == XML_ESC_APOS_CHAR
|| c == XML_ESC_GT_CHAR
|| c == XML_ESC_LT_CHAR
|| c == XML_ESC_QUOT_CHAR
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// Char ::= [#x1-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{0001}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// RestrictedChar ::= [#x1-#x8] | [#xB-#xC] | [#xE-#x1F] | [#x7F-#x84] | [#x86-#x9F]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_restricted_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{01}' && c <= '\u{08}')
|| (c >= '\u{0B}' && c <= '\u{0C}')
|| (c >= '\u{0E}' && c <= '\u{1F}')
|| (c >= '\u{7F}' && c <= '\u{84}')
|| (c >= '\u{86}' && c <= '\u{9F}')
}
///
/// S (white space) consists of one or more space (#x20) characters, carriage returns, line feeds,
/// or tabs.
///
/// ```ebnf
/// S ::= (#x20 | #x9 | #xD | #xA)+
/// ```
///
/// The presence of #xD in the above production is maintained purely for backward compatibility
/// with the First Edition. As explained in 2.11 End-of-Line Handling, all #xD characters literally
/// present in an XML document are either removed or replaced by #xA characters before any other
/// processing is done. The only way to get a #xD character to match this production is to use a
/// character reference in an entity value literal.
///
#[allow(dead_code)]
pub(crate) fn is_xml_space(c: char) -> bool {
c == '\u{09}' || c == '\u{0A}' || c == '\u{0D}' || c == '\u{20}'
}
///
/// ```ebnf
/// NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] |
/// [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] |
/// [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] |
/// [#x10000-#xEFFFF]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_name_start_char(c: char) -> bool {
c == ':'
|| (c >= 'A' && c <= 'Z')
|| c == '_'
|| (c >= 'a' && c <= 'z')
|| (c >= '\u{C0}' && c <= '\u{D6}')
|| (c >= '\u{D8}' && c <= '\u{F6}')
|| (c >= '\u{0F8}' && c <= '\u{2FF}')
|| (c >= '\u{370}' && c <= '\u{37D}')
|| (c >= '\u{037F}' && c <= '\u{1FFF}')
|| (c >= '\u{200C}' && c <= '\u{200D}')
|| (c >= '\u{2070}' && c <= '\u{218F}')
|| (c >= '\u{2C00}' && c <= '\u{2FEF}')
|| (c >= '\u{3001}' && c <= '\u{D7FF}')
|| (c >= '\u{F900}' && c <= '\u{FDCF}')
|| (c >= '\u{FDF0}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{EFFFF}')
}
///
/// ```ebnf
/// NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
/// [#x0300-#x036F] | [#x203F-#x2040]
/// ```
///
pub(crate) fn is_xml_name_char(c: char) -> bool {
is_xml_name_start_char(c)
|| c == '-'
|| c == '.'
|| (c >= '0' && c <= '9')
|| c == '\u{B7}'
|| (c >= '\u{0300}' && c <= '\u{036F}')
|| (c >= '\u{203F}' && c <= '\u{2040}')
}
///
/// ```ebnf
/// Name ::= NameStartChar (NameChar)*
/// ```
///
pub(crate) fn is_xml_name(s: &str) -> bool {
// Iterate by characters; byte-slicing `s[1..]` would panic when the
// leading NameStartChar is multi-byte (e.g. 'À').
let mut chars = s.chars();
matches!(chars.next(), Some(c) if is_xml_name_start_char(c)) && chars.all(is_xml_name_char)
}
///
/// ```ebnf
/// Names ::= Name (#x20 Name)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_names(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_name)
}
///
/// ```ebnf
/// Nmtoken ::= (NameChar)+
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtoken(s: &str) -> bool {
!s.is_empty() && s.chars().all(is_xml_name_char)
}
///
/// ```ebnf
/// Nmtokens ::= Nmtoken (#x20 Nmtoken)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtokens(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_nmtoken)
}
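// Added illustration (not in the original file): Names and Nmtokens are
// single-#x20-separated lists, so a double space yields an empty token and
// the check fails.
#[cfg(test)]
#[test]
fn names_are_space_separated() {
assert!(is_xml_names("a b c"));
assert!(!is_xml_names("a  b"));
assert!(is_xml_nmtokens("1a -b"));
}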
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
impl Default for SpaceHandling {
fn default() -> Self {
SpaceHandling::Default
}
}
// ------------------------------------------------------------------------------------------------
impl Display for SpaceHandling {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(
f,
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE,
XML_NS_SEPARATOR,
XML_NS_ATTR_SPACE,
match self {
SpaceHandling::Default => XML_NS_ATTR_SPACE_DEFAULT,
SpaceHandling::Preserve => XML_NS_ATTR_SPACE_PRESERVE,
}
)
}
}
// ------------------------------------------------------------------------------------------------
impl FromStr for SpaceHandling {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == XML_NS_ATTR_SPACE_DEFAULT {
Ok(SpaceHandling::Default)
} else if s == XML_NS_ATTR_SPACE_PRESERVE {
Ok(SpaceHandling::Preserve)
} else {
Err(())
}
}
}
// ------------------------------------------------------------------------------------------------
// Unit Tests
// ------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use std::borrow::Borrow;
use std::collections::HashMap;
#[test]
fn test_space_handling_default() {
let sh = SpaceHandling::default();
assert_eq!(sh, SpaceHandling::Default);
}
#[test]
fn test_space_handling_display() {
assert_eq!(
format!("{}", SpaceHandling::Default),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_DEFAULT
)
);
assert_eq!(
format!("{}", SpaceHandling::Preserve),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_PRESERVE
)
);
}
#[test]
fn test_space_handling_from_str() {
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_DEFAULT).unwrap(),
SpaceHandling::Default
);
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_PRESERVE).unwrap(),
SpaceHandling::Preserve
);
assert!(SpaceHandling::from_str("").is_err());
assert!(SpaceHandling::from_str("other").is_err());
}
#[test]
fn test_end_of_line_handling() {
let input = "one\u{0D}two\u{0D}\u{0A}\u{0A}three\u{0A}\u{0D}\u{85}four\u{85}five\u{2028}";
let output = normalize_end_of_lines(&input.to_string());
assert_eq!(
output,
"one\u{0A}two\u{0A}\u{0A}three\u{0A}\u{0A}four\u{0A}five\u{0A}".to_string()
)
}
struct NoneEntityResolver {}
impl EntityResolver for NoneEntityResolver {
fn resolve(&self, name: &str) -> Option<String> {
let result: Option<String> = None;
println!("EntityResolver::resolve({:?}) -> {:?}", name, result);
result
}
}
pub(crate) fn none_entity_resolver() -> Box<dyn EntityResolver> {
let resolver = NoneEntityResolver {};
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_trim() {
let resolver = none_entity_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value(" abc ", resolver, true),
" abc "
);
assert_eq!(normalize_attribute_value(" abc ", resolver, false), "abc");
}
struct TestResolver {
entity_map: HashMap<String, String>,
}
impl EntityResolver for TestResolver {
fn resolve(&self, entity: &str) -> Option<String> {
self.entity_map.get(entity).cloned()
}
}
impl TestResolver {
pub(crate) fn new() -> Self {
let mut new_self = Self {
entity_map: Default::default(),
};
let _safe_to_ignore = new_self
.entity_map
.insert("£".to_string(), "£".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("¥".to_string(), "¥".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("€".to_string(), "€".to_string());
let _safe_to_ignore = new_self.entity_map.insert(
"¤cy;".to_string(),
"$, £, €, and ¥".to_string(),
);
|
//
// Resolve the reference and recursively apply step 3 of the algorithm
// above to its replacement text.
// TODO: per the specification, an unknown entity reference is an error
// and should be reported rather than cause a panic.
//
let replacement = match resolver.resolve(a_match.as_str()) {
None => panic!("unknown entity reference {}", a_match.as_str()),
Some(replacement) => {
normalize_attribute_value(&replacement, resolver, is_cdata)
}
};
(a_match.start(), a_match.end(), replacement)
} | conditional_block |
text.rs | ization](https://www.w3.org/TR/xml11/#AVNormalize):
///
/// Before the value of an attribute is passed to the application or checked for validity, the XML
/// processor must normalize the attribute value by applying the algorithm below, or by using some
/// other method such that the value passed to the application is the same as that produced by the
/// algorithm.
///
/// 1. All line breaks must have been normalized on input to `#xA` as described in 2.11 End-of-Line
/// Handling, so the rest of this algorithm operates on text normalized in this way.
/// 2. Begin with a normalized value consisting of the empty string.
/// 3. For each character, entity reference, or character reference in the unnormalized attribute
/// value, beginning with the first and continuing to the last, do the following:
/// * For a character reference, append the referenced character to the normalized value.
/// * For an entity reference, recursively apply step 3 of this algorithm to the replacement text
/// of the entity.
/// * For a white space character (`#x20`, `#xD`, `#xA`, `#x9`), append a space character (`#x20`)
/// to the normalized value.
/// * For another character, append the character to the normalized value.
///
/// If the attribute type is not CDATA, then the XML processor must further process the normalized
/// attribute value by discarding any leading and trailing space (`#x20`) characters, and by
/// replacing sequences of space (`#x20`) characters by a single space (`#x20`) character.
///
/// Note that if the unnormalized attribute value contains a character reference to a white space
/// character other than space (`#x20`), the normalized value contains the referenced character
/// itself (`#xD`, `#xA` or `#x9`). This contrasts with the case where the unnormalized value
/// contains a white space character (not a reference), which is replaced with a space character
/// (`#x20`) in the normalized value and also contrasts with the case where the unnormalized value
/// contains an entity reference whose replacement text contains a white space character; being
/// recursively processed, the white space character is replaced with a space character (`#x20`) in
/// the normalized value.
///
/// All attributes for which no declaration has been read should be treated by a non-validating
/// processor as if declared CDATA.
///
/// It is an error if an attribute value contains a reference to an entity for which no declaration
/// has been read.
///
pub(crate) fn normalize_attribute_value(
value: &str,
resolver: &dyn EntityResolver,
is_cdata: bool,
) -> String {
let step_1 = normalize_end_of_lines(value);
let step_3 = if step_1.is_empty() {
step_1
} else {
let find = regex::Regex::new(
r"(?P<entity_ref>[&%][\pL_][\pL\.\d_\-]*;)|(?P<char>&#\d+;)|(?P<char_hex>&#x[0-9a-fA-F]+;)|(?P<ws>[\u{09}\u{0A}\u{0D}])",
)
.unwrap();
let mut step_2 = String::new();
let mut last_end = 0;
for capture in find.captures_iter(&step_1) {
let (start, end, replacement) = if let Some(a_match) = capture.name("entity_ref") {
//
// Resolve the reference and recursively apply step 3 of the algorithm
// above to its replacement text.
// TODO: per the specification, an unknown entity reference is an error
// and should be reported rather than cause a panic.
//
let replacement = match resolver.resolve(a_match.as_str()) {
None => panic!("unknown entity reference {}", a_match.as_str()),
Some(replacement) => {
normalize_attribute_value(&replacement, resolver, is_cdata)
}
};
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char_hex") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("ws") {
(a_match.start(), a_match.end(), "\u{20}".to_string())
} else {
panic!("unexpected result");
};
step_2.push_str(&step_1[last_end..start]);
step_2.push_str(&replacement);
last_end = end;
}
if last_end < value.len() {
step_2.push_str(&step_1[last_end..]);
}
step_2
};
if is_cdata {
step_3
} else {
// Non-CDATA: drop leading/trailing spaces and collapse interior runs of
// space (#x20) to a single space, as the algorithm above requires.
let spaces = regex::Regex::new(" {2,}").unwrap();
spaces.replace_all(step_3.trim_matches(' '), " ").to_string()
}
}
///
/// From XML 1.1 §2.11 [End-of-Line Handling](https://www.w3.org/TR/xml11/#sec-line-ends):
///
/// XML parsed entities are often stored in computer files which, for editing convenience, are
/// organized into lines. These lines are typically separated by some combination of the characters
/// CARRIAGE RETURN `(#xD`) and LINE FEED (`#xA`).
///
/// To simplify the tasks of applications, the XML processor must behave as if it normalized all line
/// breaks in external parsed entities (including the document entity) on input, before parsing, by
/// translating all of the following to a single `#xA` character:
///
/// * the two-character sequence `#xD` `#xA`
/// * the two-character sequence `#xD` `#x85`
/// * the single character `#x85`
/// * the single character `#x2028`
/// * any `#xD` character that is not immediately followed by `#xA` or `#x85`.
///
/// The characters `#x85` and `#x2028` cannot be reliably recognized and translated until an entity's
/// encoding declaration (if present) has been read. Therefore, it is a fatal error to use them
/// within the XML declaration or text declaration.
///
pub(crate) fn normalize_end_of_lines(value: &str) -> String {
if value.is_empty() {
value.to_string()
} else {
let line_ends = regex::Regex::new(r"\u{0D}[\u{0A}\u{85}]?|\u{85}|\u{2028}").unwrap();
line_ends.replace_all(value, "\u{0A}").to_string()
}
}
///
/// Escape character data according to XML 1.1
/// [§2.4 Character Data and Markup](https://www.w3.org/TR/xml11/#dt-chardata). This is the
/// do-everything version, not attempting to separate the rules defined below by node type.
///
/// # Specification
///
/// Text consists of intermingled character data and markup. [Definition: **Markup** takes the form
/// of start-tags, end-tags, empty-element tags, entity references, character references, comments,
/// CDATA section delimiters, document type declarations, processing instructions, XML declarations,
/// text declarations, and any white space that is at the top level of the document entity (that is,
/// outside the document element and not inside any other markup).]
///
/// [Definition: All text that is not markup constitutes the **character data** of the document].
///
/// The ampersand character (&) and the left angle bracket (<) must not appear in their literal
/// form, except when used as markup delimiters, or within a comment, a processing instruction, or
/// a CDATA section. If they are needed elsewhere, they must be escaped using either numeric
/// character references or the strings "&amp;" and "&lt;" respectively. The right angle bracket
/// (>) may be represented using the string "&gt;", and must, for compatibility, be escaped using
/// either "&gt;" or a character reference when it appears in the string "]]>" in content, when that
/// string is not marking the end of a CDATA section.
///
/// In the content of elements, character data is any string of characters which does not contain
/// the start-delimiter of any markup or the CDATA-section-close delimiter, "]]>". In a CDATA
/// section, character data is any string of characters not including the CDATA-section-close
/// delimiter.
///
/// To allow attribute values to contain both single and double quotes, the apostrophe or
/// single-quote character (') may be represented as "&apos;", and the double-quote character (")
/// as "&quot;".
///
pub(crate) fn escape(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
XML_ESC_AMP_CHAR => result.push_str(&to_entity(XML_ESC_AMP_CHAR)),
XML_ESC_APOS_CHAR => result.push_str(&to_entity(XML_ESC_APOS_CHAR)),
XML_ESC_GT_CHAR => result.push_str(&to_entity(XML_ESC_GT_CHAR)),
XML_ESC_LT_CHAR => result.push_str(&to_entity(XML_ESC_LT_CHAR)),
XML_ESC_QUOT_CHAR => result.push_str(&to_entity(XML_ESC_QUOT_CHAR)),
o => result.push(o),
}
}
result
}
pub(crate) fn to_entity(c: char) -> String {
format!(
"{}{}{}",
XML_NUMBERED_ENTITYREF_START, c as u32, XML_ENTITYREF_END
)
}
#[allow(dead_code)]
pub(crate) fn to_entity_hex(c: char) -> String {
format!(
"{}{:X}{}",
XML_HEX_NUMBERED_ENTITYREF_START, c as u32, XML_ENTITYREF_END
)
}
fn char_from_entity(entity: &str) -> String {
assert!(entity.starts_with("&#"));
assert!(entity.ends_with(';'));
let code_point = if &entity[2..3] == "x" {
let code_point = &entity[3..entity.len() - 1];
u32::from_str_radix(code_point, 16).unwrap()
} else {
let code_point = &entity[2..entity.len() - 1];
u32::from_str_radix(code_point, 10).unwrap()
};
let character = char::try_from(code_point).unwrap();
character.to_string()
}
///
/// From [XML 1.0 §2.2](https://www.w3.org/TR/REC-xml/#charsets)
///
/// Definition: A parsed entity contains **text**, a sequence of characters, which may represent
/// markup or character data. Definition: A **character** is an atomic unit of text as specified by
/// ISO/IEC 10646:2000. Legal characters are tab, carriage return, line feed, and the legal
/// characters of Unicode and ISO/IEC 10646. The versions of these standards cited in A.1 Normative
/// References were current at the time this document was prepared. New characters may be added to
/// these standards by amendments or new editions. Consequently, XML processors must accept any
/// character in the range specified for `Char`.
///
/// ```ebnf
/// Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
/// Document authors are encouraged to avoid "compatibility characters", as defined in section 2.3
/// of Unicode. The characters defined in the following ranges are also discouraged. They are either
/// control characters or permanently undefined Unicode characters:
///
/// ```text
/// [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDEF],
/// [#x1FFFE-#x1FFFF], [#x2FFFE-#x2FFFF], [#x3FFFE-#x3FFFF],
/// [#x4FFFE-#x4FFFF], [#x5FFFE-#x5FFFF], [#x6FFFE-#x6FFFF],
/// [#x7FFFE-#x7FFFF], [#x8FFFE-#x8FFFF], [#x9FFFE-#x9FFFF],
/// [#xAFFFE-#xAFFFF], [#xBFFFE-#xBFFFF], [#xCFFFE-#xCFFFF],
/// [#xDFFFE-#xDFFFF], [#xEFFFE-#xEFFFF], [#xFFFFE-#xFFFFF],
/// [#x10FFFE-#x10FFFF].
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_10_char(c: char) -> bool {
c == '\u{0009}'
|| c == '\u{000A}'
|| c == '\u{000D}'
|| (c >= '\u{0020}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
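// Added illustration (not in the original file): tab is legal, most C0
// controls are not, and the supplementary planes run to #x10FFFF.
#[cfg(test)]
#[test]
fn xml_10_char_boundaries() {
assert!(is_xml_10_char('\t'));
assert!(!is_xml_10_char('\u{1F}'));
assert!(is_xml_10_char('\u{10FFFF}'));
}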
#[allow(dead_code)]
pub(crate) fn is_xml_10_restricted_char(c: char) -> bool {
c == XML_ESC_AMP_CHAR
|| c == XML_ESC_APOS_CHAR
|| c == XML_ESC_GT_CHAR
|| c == XML_ESC_LT_CHAR
|| c == XML_ESC_QUOT_CHAR
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// Char ::= [#x1-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{0001}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// RestrictedChar ::= [#x1-#x8] | [#xB-#xC] | [#xE-#x1F] | [#x7F-#x84] | [#x86-#x9F]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_restricted_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{01}' && c <= '\u{08}')
|| (c >= '\u{0B}' && c <= '\u{0C}')
|| (c >= '\u{0E}' && c <= '\u{1F}')
|| (c >= '\u{7F}' && c <= '\u{84}')
|| (c >= '\u{86}' && c <= '\u{9F}')
}
///
/// S (white space) consists of one or more space (#x20) characters, carriage returns, line feeds,
/// or tabs.
///
/// ```ebnf
/// S ::= (#x20 | #x9 | #xD | #xA)+
/// ```
///
/// The presence of #xD in the above production is maintained purely for backward compatibility
/// with the First Edition. As explained in 2.11 End-of-Line Handling, all #xD characters literally
/// present in an XML document are either removed or replaced by #xA characters before any other
/// processing is done. The only way to get a #xD character to match this production is to use a
/// character reference in an entity value literal.
///
#[allow(dead_code)]
pub(crate) fn is_xml_space(c: char) -> bool {
c == '\u{09}' || c == '\u{0A}' || c == '\u{0D}' || c == '\u{20}'
}
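// Added illustration (not in the original file): S is exactly the four
// characters listed above; NEL (#x85) is a line end, not S.
#[cfg(test)]
#[test]
fn s_production_members() {
assert!(" \t\r\n".chars().all(is_xml_space));
assert!(!is_xml_space('\u{85}'));
}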
///
/// ```ebnf
/// NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] |
/// [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] |
/// [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] |
/// [#x10000-#xEFFFF]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_name_start_char(c: char) -> bool {
c == ':'
|| (c >= 'A' && c <= 'Z')
|| c == '_'
|| (c >= 'a' && c <= 'z')
|| (c >= '\u{C0}' && c <= '\u{D6}')
|| (c >= '\u{D8}' && c <= '\u{F6}')
|| (c >= '\u{0F8}' && c <= '\u{2FF}')
|| (c >= '\u{370}' && c <= '\u{37D}')
|| (c >= '\u{037F}' && c <= '\u{1FFF}')
|| (c >= '\u{200C}' && c <= '\u{200D}')
|| (c >= '\u{2070}' && c <= '\u{218F}')
|| (c >= '\u{2C00}' && c <= '\u{2FEF}')
|| (c >= '\u{3001}' && c <= '\u{D7FF}')
|| (c >= '\u{F900}' && c <= '\u{FDCF}')
|| (c >= '\u{FDF0}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{EFFFF}')
}
///
/// ```ebnf
/// NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
/// [#x0300-#x036F] | [#x203F-#x2040]
/// ```
///
pub(crate) fn is_xml_name_char(c: char) -> bool {
is_xml_name_start_char(c)
|| c == '-'
|| c == '.'
|| (c >= '0' && c <= '9')
|| c == '\u{B7}'
|| (c >= '\u{0300}' && c <= '\u{036F}')
|| (c >= '\u{203F}' && c <= '\u{2040}')
}
///
/// ```ebnf
/// Name ::= NameStartChar (NameChar)*
/// ```
///
pub(crate) fn is_xml_name(s: &str) -> bool {
// Iterate by characters; byte-slicing `s[1..]` would panic when the
// leading NameStartChar is multi-byte (e.g. 'À').
let mut chars = s.chars();
matches!(chars.next(), Some(c) if is_xml_name_start_char(c)) && chars.all(is_xml_name_char)
}
///
/// ```ebnf
/// Names ::= Name (#x20 Name)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_names(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_name)
}
///
/// ```ebnf
/// Nmtoken ::= (NameChar)+
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtoken(s: &str) -> bool {
!s.is_empty() && s.chars().all(is_xml_name_char)
}
///
/// ```ebnf
/// Nmtokens ::= Nmtoken (#x20 Nmtoken)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtokens(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_nmtoken)
}
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
impl Default for SpaceHandling {
fn default() -> Self {
SpaceHandling::Default
}
}
// ------------------------------------------------------------------------------------------------
impl Display for SpaceHandling {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(
f,
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE,
XML_NS_SEPARATOR,
XML_NS_ATTR_SPACE,
match self {
SpaceHandling::Default => XML_NS_ATTR_SPACE_DEFAULT,
SpaceHandling::Preserve => XML_NS_ATTR_SPACE_PRESERVE,
}
)
}
}
// ------------------------------------------------------------------------------------------------
impl FromStr for SpaceHandling {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
| ------------------------------------------------------------------------------------------------
// Unit Tests
// ------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use std::borrow::Borrow;
use std::collections::HashMap;
#[test]
fn test_space_handling_default() {
let sh = SpaceHandling::default();
assert_eq!(sh, SpaceHandling::Default);
}
#[test]
fn test_space_handling_display() {
assert_eq!(
format!("{}", SpaceHandling::Default),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_DEFAULT
)
);
assert_eq!(
format!("{}", SpaceHandling::Preserve),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_PRESERVE
)
);
}
#[test]
fn test_space_handling_from_str() {
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_DEFAULT).unwrap(),
SpaceHandling::Default
);
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_PRESERVE).unwrap(),
SpaceHandling::Preserve
);
assert!(SpaceHandling::from_str("").is_err());
assert!(SpaceHandling::from_str("other").is_err());
}
#[test]
fn test_end_of_line_handling() {
let input = "one\u{0D}two\u{0D}\u{0A}\u{0A}three\u{0A}\u{0D}\u{85}four\u{85}five\u{2028}";
let output = normalize_end_of_lines(&input.to_string());
assert_eq!(
output,
"one\u{0A}two\u{0A}\u{0A}three\u{0A}\u{0A}four\u{0A}five\u{0A}".to_string()
)
}
struct NoneEntityResolver {}
impl EntityResolver for NoneEntityResolver {
fn resolve(&self, name: &str) -> Option<String> {
let result: Option<String> = None;
println!("EntityResolver::resolve({:?}) -> {:?}", name, result);
result
}
}
pub(crate) fn none_entity_resolver() -> Box<dyn EntityResolver> {
let resolver = NoneEntityResolver {};
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_trim() {
let resolver = none_entity_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value(" abc ", resolver, true),
" abc "
);
assert_eq!(normalize_attribute_value(" abc ", resolver, false), "abc");
}
struct TestResolver {
entity_map: HashMap<String, String>,
}
impl EntityResolver for TestResolver {
fn resolve(&self, entity: &str) -> Option<String> {
self.entity_map.get(entity).cloned()
}
}
impl TestResolver {
pub(crate) fn new() -> Self {
let mut new_self = Self {
entity_map: Default::default(),
};
let _safe_to_ignore = new_self
.entity_map
.insert("£".to_string(), "£".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("¥".to_string(), "¥".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("€".to_string(), "€".to_string());
let _safe_to_ignore = new_self.entity_map.insert(
"¤cy;".to_string(),
"$, £, €, and ¥".to_string(),
);
| if s == XML_NS_ATTR_SPACE_DEFAULT {
Ok(SpaceHandling::Default)
} else if s == XML_NS_ATTR_SPACE_PRESERVE {
Ok(SpaceHandling::Preserve)
} else {
Err(())
}
}
}
// | identifier_body |
text.rs | /// Handling, so the rest of this algorithm operates on text normalized in this way.
/// 2. Begin with a normalized value consisting of the empty string.
/// 3. For each character, entity reference, or character reference in the unnormalized attribute
/// value, beginning with the first and continuing to the last, do the following:
/// * For a character reference, append the referenced character to the normalized value.
/// * For an entity reference, recursively apply step 3 of this algorithm to the replacement text
/// of the entity.
/// * For a white space character (`#x20`, `#xD`, `#xA`, `#x9`), append a space character (`#x20`)
/// to the normalized value.
/// * For another character, append the character to the normalized value.
///
/// If the attribute type is not CDATA, then the XML processor must further process the normalized
/// attribute value by discarding any leading and trailing space (`#x20`) characters, and by
/// replacing sequences of space (`#x20`) characters by a single space (`#x20`) character.
///
/// Note that if the unnormalized attribute value contains a character reference to a white space
/// character other than space (`#x20`), the normalized value contains the referenced character
/// itself (`#xD`, `#xA` or `#x9`). This contrasts with the case where the unnormalized value
/// contains a white space character (not a reference), which is replaced with a space character
/// (`#x20`) in the normalized value and also contrasts with the case where the unnormalized value
/// contains an entity reference whose replacement text contains a white space character; being
/// recursively processed, the white space character is replaced with a space character (`#x20`) in
/// the normalized value.
///
/// All attributes for which no declaration has been read should be treated by a non-validating
/// processor as if declared CDATA.
///
/// It is an error if an attribute value contains a reference to an entity for which no declaration
/// has been read.
///
pub(crate) fn normalize_attribute_value(
value: &str,
resolver: &dyn EntityResolver,
is_cdata: bool,
) -> String {
let step_1 = normalize_end_of_lines(value);
let step_3 = if step_1.is_empty() {
step_1
} else {
let find = regex::Regex::new(
r"(?P<entity_ref>[&%][\pL_][\pL\.\d_\-]*;)|(?P<char>&#\d+;)|(?P<char_hex>&#x[0-9a-fA-F]+;)|(?P<ws>[\u{09}\u{0A}\u{0D}])",
)
.unwrap();
let mut step_2 = String::new();
let mut last_end = 0;
for capture in find.captures_iter(&step_1) {
let (start, end, replacement) = if let Some(a_match) = capture.name("entity_ref") {
//
// Resolve the reference and recursively apply step 3 of the algorithm
// above to its replacement text.
// TODO: per the specification, an unknown entity reference is an error
// and should be reported rather than cause a panic.
//
let replacement = match resolver.resolve(a_match.as_str()) {
None => panic!("unknown entity reference {}", a_match.as_str()),
Some(replacement) => {
normalize_attribute_value(&replacement, resolver, is_cdata)
}
};
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char_hex") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("ws") {
(a_match.start(), a_match.end(), "\u{20}".to_string())
} else {
panic!("unexpected result");
};
step_2.push_str(&step_1[last_end..start]);
step_2.push_str(&replacement);
last_end = end;
}
if last_end < step_1.len() {
step_2.push_str(&step_1[last_end..]);
}
step_2
};
if is_cdata {
step_3
} else {
// Non-CDATA: discard leading/trailing spaces and collapse internal runs of
// spaces to a single space, per the normalization rules quoted above.
step_3
.split(' ')
.filter(|s| !s.is_empty())
.collect::<Vec<_>>()
.join(" ")
}
}
///
/// From XML 1.1 §2.11 [End-of-Line Handling](https://www.w3.org/TR/xml11/#sec-line-ends):
///
/// XML parsed entities are often stored in computer files which, for editing convenience, are
/// organized into lines. These lines are typically separated by some combination of the characters
/// CARRIAGE RETURN (`#xD`) and LINE FEED (`#xA`).
///
/// To simplify the tasks of applications, the XML processor must behave as if it normalized all line
/// breaks in external parsed entities (including the document entity) on input, before parsing, by
/// translating all of the following to a single `#xA` character:
///
/// * the two-character sequence `#xD` `#xA`
/// * the two-character sequence `#xD` `#x85`
/// * the single character `#x85`
/// * the single character `#x2028`
/// * any `#xD` character that is not immediately followed by `#xA` or `#x85`.
///
/// The characters `#x85` and `#x2028` cannot be reliably recognized and translated until an entity's
/// encoding declaration (if present) has been read. Therefore, it is a fatal error to use them
/// within the XML declaration or text declaration.
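///
/// For example, `"a\u{0D}\u{0A}b\u{0D}c\u{85}d"` normalizes to `"a\u{0A}b\u{0A}c\u{0A}d"`.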
///
pub(crate) fn normalize_end_of_lines(value: &str) -> String {
if value.is_empty() {
value.to_string()
} else {
let line_ends = regex::Regex::new(r"\u{0D}[\u{0A}\u{85}]?|\u{85}|\u{2028}").unwrap();
line_ends.replace_all(value, "\u{0A}").to_string()
}
}
///
/// Escape character data according to XML 1.1
/// [§2.4 Character Data and Markup](https://www.w3.org/TR/xml11/#dt-chardata). This is the
/// do-everything version, not attempting to separate the rules defined below by node type.
///
/// # Specification
///
/// Text consists of intermingled character data and markup. [Definition: **Markup** takes the form
/// of start-tags, end-tags, empty-element tags, entity references, character references, comments,
/// CDATA section delimiters, document type declarations, processing instructions, XML declarations,
/// text declarations, and any white space that is at the top level of the document entity (that is,
/// outside the document element and not inside any other markup).]
///
/// [Definition: All text that is not markup constitutes the **character data** of the document].
///
/// The ampersand character (&) and the left angle bracket (<) must not appear in their literal
/// form, except when used as markup delimiters, or within a comment, a processing instruction, or
/// a CDATA section. If they are needed elsewhere, they must be escaped using either numeric
/// character references or the strings "&amp;" and "&lt;" respectively. The right angle bracket
/// (>) may be represented using the string "&gt;", and must, for compatibility, be escaped using
/// either "&gt;" or a character reference when it appears in the string "]]>" in content, when that
/// string is not marking the end of a CDATA section.
///
/// In the content of elements, character data is any string of characters which does not contain
/// the start-delimiter of any markup or the CDATA-section-close delimiter, "]]>". In a CDATA
/// section, character data is any string of characters not including the CDATA-section-close
/// delimiter.
///
/// To allow attribute values to contain both single and double quotes, the apostrophe or
/// single-quote character (') may be represented as "&apos;", and the double-quote character (")
/// as "&quot;".
///
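/// For example, `escape("a < b & c")` yields `"a &#60; b &#38; c"`, assuming the `XML_ESC_*`
/// constants name the five characters `&`, `'`, `>`, `<`, `"` and `to_entity` (below) renders
/// decimal character references.
///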
pub(crate) fn escape(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
XML_ESC_AMP_CHAR => result.push_str(&to_entity(XML_ESC_AMP_CHAR)),
XML_ESC_APOS_CHAR => result.push_str(&to_entity(XML_ESC_APOS_CHAR)),
XML_ESC_GT_CHAR => result.push_str(&to_entity(XML_ESC_GT_CHAR)),
XML_ESC_LT_CHAR => result.push_str(&to_entity(XML_ESC_LT_CHAR)),
XML_ESC_QUOT_CHAR => result.push_str(&to_entity(XML_ESC_QUOT_CHAR)),
o => result.push(o),
}
}
result
}
pub(crate) fn to_entity(c: char) -> String {
format!(
"{}{}{}",
XML_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
#[allow(dead_code)]
pub(crate) fn to_entity_hex(c: char) -> String {
format!(
"{}{:X}{}",
XML_HEX_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
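/// Decodes a numeric character reference; e.g. `"&#65;"` and `"&#x41;"` both yield `"A"`.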
fn char_from_entity(entity: &str) -> String {
assert!(entity.starts_with("&#"));
assert!(entity.ends_with(';'));
let code_point = if &entity[2..3] == "x" {
let code_point = &entity[3..entity.len() - 1];
u32::from_str_radix(code_point, 16).unwrap()
} else {
let code_point = &entity[2..entity.len() - 1];
u32::from_str_radix(code_point, 10).unwrap()
};
let character = char::try_from(code_point).unwrap();
character.to_string()
}
///
/// From [XML 1.0 §2.2](https://www.w3.org/TR/REC-xml/#charsets)
///
/// Definition: A parsed entity contains **text**, a sequence of characters, which may represent
/// markup or character data. Definition: A **character** is an atomic unit of text as specified by
/// ISO/IEC 10646:2000. Legal characters are tab, carriage return, line feed, and the legal
/// characters of Unicode and ISO/IEC 10646. The versions of these standards cited in A.1 Normative
/// References were current at the time this document was prepared. New characters may be added to
/// these standards by amendments or new editions. Consequently, XML processors must accept any
/// character in the range specified for `Char`.
///
/// ```ebnf
/// Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
/// Document authors are encouraged to avoid "compatibility characters", as defined in section 2.3
/// of Unicode. The characters defined in the following ranges are also discouraged. They are either
/// control characters or permanently undefined Unicode characters:
///
/// ```text
/// [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDEF],
/// [#x1FFFE-#x1FFFF], [#x2FFFE-#x2FFFF], [#x3FFFE-#x3FFFF],
/// [#x4FFFE-#x4FFFF], [#x5FFFE-#x5FFFF], [#x6FFFE-#x6FFFF],
/// [#x7FFFE-#x7FFFF], [#x8FFFE-#x8FFFF], [#x9FFFE-#x9FFFF],
/// [#xAFFFE-#xAFFFF], [#xBFFFE-#xBFFFF], [#xCFFFE-#xCFFFF],
/// [#xDFFFE-#xDFFFF], [#xEFFFE-#xEFFFF], [#xFFFFE-#xFFFFF],
/// [#x10FFFE-#x10FFFF].
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_10_char(c: char) -> bool {
c == '\u{0009}'
|| c == '\u{000A}'
|| c == '\u{000D}'
|| (c >= '\u{0020}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
#[allow(dead_code)]
pub(crate) fn is_xml_10_restricted_char(c: char) -> bool {
c == XML_ESC_AMP_CHAR
|| c == XML_ESC_APOS_CHAR
|| c == XML_ESC_GT_CHAR
|| c == XML_ESC_LT_CHAR
|| c == XML_ESC_QUOT_CHAR
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// Char ::= [#x1-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{0001}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// RestrictedChar ::= [#x1-#x8] | [#xB-#xC] | [#xE-#x1F] | [#x7F-#x84] | [#x86-#x9F]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_restricted_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{01}' && c <= '\u{08}')
|| (c >= '\u{0B}' && c <= '\u{0C}')
|| (c >= '\u{0E}' && c <= '\u{1F}')
|| (c >= '\u{7F}' && c <= '\u{84}')
|| (c >= '\u{86}' && c <= '\u{9F}')
}
///
/// S (white space) consists of one or more space (#x20) characters, carriage returns, line feeds,
/// or tabs.
///
/// ```ebnf
/// S ::= (#x20 | #x9 | #xD | #xA)+
/// ```
///
/// The presence of #xD in the above production is maintained purely for backward compatibility
/// with the First Edition. As explained in 2.11 End-of-Line Handling, all #xD characters literally
/// present in an XML document are either removed or replaced by #xA characters before any other
/// processing is done. The only way to get a #xD character to match this production is to use a
/// character reference in an entity value literal.
///
#[allow(dead_code)]
pub(crate) fn is_xml_space(c: char) -> bool {
c == '\u{09}' || c == '\u{0A}' || c == '\u{0D}' || c == '\u{20}'
}
///
/// ```ebnf
/// NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] |
/// [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] |
/// [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] |
/// [#x10000-#xEFFFF]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_name_start_char(c: char) -> bool {
c == ':'
|| (c >= 'A' && c <= 'Z')
|| c == '_'
|| (c >= 'a' && c <= 'z')
|| (c >= '\u{C0}' && c <= '\u{D6}')
|| (c >= '\u{D8}' && c <= '\u{F6}')
|| (c >= '\u{0F8}' && c <= '\u{2FF}')
|| (c >= '\u{370}' && c <= '\u{37D}')
|| (c >= '\u{037F}' && c <= '\u{1FFF}')
|| (c >= '\u{200C}' && c <= '\u{200D}')
|| (c >= '\u{2070}' && c <= '\u{218F}')
|| (c >= '\u{2C00}' && c <= '\u{2FEF}')
|| (c >= '\u{3001}' && c <= '\u{D7FF}')
|| (c >= '\u{F900}' && c <= '\u{FDCF}')
|| (c >= '\u{FDF0}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{EFFFF}')
}
///
/// ```ebnf
/// NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
/// [#x0300-#x036F] | [#x203F-#x2040]
/// ```
///
pub(crate) fn is_xml_name_char(c: char) -> bool {
is_xml_name_start_char(c)
|| c == '-'
|| c == '.'
|| (c >= '0' && c <= '9')
|| c == '\u{B7}'
|| (c >= '\u{0300}' && c <= '\u{036F}')
|| (c >= '\u{203F}' && c <= '\u{2040}')
}
///
/// ```ebnf
/// Name ::= NameStartChar (NameChar)*
/// ```
///
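/// For example, `is_xml_name("xs:schema")` is true, while `is_xml_name("2bad")` and
/// `is_xml_name("")` are false.
///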
pub(crate) fn is_xml_name(s: &str) -> bool {
// Iterate by characters rather than slicing at byte index 1, which would
// panic when the first character is multi-byte.
let mut chars = s.chars();
match chars.next() {
Some(first) => is_xml_name_start_char(first) && chars.all(is_xml_name_char),
None => false,
}
}
///
/// ```ebnf
/// Names ::= Name (#x20 Name)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_names(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_name)
}
///
/// ```ebnf
/// Nmtoken ::= (NameChar)+
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtoken(s: &str) -> bool {
!s.is_empty() && s.chars().all(is_xml_name_char)
}
///
/// ```ebnf
/// Nmtokens ::= Nmtoken (#x20 Nmtoken)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtokens(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_nmtoken)
}
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
impl Default for SpaceHandling {
fn default() -> Self {
SpaceHandling::Default
}
}
// ------------------------------------------------------------------------------------------------
impl Display for SpaceHandling {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(
f,
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE,
XML_NS_SEPARATOR,
XML_NS_ATTR_SPACE,
match self {
SpaceHandling::Default => XML_NS_ATTR_SPACE_DEFAULT,
SpaceHandling::Preserve => XML_NS_ATTR_SPACE_PRESERVE,
}
)
}
}
// ------------------------------------------------------------------------------------------------
impl FromStr for SpaceHandling {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == XML_NS_ATTR_SPACE_DEFAULT {
Ok(SpaceHandling::Default)
} else if s == XML_NS_ATTR_SPACE_PRESERVE {
Ok(SpaceHandling::Preserve)
} else {
Err(())
}
}
}
// ------------------------------------------------------------------------------------------------
// Unit Tests
// ------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use std::borrow::Borrow;
use std::collections::HashMap;
#[test]
fn test_space_handling_default() {
let sh = SpaceHandling::default();
assert_eq!(sh, SpaceHandling::Default);
}
#[test]
fn test_space_handling_display() {
assert_eq!(
format!("{}", SpaceHandling::Default),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_DEFAULT
)
);
assert_eq!(
format!("{}", SpaceHandling::Preserve),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_PRESERVE
)
);
}
#[test]
fn test_space_handling_from_str() {
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_DEFAULT).unwrap(),
SpaceHandling::Default
);
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_PRESERVE).unwrap(),
SpaceHandling::Preserve
);
assert!(SpaceHandling::from_str("").is_err());
assert!(SpaceHandling::from_str("other").is_err());
}
#[test]
fn test_end_of_line_handling() {
let input = "one\u{0D}two\u{0D}\u{0A}\u{0A}three\u{0A}\u{0D}\u{85}four\u{85}five\u{2028}";
let output = normalize_end_of_lines(&input.to_string());
assert_eq!(
output,
"one\u{0A}two\u{0A}\u{0A}three\u{0A}\u{0A}four\u{0A}five\u{0A}".to_string()
)
}
struct NoneEntityResolver {}
impl EntityResolver for NoneEntityResolver {
fn resolve(&self, name: &str) -> Option<String> {
let result: Option<String> = None;
println!("EntityResolver::resolve({:?}) -> {:?}", name, result);
result
}
}
pub(crate) fn none_entity_resolver() -> Box<dyn EntityResolver> {
let resolver = NoneEntityResolver {};
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_trim() {
let resolver = none_entity_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value(" abc ", resolver, true),
" abc "
);
assert_eq!(normalize_attribute_value(" abc ", resolver, false), "abc");
}
struct TestResolver {
entity_map: HashMap<String, String>,
}
impl EntityResolver for TestResolver {
fn resolve(&self, entity: &str) -> Option<String> {
self.entity_map.get(entity).cloned()
}
}
impl TestResolver {
pub(crate) fn new() -> Self {
let mut new_self = Self {
entity_map: Default::default(),
};
let _safe_to_ignore = new_self
.entity_map
.insert("£".to_string(), "£".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("¥".to_string(), "¥".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("€".to_string(), "€".to_string());
let _safe_to_ignore = new_self.entity_map.insert(
"¤cy;".to_string(),
"$, £, €, and ¥".to_string(),
);
new_self
}
}
fn test_resolver() -> Box<dyn EntityResolver> {
let resolver = TestResolver::new();
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_entity_resolver() {
// The original test body was lost in extraction; this is a plausible
// reconstruction using the entities registered in TestResolver above.
let resolver = test_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value("&currency;", resolver, true),
"$, £, €, and ¥"
);
}
}
// lib.rs
// Copyright 2018. Matthew Pelland <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Parts of this work are derived from the `protoc-rust-grpc` crate by
// Stepan Koltsov <[email protected]>.
//
// Copyright 2016, Stepan Koltsov <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(warnings)]
#![warn(missing_docs)]
//! An API for programmatically invoking the grpcio gRPC compiler in the same vein as the
//! [rust-protoc-grpc](https://crates.io/crates/protoc-rust-grpc) crate from Stepan Koltsov.
extern crate grpcio_compiler;
#[macro_use]
extern crate anyhow;
extern crate tempfile;
extern crate protobuf;
extern crate protobuf_codegen;
extern crate protoc;
use std::convert::AsRef;
use std::fs::File;
use std::io::{Read, Write};
use std::iter::Iterator;
use std::path::{Path, PathBuf};
use std::vec::Vec;
use anyhow::Context;
use tempfile::NamedTempFile;
use protobuf::{compiler_plugin, descriptor, Message};
use protobuf_codegen::Customize;
use protoc::{DescriptorSetOutArgs, Protoc};
/// Custom error type used throughout this crate.
pub type CompileError = ::anyhow::Error;
/// Custom result type used throughout this crate.
pub type CompileResult<T> = Result<T, CompileError>;
fn stringify_paths<Paths>(paths: Paths) -> CompileResult<Vec<String>>
where
Paths: IntoIterator,
Paths::Item: AsRef<Path>,
{
paths
.into_iter()
.map(|input| match input.as_ref().to_str() {
Some(s) => Ok(s.to_owned()),
None => Err(format_err!(
"failed to convert {:?} to string",
input.as_ref()
)),
})
.collect()
}
fn write_out_generated_files<P>(
generation_results: Vec<compiler_plugin::GenResult>,
output_dir: P,
) -> CompileResult<()>
where
P: AsRef<Path>,
{
for result in generation_results {
let file = output_dir.as_ref().join(result.name);
File::create(&file)
.context(format!("failed to create {:?}", &file))?
.write_all(&result.content)
.context(format!("failed to write {:?}", &file))?;
}
Ok(())
}
fn absolutize<P>(path: P) -> CompileResult<PathBuf>
where
P: AsRef<Path>,
{
let p = path.as_ref();
if p.is_relative() {
match std::env::current_dir() {
Ok(cwd) => Ok(cwd.join(p)),
Err(err) => Err(format_err!(
"Failed to determine CWD needed to absolutize a relative path: {:?}",
err
)),
}
} else {
Ok(PathBuf::from(p))
}
}
fn normalize<Paths, Bases>(
paths: Paths,
bases: Bases,
) -> CompileResult<(Vec<PathBuf>, Vec<PathBuf>, Vec<PathBuf>)>
where
Paths: IntoIterator,
Paths::Item: AsRef<Path>,
Bases: IntoIterator,
Bases::Item: AsRef<Path>,
{
let absolutized_bases = bases
.into_iter()
.map(absolutize)
.collect::<CompileResult<Vec<PathBuf>>>()?;
// We deal with the following cases:
// a.) absolute paths
// b.) paths relative to CWD
// c.) paths relative to bases
//
// We take the strategy of transforming the relative path cases (b & c) into absolute paths (a)
// and use the strip_prefix API from there.
let absolutized_paths = paths
.into_iter()
.map(|p| {
let rel_path = p.as_ref().to_path_buf();
let absolute_path = absolutize(&rel_path)?;
Ok((rel_path, absolute_path))
})
// TODO(John Sirois): Use `.flatten()` pending https://github.com/rust-lang/rust/issues/48213
.flat_map(|r: CompileResult<(PathBuf, PathBuf)>| r)
.map(|(rel_path, abs_path)| {
if abs_path.exists() {
// Case a or b.
Ok(abs_path)
} else {
// Case c.
for b in &absolutized_bases {
let absolutized_path = b.join(&rel_path);
if absolutized_path.exists() {
return Ok(absolutized_path);
}
}
Err(format_err!(
"Failed to find the absolute path of input {:?}",
rel_path
))
}
})
.collect::<CompileResult<Vec<PathBuf>>>()?;
let relativized_paths: Vec<PathBuf> = absolutized_paths
.iter()
.map(|p| {
for b in &absolutized_bases {
if let Ok(rel_path) = p.strip_prefix(&b) {
return Ok(PathBuf::from(rel_path));
}
}
Err(format_err!(
"The input path {:?} is not contained by any of the include paths {:?}",
p,
absolutized_bases
))
})
.collect::<CompileResult<Vec<PathBuf>>>()?;
Ok((absolutized_bases, absolutized_paths, relativized_paths))
}
/// Compiles a list of gRPC definitions to Rust modules.
///
/// # Arguments
///
/// * `inputs` - A list of protobuf definition paths to compile. Paths can be specified as absolute,
/// relative to the CWD or relative to one of the `includes` paths. Note that the directory each
/// member of `inputs` is found under must be included in the `includes` parameter.
/// * `includes` - A list of include directory paths to pass to `protoc`. Include paths can be
/// specified either as absolute or relative to the CWD. Note that the directory each member of
/// `inputs` is found under must be included in this parameter.
/// * `output` - Directory to place the generated rust modules into.
/// * `customizations` - An Option<protobuf_codegen::Customize> allowing customization options to be
/// passed to protobuf_codegen
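///
/// # Example
///
/// A minimal sketch; the proto file and directory names here are hypothetical:
///
/// ```no_run
/// // compile_grpc_protos(
/// //     &["helloworld.proto"],
/// //     &["protos"],
/// //     "src/generated",
/// //     None,
/// // ).expect("failed to compile gRPC definitions");
/// ```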
pub fn compile_grpc_protos<Inputs, Includes, Output>(
inputs: Inputs,
includes: Includes,
output: Output,
customizations: Option<Customize>,
) -> CompileResult<()>
where
Inputs: IntoIterator,
Inputs::Item: AsRef<Path>,
Includes: IntoIterator,
Includes::Item: AsRef<Path>,
Output: AsRef<Path>,
{
let protoc = Protoc::from_env_path();
protoc
.check()
.context("failed to find `protoc`, `protoc` must be availabe in `PATH`")?;
let (absolutized_includes, absolutized_paths, relativized_inputs) =
normalize(inputs, includes)?;
let stringified_inputs_absolute = stringify_paths(absolutized_paths)?;
let stringified_inputs = stringify_paths(relativized_inputs)?;
let stringified_includes = stringify_paths(absolutized_includes)?;
let descriptor_set = NamedTempFile::new()?;
protoc
.write_descriptor_set(DescriptorSetOutArgs {
out: match descriptor_set.as_ref().to_str() {
Some(s) => s,
None => bail!("failed to convert descriptor set path to string"),
},
input: stringified_inputs_absolute
.iter()
.map(String::as_str)
.collect::<Vec<&str>>()
.as_slice(),
includes: stringified_includes
.iter()
.map(String::as_str)
.collect::<Vec<&str>>()
.as_slice(),
include_imports: true,
})
.context("failed to write descriptor set")?;
let mut serialized_descriptor_set = Vec::new();
File::open(&descriptor_set)
.context("failed to open descriptor set")?
.read_to_end(&mut serialized_descriptor_set)
.context("failed to read descriptor set")?;
let descriptor_set =
descriptor::FileDescriptorSet::parse_from_bytes(&serialized_descriptor_set)
.context("failed to parse descriptor set")?;
let customize = customizations.unwrap_or_default();
write_out_generated_files(
grpcio_compiler::codegen::gen(descriptor_set.get_file(), stringified_inputs.as_slice()),
&output,
)
.context("failed to write generated grpc definitions")?;
write_out_generated_files(
protobuf_codegen::gen(
descriptor_set.get_file(),
stringified_inputs.as_slice(),
&customize,
),
&output,
)
.context("failed to write out generated protobuf definitions")?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use tempfile::tempdir;
fn assert_compile_grpc_protos<Input, Output>(input: Input, expected_outputs: Output)
where
Input: AsRef<Path>,
Output: IntoIterator + Copy,
Output::Item: AsRef<Path>,
{
let rel_include_path = PathBuf::from("test/assets/protos");
let abs_include_path = Path::new(env!("CARGO_MANIFEST_DIR")).join(&rel_include_path);
for include_path in &[&rel_include_path, &abs_include_path] {
for inputs in &[vec![input.as_ref()], vec![&include_path.join(&input)]] {
let temp_dir = tempdir().unwrap();
compile_grpc_protos(inputs, &[include_path], &temp_dir, None).unwrap();
for output in expected_outputs {
assert!(temp_dir.as_ref().join(output).is_file());
}
}
}
}
#[test]
fn test_compile_grpc_protos() {
assert_compile_grpc_protos("helloworld.proto", &["helloworld_grpc.rs", "helloworld.rs"])
}
#[test]
fn test_compile_grpc_protos_subdir() {
assert_compile_grpc_protos("foo/bar/baz.proto", &["baz_grpc.rs", "baz.rs"])
}
}
// encoder.rs
extern crate deflate;
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::io::{self, Write};
use std::mem;
use std::result;
use chunk;
use crc::Crc32;
use common::{AnimationControl, FrameControl, Info, ColorType, BitDepth};
use filter::{FilterType, filter};
use traits::{WriteBytesExt, HasParameters, Parameter};
pub type Result<T> = result::Result<T, EncodingError>;
#[derive(Debug)]
pub enum EncodingError {
IoError(io::Error),
Format(Cow<'static, str>),
}
impl error::Error for EncodingError {
fn description(&self) -> &str {
use self::EncodingError::*;
match *self {
IoError(ref err) => err.description(),
Format(ref desc) => &desc,
}
}
}
impl fmt::Display for EncodingError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(fmt, "{}", (self as &error::Error).description())
}
}
impl From<io::Error> for EncodingError {
fn from(err: io::Error) -> EncodingError {
EncodingError::IoError(err)
}
}
impl From<EncodingError> for io::Error {
fn from(err: EncodingError) -> io::Error {
io::Error::new(io::ErrorKind::Other, (&err as &error::Error).description())
}
}
/// PNG Encoder
pub struct Encoder<W: Write> {
w: W,
info: Info,
}
impl<W: Write> Encoder<W> {
pub fn new(w: W, width: u32, height: u32) -> Encoder<W> {
let mut info = Info::default();
info.width = width;
info.height = height;
Encoder { w: w, info: info }
}
pub fn new_animated_with_frame_rate(w: W, width: u32, height: u32, frames: u32, delay_num: u16, delay_den: u16) -> Result<Encoder<W>> {
let mut enc = Encoder::new_animated(w, width, height, frames)?;
let mut frame_ctl = enc.info.frame_control.unwrap();
frame_ctl.delay_num = delay_num;
frame_ctl.delay_den = delay_den;
enc.info.frame_control = Some(frame_ctl);
Ok(enc)
}
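/// Creates an encoder for an animated PNG with `frames` frames.
///
/// A minimal usage sketch (frame buffers and error handling are hypothetical):
///
/// ```no_run
/// // let file = std::fs::File::create("anim.png")?;
/// // let mut writer = Encoder::new_animated(file, 100, 100, 2)?.write_header()?;
/// // writer.write_frame(&frame0)?; // first frame: fcTL + IDAT
/// // writer.write_frame(&frame1)?; // later frames: fcTL + fdAT
/// ```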
pub fn new_animated(w: W, width: u32, height: u32, frames: u32) -> Result<Encoder<W>> {
if frames > 0 {
let mut encoder = Encoder::new(w, width, height);
let animation_ctl = AnimationControl { num_frames: frames, num_plays: 0 };
let mut frame_ctl = FrameControl::default();
frame_ctl.width = width;
frame_ctl.height = height;
encoder.info.animation_control = Some(animation_ctl);
encoder.info.frame_control = Some(frame_ctl);
Ok(encoder)
} else {
Err(EncodingError::Format("invalid number of frames for an animated PNG".into()))
}
}
pub fn write_header(self) -> Result<Writer<W>> {
Writer::new(self.w, self.info).init()
}
}
impl<W: Write> HasParameters for Encoder<W> {}
impl<W: Write> Parameter<Encoder<W>> for ColorType {
fn set_param(self, this: &mut Encoder<W>) {
this.info.color_type = self
}
}
impl<W: Write> Parameter<Encoder<W>> for BitDepth {
fn set_param(self, this: &mut Encoder<W>) {
this.info.bit_depth = self
}
}
/// PNG writer
pub struct Writer<W: Write> {
w: W,
info: Info,
separate_default_image: bool,
}
impl<W: Write> Writer<W> {
fn new(w: W, info: Info) -> Writer<W> {
let w = Writer { w: w, info: info, separate_default_image: false };
w
}
fn init(mut self) -> Result<Self> {
try!(self.w.write_all(&[137, 80, 78, 71, 13, 10, 26, 10]));
let mut data = [0; 13];
try!((&mut data[..]).write_be(self.info.width));
try!((&mut data[4..]).write_be(self.info.height));
data[8] = self.info.bit_depth as u8;
data[9] = self.info.color_type as u8;
data[12] = if self.info.interlaced { 1 } else { 0 };
try!(self.write_chunk(chunk::IHDR, &data));
match self.info {
Info { animation_control: Some(anim_ctl), frame_control: Some(_),..} => {
let mut data = [0; 8];
try!((&mut data[..]).write_be(anim_ctl.num_frames));
try!((&mut data[4..]).write_be(anim_ctl.num_plays));
try!(self.write_chunk(chunk::acTL, &data));
}
_ => {}
};
Ok(self)
}
pub fn write_chunk_with_fields(&mut self, name: [u8; 4], data: &[u8], fields: Option<&[u8]>) -> Result<()> {
// The chunk length counts the optional fields plus the data, and the CRC
// covers the chunk name, fields, and data. Use write_all so short writes
// cannot silently truncate the chunk.
self.w.write_be(data.len() as u32 + fields.map_or(0, |f| f.len() as u32))?;
self.w.write_all(&name)?;
if let Some(fields) = fields {
self.w.write_all(fields)?;
}
self.w.write_all(data)?;
let mut crc = Crc32::new();
crc.update(&name);
if let Some(fields) = fields {
crc.update(fields);
}
crc.update(data);
self.w.write_be(crc.checksum())?;
Ok(())
}
pub fn write_chunk(&mut self, name: [u8; 4], data: &[u8]) -> Result<()> {
self.write_chunk_with_fields(name, data, None)
}
/// Writes the image data.
pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> {
let zlib = self.get_image_data(data)?;
self.write_chunk(chunk::IDAT, &try!(zlib.finish()))
}
fn get_image_data(&mut self, data: &[u8]) -> Result<deflate::write::ZlibEncoder<Vec<u8>>> {
let bpp = self.info.bytes_per_pixel();
let in_len = self.info.raw_row_length() - 1;
let mut prev = vec![0; in_len];
let mut current = vec![0; in_len];
let data_size = in_len * self.info.height as usize;
if data.len() < data_size || data_size == 0 {
return Err(EncodingError::Format("not enough image data provided".into()));
}
let mut zlib = deflate::write::ZlibEncoder::new(Vec::new(), deflate::Compression::Fast);
let filter_method = FilterType::Sub;
for line in data.chunks(in_len) {
current.copy_from_slice(&line);
try!(zlib.write_all(&[filter_method as u8]));
filter(filter_method, bpp, &prev, &mut current);
try!(zlib.write_all(¤t));
mem::swap(&mut prev, &mut current);
}
Ok(zlib)
}
pub fn write_separate_default_image(&mut self, data: &[u8]) -> Result<()> {
match self.info {
Info { animation_control: Some(_), frame_control: Some(frame_control),..} => {
if frame_control.sequence_number != 0 {
Err(EncodingError::Format("separate default image provided after frame sequence has begun".into()))
} else if self.separate_default_image {
Err(EncodingError::Format("default image already written".into()))
} else {
self.separate_default_image = true;
self.write_image_data(data)
}
}
_ => {
Err(EncodingError::Format("default image provided for a non-animated PNG".into()))
}
}
}
#[allow(non_snake_case)]
fn write_fcTL(&mut self) -> Result<()> {
let frame_ctl = self.info.frame_control.ok_or(EncodingError::Format("cannot write fcTL for a non-animated PNG".into()))?;
let mut data = [0u8; 26];
(&mut data[..]).write_be(frame_ctl.sequence_number)?;
(&mut data[4..]).write_be(frame_ctl.width)?;
(&mut data[8..]).write_be(frame_ctl.height)?;
(&mut data[12..]).write_be(frame_ctl.x_offset)?;
(&mut data[16..]).write_be(frame_ctl.y_offset)?;
(&mut data[20..]).write_be(frame_ctl.delay_num)?;
(&mut data[22..]).write_be(frame_ctl.delay_den)?;
data[24] = frame_ctl.dispose_op as u8;
data[25] = frame_ctl.blend_op as u8;
self.write_chunk(chunk::fcTL, &data)
}
#[allow(non_snake_case)]
fn write_fdAT(&mut self, data: &[u8]) -> Result<()> {
// println!("Writing fdAT:{:?}", self.info.frame_control.unwrap().sequence_number+1);
let zlib = self.get_image_data(data)?;
let mut data = [0u8; 4];
(&mut data[..]).write_be(self.info.frame_control
.ok_or(EncodingError::Format("cannot write fdAT for a non-animated PNG".into()))?.sequence_number+1u32)?;
self.write_chunk_with_fields(chunk::fdAT, &zlib.finish()?, Some(&data))
}
pub fn write_frame(&mut self, data: &[u8]) -> Result<()> {
// println!("{:?}", self.info.frame_control.unwrap().sequence_number);
match self.info {
Info { animation_control: Some(AnimationControl { num_frames: 0,..}), frame_control: Some(_),..} => {
Err(EncodingError::Format("exceeded number of frames specified".into()))
},
Info { animation_control: Some(anim_ctl), frame_control: Some(frame_control),..} => {
match frame_control.sequence_number {
0 => {
let ret = if self.separate_default_image { // If we've already written the default image we can write frames the normal way
// fcTL + fdAT
self.write_fcTL().and(self.write_fdAT(data))
} else { // If not we'll have to set the first frame to be both:
// fcTL + first frame (IDAT)
self.write_fcTL().and(self.write_image_data(data))
};
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(1);
self.info.frame_control = Some(fc);
ret
},
x if x == 2 * anim_ctl.num_frames - 1 => {
// println!("We're done, boss");
// This is the last frame:
// Do the usual and also set AnimationControl to no remaining frames:
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.set_seq_num(0);
self.info.frame_control = Some(fc);
ret
},
_ => {
// Usual case:
// fcTL + fdAT
// println!("Buisness as usual");
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(2);
self.info.frame_control = Some(fc);
ret
}
}
},
_ => {
Err(EncodingError::Format("frame provided for a non-animated PNG".into()))
}
}
}
}
impl<W: Write> Drop for Writer<W> {
fn drop(&mut self) {
let _ = self.write_chunk(chunk::IEND, &[]);
}
}
#[test]
fn roundtrip() {
use std::fs::File;
// Decode image
let decoder = ::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf = vec![0; info.buffer_size()];
reader.next_frame(&mut buf).unwrap();
// Encode decoded image
let mut out = Vec::new();
{
let mut encoder = Encoder::new(&mut out, info.width, info.height).write_header().unwrap();
encoder.write_image_data(&buf).unwrap();
}
// Decode encoded decoded image
let decoder = ::Decoder::new(&*out);
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf2 = vec![0; info.buffer_size()];
reader.next_frame(&mut buf2).unwrap();
// check if the encoded image is ok:
assert_eq!(buf, buf2);
}
encoder.rs | extern crate deflate;
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::io::{self, Write};
use std::mem;
use std::result;
use chunk;
use crc::Crc32;
use common::{AnimationControl, FrameControl, Info, ColorType, BitDepth};
use filter::{FilterType, filter};
use traits::{WriteBytesExt, HasParameters, Parameter};
pub type Result<T> = result::Result<T, EncodingError>;
#[derive(Debug)]
pub enum EncodingError {
IoError(io::Error),
Format(Cow<'static, str>),
}
impl error::Error for EncodingError { | use self::EncodingError::*;
match *self {
IoError(ref err) => err.description(),
Format(ref desc) => &desc,
}
}
}
impl fmt::Display for EncodingError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(fmt, "{}", (self as &error::Error).description())
}
}
impl From<io::Error> for EncodingError {
fn from(err: io::Error) -> EncodingError {
EncodingError::IoError(err)
}
}
impl From<EncodingError> for io::Error {
fn from(err: EncodingError) -> io::Error {
io::Error::new(io::ErrorKind::Other, (&err as &error::Error).description())
}
}
/// PNG Encoder
pub struct Encoder<W: Write> {
w: W,
info: Info,
}
impl<W: Write> Encoder<W> {
pub fn new(w: W, width: u32, height: u32) -> Encoder<W> {
let mut info = Info::default();
info.width = width;
info.height = height;
Encoder { w: w, info: info }
}
pub fn new_animated_with_frame_rate(w: W, width: u32, height: u32, frames: u32, delay_num: u16, delay_den: u16) -> Result<Encoder<W>> {
let mut enc = Encoder::new_animated(w, width, height, frames)?;
let mut frame_ctl = enc.info.frame_control.unwrap();
frame_ctl.delay_num = delay_num;
frame_ctl.delay_den = delay_den;
enc.info.frame_control = Some(frame_ctl);
Ok(enc)
}
pub fn new_animated(w: W, width: u32, height: u32, frames: u32) -> Result<Encoder<W>> {
if frames > 0 {
let mut encoder = Encoder::new(w, width, height);
let animation_ctl = AnimationControl { num_frames: frames, num_plays: 0 };
let mut frame_ctl = FrameControl::default();
frame_ctl.width = width;
frame_ctl.height = height;
encoder.info.animation_control = Some(animation_ctl);
encoder.info.frame_control = Some(frame_ctl);
Ok(encoder)
} else {
Err(EncodingError::Format("invalid number of frames for an animated PNG".into()))
}
}
pub fn write_header(self) -> Result<Writer<W>> {
Writer::new(self.w, self.info).init()
}
}
impl<W: Write> HasParameters for Encoder<W> {}
impl<W: Write> Parameter<Encoder<W>> for ColorType {
fn set_param(self, this: &mut Encoder<W>) {
this.info.color_type = self
}
}
impl<W: Write> Parameter<Encoder<W>> for BitDepth {
fn set_param(self, this: &mut Encoder<W>) {
this.info.bit_depth = self
}
}
/// PNG writer
pub struct Writer<W: Write> {
w: W,
info: Info,
separate_default_image: bool,
}
impl<W: Write> Writer<W> {
fn new(w: W, info: Info) -> Writer<W> {
let w = Writer { w: w, info: info, separate_default_image: false };
w
}
fn init(mut self) -> Result<Self> {
try!(self.w.write(&[137, 80, 78, 71, 13, 10, 26, 10]));
let mut data = [0; 13];
try!((&mut data[..]).write_be(self.info.width));
try!((&mut data[4..]).write_be(self.info.height));
data[8] = self.info.bit_depth as u8;
data[9] = self.info.color_type as u8;
data[12] = if self.info.interlaced { 1 } else { 0 };
try!(self.write_chunk(chunk::IHDR, &data));
match self.info {
Info { animation_control: Some(anim_ctl), frame_control: Some(_),..} => {
let mut data = [0; 8];
try!((&mut data[..]).write_be(anim_ctl.num_frames));
try!((&mut data[4..]).write_be(anim_ctl.num_plays));
try!(self.write_chunk(chunk::acTL, &data));
}
_ => {}
};
Ok(self)
}
pub fn write_chunk_with_fields(&mut self, name: [u8; 4], data: &[u8], fields: Option<&[u8]>) -> Result<()> {
self.w.write_be(data.len() as u32 + (if fields.is_some() { fields.unwrap().len() as u32 } else { 0 }))?;
self.w.write(&name)?;
if fields.is_some() { try!(self.w.write(fields.unwrap())); }
self.w.write(data)?;
let mut crc = Crc32::new();
crc.update(&name);
if fields.is_some() { crc.update(fields.unwrap()); }
crc.update(data);
self.w.write_be(crc.checksum())?;
Ok(())
}
pub fn write_chunk(&mut self, name: [u8; 4], data: &[u8]) -> Result<()> {
self.write_chunk_with_fields(name, data, None)
}
/// Writes the image data.
pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> {
let zlib = self.get_image_data(data)?;
self.write_chunk(chunk::IDAT, &try!(zlib.finish()))
}
fn get_image_data(&mut self, data: &[u8]) -> Result<deflate::write::ZlibEncoder<Vec<u8>>> {
let bpp = self.info.bytes_per_pixel();
let in_len = self.info.raw_row_length() - 1;
let mut prev = vec![0; in_len];
let mut current = vec![0; in_len];
let data_size = in_len * self.info.height as usize;
if data.len() < data_size || data_size == 0 {
return Err(EncodingError::Format("not enough image data provided".into()));
}
let mut zlib = deflate::write::ZlibEncoder::new(Vec::new(), deflate::Compression::Fast);
let filter_method = FilterType::Sub;
for line in data.chunks(in_len) {
current.copy_from_slice(&line);
try!(zlib.write_all(&[filter_method as u8]));
filter(filter_method, bpp, &prev, &mut current);
try!(zlib.write_all(¤t));
mem::swap(&mut prev, &mut current);
}
Ok(zlib)
}
pub fn write_separate_default_image(&mut self, data: &[u8]) -> Result<()> {
match self.info {
Info { animation_control: Some(_), frame_control: Some(frame_control),..} => {
if frame_control.sequence_number!= 0 {
Err(EncodingError::Format("separate default image provided after frame sequence has begun".into()))
} else if self.separate_default_image {
Err(EncodingError::Format("default image already written".into()))
} else {
self.separate_default_image = true;
self.write_image_data(data)
}
}
_ => {
Err(EncodingError::Format("default image provided for a non-animated PNG".into()))
}
}
}
#[allow(non_snake_case)]
fn write_fcTL(&mut self) -> Result<()> {
let frame_ctl = self.info.frame_control.ok_or(EncodingError::Format("cannot write fcTL for a non-animated PNG".into()))?;
let mut data = [0u8; 26];
(&mut data[..]).write_be(frame_ctl.sequence_number)?;
(&mut data[4..]).write_be(frame_ctl.width)?;
(&mut data[8..]).write_be(frame_ctl.height)?;
(&mut data[12..]).write_be(frame_ctl.x_offset)?;
(&mut data[16..]).write_be(frame_ctl.y_offset)?;
(&mut data[20..]).write_be(frame_ctl.delay_num)?;
(&mut data[22..]).write_be(frame_ctl.delay_den)?;
data[24] = frame_ctl.dispose_op as u8;
data[25] = frame_ctl.blend_op as u8;
self.write_chunk(chunk::fcTL, &data)
}
#[allow(non_snake_case)]
fn write_fdAT(&mut self, data: &[u8]) -> Result<()> {
// println!("Writing fdAT:{:?}", self.info.frame_control.unwrap().sequence_number+1);
let zlib = self.get_image_data(data)?;
let mut data = [0u8; 4];
(&mut data[..]).write_be(self.info.frame_control
.ok_or(EncodingError::Format("cannot write fdAT for a non-animated PNG".into()))?.sequence_number+1u32)?;
self.write_chunk_with_fields(chunk::fdAT, &zlib.finish()?, Some(&data))
}
    pub fn write_frame(&mut self, data: &[u8]) -> Result<()> {
        match self.info {
            Info { animation_control: Some(AnimationControl { num_frames: 0, .. }), frame_control: Some(_), .. } => {
                Err(EncodingError::Format("exceeded number of frames specified".into()))
            },
            Info { animation_control: Some(anim_ctl), frame_control: Some(frame_control), .. } => {
                match frame_control.sequence_number {
                    0 => {
                        // If the default image was already written separately,
                        // the first frame is a normal fcTL + fdAT pair; otherwise
                        // the first frame doubles as the default image (fcTL + IDAT).
                        let ret = if self.separate_default_image {
                            self.write_fcTL().and(self.write_fdAT(data))
                        } else {
                            self.write_fcTL().and(self.write_image_data(data))
                        };
                        let mut fc = self.info.frame_control.unwrap();
                        fc.inc_seq_num(1);
                        self.info.frame_control = Some(fc);
                        ret
                    },
                    x if x == 2 * anim_ctl.num_frames - 1 => {
                        // This is the last frame: do the usual writes, then
                        // reset the sequence counter.
                        let ret = self.write_fcTL().and(self.write_fdAT(data));
                        let mut fc = self.info.frame_control.unwrap();
                        fc.set_seq_num(0);
                        self.info.frame_control = Some(fc);
                        ret
                    },
                    _ => {
                        // Usual case: fcTL + fdAT.
                        let ret = self.write_fcTL().and(self.write_fdAT(data));
                        let mut fc = self.info.frame_control.unwrap();
                        fc.inc_seq_num(2);
                        self.info.frame_control = Some(fc);
                        ret
                    }
                }
            },
            _ => {
                Err(EncodingError::Format("frame provided for a non-animated PNG".into()))
            }
        }
    }
}
impl<W: Write> Drop for Writer<W> {
    fn drop(&mut self) {
        // Terminate the PNG with an empty IEND chunk; errors here are ignored.
        let _ = self.write_chunk(chunk::IEND, &[]);
    }
}
#[test]
fn roundtrip() {
    use std::fs::File;
    // Decode a reference image
    let decoder = ::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
    let (info, mut reader) = decoder.read_info().unwrap();
    let mut buf = vec![0; info.buffer_size()];
    reader.next_frame(&mut buf).unwrap();
    // Re-encode the decoded image
    let mut out = Vec::new();
    {
        let mut encoder = Encoder::new(&mut out, info.width, info.height).write_header().unwrap();
        encoder.write_image_data(&buf).unwrap();
    }
    // Decode the re-encoded image
    let decoder = ::Decoder::new(&*out);
    let (info, mut reader) = decoder.read_info().unwrap();
    let mut buf2 = vec![0; info.buffer_size()];
    reader.next_frame(&mut buf2).unwrap();
    // The two decoded buffers must match exactly.
    assert_eq!(buf, buf2);
}
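
// An illustrative usage sketch, not part of the original file: the intended
// call order for an animated PNG. It assumes the `new_animated` constructor
// defined above and a 1x1 grayscale image so each frame is a single byte;
// the pixel values are placeholders.
#[allow(dead_code)]
fn animated_usage_sketch() {
    let mut out = Vec::new();
    {
        let mut writer = Encoder::new_animated(&mut out, 1, 1, 2)
            .unwrap()
            .write_header()
            .unwrap();
        // Optional default image, shown by viewers that ignore APNG chunks.
        writer.write_separate_default_image(&[0]).unwrap();
        // Each call writes fcTL (+ fdAT) and advances the sequence counter.
        writer.write_frame(&[0]).unwrap();
        writer.write_frame(&[255]).unwrap();
        // IEND is written automatically when `writer` is dropped.
    }
}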

// conn.rs
use serialize::json;
use std::comm;
use std::io;
use std::io::{BufferedReader, LineBufferedWriter, Reader, Writer};
use std::io::net::addrinfo;
use std::io::net::tcp::TcpStream;
use std::io::net::ip::SocketAddr;
use std::io::process;
use std::io::stdio::StdWriter;
use rand;
use rand::Rng;
use std::str;
use term;
use crypto;
use json::ExtraJSON;
use packet;
use packet::Packet;
use util::{ReaderExtensions, WriterExtensions};
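// A connection starts out as a plain TCP stream and is swapped for an
// AES-wrapped stream once encryption is negotiated during login.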
enum Sock {
    Plain(TcpStream),
    Encrypted(crypto::AesStream<TcpStream>)
}
pub struct Connection {
    addr: SocketAddr,
    host: ~str,
    // `Option` so the plain stream can be taken out and re-wrapped
    // with encryption (see `enable_encryption`).
    sock: Option<Sock>,
    name: ~str,
    term: term::Terminal<LineBufferedWriter<StdWriter>>
}
impl Connection {
    pub fn new(name: ~str, host: ~str, port: u16) -> Result<Connection, ~str> {
        // Resolve the host and connect to the first returned address.
        let addr = match addrinfo::get_host_addresses(host) {
            Ok(a) => a[0],
            Err(e) => return Err(e.to_str())
        };
        let addr = SocketAddr { ip: addr, port: port };
        debug!("Connecting to server at {}.", addr.to_str());
        let sock = TcpStream::connect(addr);
        let sock = match sock {
            Ok(s) => s,
            Err(e) => return Err(format!("{} - {}", e.kind.to_str(), e.desc))
        };
        debug!("Successfully connected to server.");
        let t = match term::Terminal::new(io::stdout()) {
            Ok(t) => t,
            Err(e) => return Err(e)
        };
        Ok(Connection {
            addr: addr,
            host: host,
            sock: Some(Plain(sock)),
            name: name,
            term: t
        })
    }
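    // Status query: handshake into the status state and print the server's
    // status JSON without logging in.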
    pub fn status(&mut self) {
        self.send_handshake(false);
        // Send the status request
        self.write_packet(Packet::new_out(0x0));
        // and read back the response
        let (packet_id, mut packet) = self.read_packet();
        // Make sure we got the right response
        assert_eq!(packet_id, 0x0);
        // The status response is a JSON document.
        let json = ExtraJSON::new(json::from_str(packet.read_string()).unwrap());
        println!("Minecraft Server Status [{}:{}]", self.host, self.addr.port);
        println!("Version: {}", json["version"]["name"].string());
        println!("Protocol: {}", json["version"]["protocol"].as_int());
        println!("Description: {}", json["description"].string());
        println!("Players: ({}/{})", json["players"]["online"].as_int(), json["players"]["max"].as_int());
        let players = json["players"]["sample"].list();
        for player in players.iter() {
            println!("\t{} ({})", player["name"].string(), player["id"].string());
        }
    }
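    // Main client loop: log in, then forward stdin lines as chat packets
    // while handling whatever the server sends back.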
    pub fn run(mut self) {
        // If the server is in online-mode we need to do authentication
        // and enable encryption.
        self.login();
        // Get a port to read messages from stdin
        let msgs = self.read_messages();
        // Now we just loop and read in all the packets we can.
        // We don't actually do anything for most of them except
        // for chat and keep-alives.
        loop {
            // Drain any messages queued up for sending.
            'msg: loop {
                match msgs.try_recv() {
                    comm::Data(msg) => {
                        if msg.is_empty() {
                            continue;
                        } else if msg.len() > 100 {
                            println!("Message too long.");
                            continue;
                        }
                        // Send the message!
                        let mut p = Packet::new_out(0x1);
                        p.write_string(msg);
                        self.write_packet(p);
                    }
                    comm::Empty => break 'msg,
                    comm::Disconnected => fail!("input stream disconnected")
                }
            }
            // Read in and handle a packet
            let (packet_id, mut packet) = self.read_packet();
            self.handle_message(packet_id, &mut packet);
        }
    }
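    // Play-state packets handled here: 0x00 is the keep-alive, which must be
    // echoed back or the server drops the client; 0x02 is a chat message
    // whose payload is a JSON "translate" structure.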
    fn handle_message(&mut self, packet_id: i32, packet: &mut packet::InPacket) {
        // Keep Alive
        if packet_id == 0x0 {
            let x = packet.read_be_i32().unwrap();
            // Need to respond with the same payload.
            let mut resp = Packet::new_out(0x0);
            resp.write_be_i32(x);
            self.write_packet(resp);
        // Chat Message
        } else if packet_id == 0x2 {
            let json = packet.read_string();
            debug!("Got chat message: {}", json);
            // Wrap up the Json so that we can deal with it more easily.
            let j = json::from_str(json).unwrap();
            let j = ExtraJSON::new(j);
            let ty = j["translate"].string();
            // Player Chat
            if "chat.type.text" == ty {
                let user = j["with"][0]["text"].string();
                let msg = j["with"][1].string();
                self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_GREEN));
                write!(&mut self.term as &mut Writer, "<{}> ", user);
                self.term.reset();
                self.term.write(msg.as_bytes());
                self.term.write(bytes!("\n"));
            // Server Message
            } else if "chat.type.announcement" == ty {
                let msg = j["with"][1]["extra"].list_map(|x| x.string()).concat();
                self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_YELLOW));
                self.term.write(bytes!("[Server] "));
                self.term.reset();
                self.term.write(msg.as_bytes());
                self.term.write(bytes!("\n"));
            }
        }
    }
fn login(&mut self) {
self.send_handshake(true);
self.send_username();
// Read the next packet and find out whether we need
// to do authentication and encryption
let (mut packet_id, mut packet) = self.read_packet();
debug!("Packet ID: {}", packet_id);
if packet_id == 0x1 {
// Encryption Request
// online-mode = true
self.enable_encryption(&mut packet);
// Read the next packet...
let (pi, p) = self.read_packet();
packet_id = pi;
packet = p;
}
if packet_id == 0x0 {
// Disconnect
let reason = packet.read_string();
debug!("Reason: {}", reason);
fail!("Received disconnect.");
}
// Login Success
assert_eq!(packet_id, 0x2);
let uuid = packet.read_string();
let username = packet.read_string();
debug!("UUID: {}", uuid);
debug!("Username: {}", username);
}
fn enable_encryption(&mut self, packet: &mut packet::InPacket) {
// Get all the data from the Encryption Request packet
let server_id = packet.read_string();
let key_len = packet.read_be_i16().unwrap();
let public_key = packet.read_exact(key_len as uint).unwrap();
let token_len = packet.read_be_i16().unwrap();
let verify_token = packet.read_exact(token_len as uint).unwrap();
// Server's public key
let pk = crypto::RSAPublicKey::from_bytes(public_key.as_slice()).unwrap();
// Generate random 16 byte key
let mut key = [0u8,..16];
rand::task_rng().fill_bytes(key);
// Encrypt shared secret with server's public key
let ekey = pk.encrypt(key).unwrap();
// Encrypt verify token with server's public key
let etoken = pk.encrypt(verify_token.as_slice()).unwrap();
// Generate the server id hash
let mut sha1 = crypto::SHA1::new();
sha1.update(server_id.as_bytes());
sha1.update(key);
sha1.update(public_key.as_slice());
let hash = sha1.special_digest();
debug!("Hash: {}", hash);
// Do client auth
self.authenticate(hash);
// Create Encryption Response Packet
let mut erp = Packet::new_out(0x1);
// Write encrypted shared secret
erp.write_be_i16(ekey.len() as i16);
erp.write(ekey);
// Write encrypted verify token
erp.write_be_i16(etoken.len() as i16);
erp.write(etoken);
// Send
self.write_packet(erp);
// Create AES cipher with shared secret
let aes = crypto::AES::new(key.to_owned(), key.to_owned()).unwrap();
// Get the plain TCP stream
let sock = match self.sock.take_unwrap() {
Plain(s) => s,
_ => fail!("Expected plain socket!")
};
// and wwrap it in an AES Stream
let sock = crypto::AesStream::new(sock, aes);
// and put the new encrypted stream back
// everything form this point is encrypted
self.sock = Some(Encrypted(sock));
}
fn authenticate(&mut self, hash: ~str) {
let url = ~"https://authserver.mojang.com/authenticate";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap();
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
"agent": \{
"name": "Minecraft",
"version": 1
\},
"username": "{}",
"password": "{}"
\}"#, "USER", "PASS"); // XXX: Don't hardcode these...
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
let json = ExtraJSON::new(json::from_str(out).unwrap());
let token = json["accessToken"].string();
let profile = json["selectedProfile"]["id"].string();
let url = ~"https://sessionserver.mojang.com/session/minecraft/join";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap(); | "selectedProfile": "{}",
"serverId": "{}"
\}"#, token, profile, hash);
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
}
fn read_messages(&self) -> Receiver<~str> {
let (chan, port) = comm::channel();
spawn(proc() {
println!("Type message and then [ENTER] to send:");
let mut stdin = BufferedReader::new(io::stdin());
for line in stdin.lines() {
chan.send(line.unwrap().trim().to_owned());
}
});
port
}
fn write_packet(&mut self, p: packet::OutPacket) {
// Get the actual buffer
let buf = p.buf();
// Write out the packet length
self.sock.write_varint(buf.len() as i32);
// and the actual payload
self.sock.write(buf.as_slice());
}
fn read_packet(&mut self) -> (i32, packet::InPacket) {
// Read the packet length
let len = self.sock.read_varint();
// Now the payload
let buf = self.sock.read_exact(len as uint).unwrap();
let mut p = Packet::new_in(buf);
// Get the packet id
let id = p.read_varint();
(id, p)
}
fn send_handshake(&mut self, login: bool) {
let mut p = Packet::new_out(0x0);
// Protocol Version
p.write_varint(4);
// Server host
p.write_string(self.host);
// Server port
p.write_be_u16(self.addr.port);
// State
// 1 - status, 2 - login
p.write_varint(if login { 2 } else { 1 });
self.write_packet(p);
}
fn send_username(&mut self) {
let mut p = Packet::new_out(0x0);
p.write_string(self.name);
self.write_packet(p);
}
}
impl Reader for Sock {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Plain(ref mut s) => s.read(buf),
Encrypted(ref mut s) => s.read(buf)
}
}
}
impl Writer for Sock {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.write(buf),
Encrypted(ref mut s) => s.write(buf)
}
}
fn flush(&mut self) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.flush(),
Encrypted(ref mut s) => s.flush()
}
}
}
impl Reader for Option<Sock> {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Some(ref mut s) => s.read(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
}
impl Writer for Option<Sock> {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Some(ref mut s) => s.write(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
fn flush(&mut self) -> io::IoResult<()> {
match *self {
Some(ref mut s) => s.flush(),
None => Err(io::standard_error(io::OtherIoError))
}
}
} |
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
"accessToken": "{}", | random_line_split |
prefix_code.rs | #![macro_escape]
use std::mem;
use std::intrinsics::ctlz32;
use std::cmp::max;
use std::iter::range_step;
static MAX_SUPPORTED_SYMS: u32 = 1024;
static MAX_EVER_CODE_SIZE: u32 = 34;
static MAX_EXPECTED_CODE_SIZE: uint = 16;
pub struct OrdFreq {
f: u16,
s: u16
}
impl OrdFreq {
pub fn new(sym: u32, freq: u32) -> OrdFreq {
OrdFreq { s: sym as u16, f: freq as u16 }
}
pub fn freq(self) -> u32 {
self.f as u32
}
pub fn sym(self) -> u16 {
self.s
}
}
pub fn sort_symbols2<'a>(mut first: &'a mut [OrdFreq], mut second: &'a mut [OrdFreq]) -> &'a mut [OrdFreq] {
let mut hist = [0u32,..256 * 2];
for &s in first.iter() {
let f = s.freq();
hist[ (f & 0xff) as uint] += 1;
hist[256 + ((f >> 8) & 0xff) as uint] += 1;
}
let num_syms = first.len();
// If all radix-1 digits are zero, we only need one pass
let passes = if hist[256] == num_syms as u32 { 1 } else { 2 };
for pass in range(0, passes) {
let c = &mut first[0] as *mut _;
let n = &mut second[0] as *mut _;
let histp = &mut hist[pass << 8] as *mut _;
let mut offsets: [u32,..256] = unsafe { mem::uninitialized() };
let mut cur_ofs = 0;
for i in range_step(0u, 256, 2) {
offsets[i] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int) };
offsets[i + 1] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int + 1) };
}
let pass_shift = pass << 3;
let mut p = c;
let endp = unsafe { c.offset(num_syms as int) };
while p!= endp {
let mut f = unsafe { *p }.freq();
f = (f >> pass_shift) & 0xff;
let dst_offset = offsets[f as uint];
offsets[f as uint] += 1;
unsafe {
*n.offset(dst_offset as int) = *p;
p = p.offset(1);
}
}
mem::swap(&mut first, &mut second);
}
let mut prev = 0;
for i in range(0, num_syms) {
assert!(first[i].freq() >= prev);
prev = first[i].freq();
}
first
}
#[deriving(Clone)]
pub struct PrefixCode(u32);
impl PrefixCode {
#[inline]
pub fn new(code: u32, size: u8) -> PrefixCode {
PrefixCode(code + (size as u32 << 16))
}
pub fn code(self) -> u32 {
let PrefixCode(v) = self;
v & 0xffff
}
pub fn size(self) -> u32 {
let PrefixCode(v) = self;
v >> 16
}
}
#[inline]
pub fn reverse_u16(mut v: u32) -> u32 {
v = (v & 0xff00) >> 8 | (v & 0x00ff) << 8;
v = (v & 0xf0f0) >> 4 | (v & 0x0f0f) << 4;
v = (v & 0xcccc) >> 2 | (v & 0x3333) << 2;
v = (v & 0xaaaa) >> 1 | (v & 0x5555) << 1;
v
}
pub fn generate_codes(sizes: &[u8], codes: &mut [PrefixCode]) -> bool {
let mut num_codes: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
code += num_codes[i];
code <<= 1;
}
if code!= (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for i in range(0, sizes.len()) {
let c = sizes[i];
let code = next_code[c as uint];
next_code[c as uint] += 1;
let rev_code = reverse_u16(code) >> (16 - c as uint);
codes[i] = PrefixCode::new(rev_code, c);
}
true
}
pub fn generate_codes_for_decode(
sizes: &[u8],
codes: &mut [PrefixCode],
dec_first_offset: &mut [u16,..17],
dec_max_code: &mut [u32,..18],
dec_offset_to_sym: &mut [u16],
decoder_table: &mut [u16],
max_code_size: u32) -> bool {
let mut num_codes: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
let mut offset = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
dec_first_offset[i] = offset as u16 - code as u16;
code += num_codes[i];
dec_max_code[i] = code << (16 - i);
code <<= 1;
offset += num_codes[i];
}
dec_max_code[17] = 0x10000;
if code!= (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 |
}
}
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, sizes.len()) {
let s = sizes[i] as uint;
let code = next_code[s];
next_code[s] += 1;
let offset = (code as u16 + dec_first_offset[s]) as uint;
dec_offset_to_sym[offset] = i as u16;
let rev_code = reverse_u16(code) >> (16 - s);
codes[i] = PrefixCode::new(rev_code, s as u8);
if s as u32 <= max_code_size {
let step = 1 << s;
let code = rev_code;
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
true
}
pub fn generate_decoder_table(codes: &[PrefixCode], decoder_table: &mut [u16], max_code_size: u32) {
assert!(decoder_table.len() == (1 << max_code_size as uint));
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, codes.len()) {
if codes[i].size() as u32 <= max_code_size {
assert!(codes[i].size() > 0);
let step = 1 << codes[i].size() as uint;
let code = codes[i].code();
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
}
static POLAR_MAX_SYMBOLS: u32 = 256;
pub fn polar_code_lengths(symbols: &[OrdFreq], sizes: &mut [u8]) -> u32 {
unsafe {
let mut tmp_freq: [u32,..POLAR_MAX_SYMBOLS] = mem::uninitialized();
let mut orig_total_freq = 0;
let mut cur_total = 0;
let mut start_index = 0;
let mut max_code_size = 0;
let num_syms = symbols.len() as u32;
for i in range(0, symbols.len()) {
let sym_freq = symbols[symbols.len() - 1 - i].freq();
//let sym_freq = symbols[i].freq();
let sym_len = 31 - ctlz32(sym_freq);
let adjusted_sym_freq = 1 << sym_len as uint;
orig_total_freq += sym_freq;
tmp_freq[i] = adjusted_sym_freq;
cur_total += adjusted_sym_freq;
}
let mut tree_total = 1 << (31 - ctlz32(orig_total_freq)) as uint;
if tree_total < orig_total_freq {
tree_total <<= 1;
}
while cur_total < tree_total && start_index < num_syms {
let mut i = start_index;
while i < num_syms {
let freq = tmp_freq[i as uint];
if cur_total + freq <= tree_total {
tmp_freq[i as uint] += freq;
cur_total += freq;
if cur_total == tree_total {
break;
}
} else {
start_index = i + 1;
}
i += 1;
}
}
assert_eq!(cur_total, tree_total);
let tree_total_bits = 32 - ctlz32(tree_total);
for i in range(0, symbols.len()) {
let codesize = tree_total_bits - (32 - ctlz32(tmp_freq[i]));
max_code_size = max(max_code_size, codesize);
sizes[symbols[symbols.len() - 1 - i].sym() as uint] = codesize as u8;
//sizes[symbols[i].sym() as uint] = codesize as u8;
}
max_code_size
}
}
pub trait PrefixModel {
fn incr(&mut self, sym: u32);
fn update(&mut self, for_encoding: bool);
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32);
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32;
}
pub trait BitWriter {
fn push_bits_uni(&mut self, bits: u32, count: u32);
}
pub trait BitReader {
fn pull_bits_uni(&mut self, count: u32) -> u32;
fn peek_bits_uni16(&self) -> u16;
fn skip_bits_uni(&mut self, count: u32);
}
#[deriving(Copy)]
pub struct Foo {
f: [u32,..256]
}
impl Clone for Foo {
fn clone(&self) -> Foo {
Foo {
f: self.f
}
}
}
macro_rules! define_polar_model(
($name: ident, $symbol_count: expr) => {
//#[deriving(Clone)]
pub struct $name {
freq: [u32,..$symbol_count],
codes: [::prefix_code::PrefixCode,..$symbol_count],
decoder_table: [u16,..(1 << 9)],
sum: u32,
next_rebuild: u32,
dec_max_code: [u32,..18],
dec_first_offset: [u16,..17],
dec_offset_to_sym: [u16,..$symbol_count]
}
impl Clone for $name {
fn clone(&self) -> $name {
$name {
freq: self.freq,
codes: self.codes,
decoder_table: self.decoder_table,
sum: self.sum,
next_rebuild: self.next_rebuild,
dec_max_code: self.dec_max_code,
dec_first_offset: self.dec_first_offset,
dec_offset_to_sym: self.dec_offset_to_sym
}
}
}
impl $name {
pub fn new() -> $name {
$name {
freq: [1u32,..$symbol_count],
codes: [::prefix_code::PrefixCode::new(0, 0),..$symbol_count],
decoder_table: unsafe { ::std::mem::uninitialized() },
sum: $symbol_count,
next_rebuild: $symbol_count,
dec_max_code: unsafe { ::std::mem::uninitialized() },
dec_first_offset: unsafe { ::std::mem::uninitialized() },
dec_offset_to_sym: unsafe { ::std::mem::uninitialized() }
}
}
pub fn print_codes(&self) {
for i in range(0, self.codes.len()) {
let c = self.codes[i];
print!("{} ->", i);
for b in range(0, c.size() as uint) {
print!("{}", (c.code() >> b) & 1);
}
println!("");
}
for p in range(0u, 256) {
let i = self.decoder_table[p];
for b in range(0u, 16).rev() {
print!("{}", (p >> b) & 1);
}
println!(" -> {}", i);
}
}
}
impl ::prefix_code::PrefixModel for $name {
fn incr(&mut self, sym: u32) {
self.freq[sym as uint] += 1;
self.sum += 1;
}
fn update(&mut self, for_encoding: bool) {
if self.sum >= self.next_rebuild {
//println!("Rebuilding at {}", self.sum);
let mut lengths = [0u8,..$symbol_count];
let mut symbols: [::prefix_code::OrdFreq,..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let mut symbols2: [::prefix_code::OrdFreq,..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let shift = unsafe { (32 - ::std::intrinsics::ctlz32(self.sum >> 16)) as uint };
let offset = (1 << shift) - 1;
for i in range(0u, $symbol_count) {
symbols[i] = ::prefix_code::OrdFreq::new(
i as u32,
(self.freq[i] + offset) >> shift);
}
let sorted_symbols = ::prefix_code::sort_symbols2(symbols, symbols2);
::prefix_code::polar_code_lengths(sorted_symbols, lengths);
if!for_encoding {
::prefix_code::generate_codes_for_decode(
lengths,
self.codes,
&mut self.dec_first_offset,
&mut self.dec_max_code,
self.dec_offset_to_sym,
self.decoder_table,
9);
} else {
::prefix_code::generate_codes(lengths, self.codes);
}
//if self.sum <= 10 * ($symbol_count) {
self.next_rebuild = self.sum * 3;
/*
} else {
self.next_rebuild = self.sum + ($symbol_count) * 20;
}*/
}
}
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32) {
let c = self.codes[sym as uint];
bw.push_bits_uni(c.code(), c.size());
}
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32 {
let peek = br.peek_bits_uni16();
let mut sym = self.decoder_table[(peek & 0x1ff) as uint] as u32;
if sym < 0xffff {
br.skip_bits_uni(self.codes[sym as uint].size());
sym
} else {
let k = ::prefix_code::reverse_u16(peek as u32);
let mut s = 10;
while k >= self.dec_max_code[s] {
s += 1;
}
assert!(s!= 17);
let offset = ((k >> (16 - s)) as u16 + self.dec_first_offset[s]) as uint;
sym = self.dec_offset_to_sym[offset] as u32;
br.skip_bits_uni(s as u32);
sym
}
}
}
}
)
#[cfg(test)]
mod test {
use std::intrinsics::ctlz32;
use prefix_code::{OrdFreq, PrefixCode, PrefixModel, sort_symbols, polar_code_lengths, generate_codes};
use std::io::{MemWriter, MemReader, BufReader, File};
use std::path::Path;
use hybrid_coder::{HybridWriter, HybridReader};
use bit_models::{BitModelFast};
use test::Bencher;
define_polar_model!(TestModel, 10)
define_polar_model!(ByteModel, 256)
#[test]
fn test_ctlz32() {
unsafe {
assert_eq!(5, ctlz32(0xffffffff >> 5));
}
}
#[test]
fn polar_small() {
let mut lengths = [0u8,..10];
let mut codes = [PrefixCode::new(0, 0),..10];
let mut symbols = [OrdFreq::new(0, 0),..10];
for i in range(0u32, 10) {
symbols[i as uint] = OrdFreq::new(i, (i * 2 + 1));
}
sort_symbols(symbols);
polar_code_lengths(symbols, lengths);
generate_codes(lengths, codes);
println!("lengths: {}", lengths.as_slice());
}
fn number(mut x: u32) -> u32 {
x *= 1362650787;
let mut sum = 0;
for i in range(3u, 12) {
if x < (1 << i) {
sum += 1;
}
}
sum
}
#[test]
fn polar_model() {
let mut w = MemWriter::new();
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hw = HybridWriter::new(&mut w);
for i in range(0u32, 1000) {
model.update(true);
model.write(&mut hw, number(i));
hw.push_bit_model(0, &mut bm);
model.incr(number(i));
}
hw.finalize();
}
let mut r = MemReader::new(w.unwrap());
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hr = HybridReader::new(&mut r);
for i in range(0u32, 1000) {
model.update(false);
assert_eq!(number(i), model.read(&mut hr));
let res = hr.pull_bit_model(&mut bm);
if res!= 0 {
println!("at {}", i);
fail!();
}
model.incr(number(i));
}
}
}
#[bench]
fn bench_decode(b: &mut Bencher) {
let mut w = MemWriter::new();
let contents = File::open(&Path::new("/home/glip/enwik8")).unwrap().read_exact(1000000).unwrap();
{
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hw = HybridWriter::new(&mut w);
let mut context = 0u8;
for &c in contents.iter() {
let mut m = models.get_mut(context as uint);
m.update(true);
m.write(&mut hw, c as u32);
m.incr(c as u32);
context = c;
}
hw.finalize();
}
//println!("Written {} bytes / {}", w.get_ref().len(), contents.len());
let compressed = w.unwrap();
b.iter(|| {
let mut r = BufReader::new(compressed.as_slice());
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hr = HybridReader::new(&mut r);
let mut context = 0u8;
for i in range(0, contents.len()) {
let mut m = models.get_mut(context as uint);
m.update(false);
let read = m.read(&mut hr) as u8;
m.incr(read as u32);
context = read;
}
});
}
}
| {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
} | conditional_block |
prefix_code.rs | #![macro_escape]
use std::mem;
use std::intrinsics::ctlz32;
use std::cmp::max;
use std::iter::range_step;
static MAX_SUPPORTED_SYMS: u32 = 1024;
static MAX_EVER_CODE_SIZE: u32 = 34;
static MAX_EXPECTED_CODE_SIZE: uint = 16;
pub struct OrdFreq {
f: u16,
s: u16
}
impl OrdFreq {
pub fn new(sym: u32, freq: u32) -> OrdFreq {
OrdFreq { s: sym as u16, f: freq as u16 }
}
pub fn freq(self) -> u32 {
self.f as u32
}
pub fn | (self) -> u16 {
self.s
}
}
pub fn sort_symbols2<'a>(mut first: &'a mut [OrdFreq], mut second: &'a mut [OrdFreq]) -> &'a mut [OrdFreq] {
let mut hist = [0u32,..256 * 2];
for &s in first.iter() {
let f = s.freq();
hist[ (f & 0xff) as uint] += 1;
hist[256 + ((f >> 8) & 0xff) as uint] += 1;
}
let num_syms = first.len();
// If all radix-1 digits are zero, we only need one pass
let passes = if hist[256] == num_syms as u32 { 1 } else { 2 };
for pass in range(0, passes) {
let c = &mut first[0] as *mut _;
let n = &mut second[0] as *mut _;
let histp = &mut hist[pass << 8] as *mut _;
let mut offsets: [u32,..256] = unsafe { mem::uninitialized() };
let mut cur_ofs = 0;
for i in range_step(0u, 256, 2) {
offsets[i] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int) };
offsets[i + 1] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int + 1) };
}
let pass_shift = pass << 3;
let mut p = c;
let endp = unsafe { c.offset(num_syms as int) };
while p!= endp {
let mut f = unsafe { *p }.freq();
f = (f >> pass_shift) & 0xff;
let dst_offset = offsets[f as uint];
offsets[f as uint] += 1;
unsafe {
*n.offset(dst_offset as int) = *p;
p = p.offset(1);
}
}
mem::swap(&mut first, &mut second);
}
let mut prev = 0;
for i in range(0, num_syms) {
assert!(first[i].freq() >= prev);
prev = first[i].freq();
}
first
}
#[deriving(Clone)]
pub struct PrefixCode(u32);
impl PrefixCode {
#[inline]
pub fn new(code: u32, size: u8) -> PrefixCode {
PrefixCode(code + (size as u32 << 16))
}
pub fn code(self) -> u32 {
let PrefixCode(v) = self;
v & 0xffff
}
pub fn size(self) -> u32 {
let PrefixCode(v) = self;
v >> 16
}
}
#[inline]
pub fn reverse_u16(mut v: u32) -> u32 {
v = (v & 0xff00) >> 8 | (v & 0x00ff) << 8;
v = (v & 0xf0f0) >> 4 | (v & 0x0f0f) << 4;
v = (v & 0xcccc) >> 2 | (v & 0x3333) << 2;
v = (v & 0xaaaa) >> 1 | (v & 0x5555) << 1;
v
}
pub fn generate_codes(sizes: &[u8], codes: &mut [PrefixCode]) -> bool {
let mut num_codes: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
code += num_codes[i];
code <<= 1;
}
if code!= (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for i in range(0, sizes.len()) {
let c = sizes[i];
let code = next_code[c as uint];
next_code[c as uint] += 1;
let rev_code = reverse_u16(code) >> (16 - c as uint);
codes[i] = PrefixCode::new(rev_code, c);
}
true
}
pub fn generate_codes_for_decode(
sizes: &[u8],
codes: &mut [PrefixCode],
dec_first_offset: &mut [u16,..17],
dec_max_code: &mut [u32,..18],
dec_offset_to_sym: &mut [u16],
decoder_table: &mut [u16],
max_code_size: u32) -> bool {
let mut num_codes: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
let mut offset = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
dec_first_offset[i] = offset as u16 - code as u16;
code += num_codes[i];
dec_max_code[i] = code << (16 - i);
code <<= 1;
offset += num_codes[i];
}
dec_max_code[17] = 0x10000;
if code!= (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, sizes.len()) {
let s = sizes[i] as uint;
let code = next_code[s];
next_code[s] += 1;
let offset = (code as u16 + dec_first_offset[s]) as uint;
dec_offset_to_sym[offset] = i as u16;
let rev_code = reverse_u16(code) >> (16 - s);
codes[i] = PrefixCode::new(rev_code, s as u8);
if s as u32 <= max_code_size {
let step = 1 << s;
let code = rev_code;
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
true
}
pub fn generate_decoder_table(codes: &[PrefixCode], decoder_table: &mut [u16], max_code_size: u32) {
assert!(decoder_table.len() == (1 << max_code_size as uint));
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, codes.len()) {
if codes[i].size() as u32 <= max_code_size {
assert!(codes[i].size() > 0);
let step = 1 << codes[i].size() as uint;
let code = codes[i].code();
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
}
static POLAR_MAX_SYMBOLS: u32 = 256;
pub fn polar_code_lengths(symbols: &[OrdFreq], sizes: &mut [u8]) -> u32 {
unsafe {
let mut tmp_freq: [u32,..POLAR_MAX_SYMBOLS] = mem::uninitialized();
let mut orig_total_freq = 0;
let mut cur_total = 0;
let mut start_index = 0;
let mut max_code_size = 0;
let num_syms = symbols.len() as u32;
for i in range(0, symbols.len()) {
let sym_freq = symbols[symbols.len() - 1 - i].freq();
//let sym_freq = symbols[i].freq();
let sym_len = 31 - ctlz32(sym_freq);
let adjusted_sym_freq = 1 << sym_len as uint;
orig_total_freq += sym_freq;
tmp_freq[i] = adjusted_sym_freq;
cur_total += adjusted_sym_freq;
}
let mut tree_total = 1 << (31 - ctlz32(orig_total_freq)) as uint;
if tree_total < orig_total_freq {
tree_total <<= 1;
}
while cur_total < tree_total && start_index < num_syms {
let mut i = start_index;
while i < num_syms {
let freq = tmp_freq[i as uint];
if cur_total + freq <= tree_total {
tmp_freq[i as uint] += freq;
cur_total += freq;
if cur_total == tree_total {
break;
}
} else {
start_index = i + 1;
}
i += 1;
}
}
assert_eq!(cur_total, tree_total);
let tree_total_bits = 32 - ctlz32(tree_total);
for i in range(0, symbols.len()) {
let codesize = tree_total_bits - (32 - ctlz32(tmp_freq[i]));
max_code_size = max(max_code_size, codesize);
sizes[symbols[symbols.len() - 1 - i].sym() as uint] = codesize as u8;
//sizes[symbols[i].sym() as uint] = codesize as u8;
}
max_code_size
}
}
pub trait PrefixModel {
fn incr(&mut self, sym: u32);
fn update(&mut self, for_encoding: bool);
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32);
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32;
}
pub trait BitWriter {
fn push_bits_uni(&mut self, bits: u32, count: u32);
}
pub trait BitReader {
fn pull_bits_uni(&mut self, count: u32) -> u32;
fn peek_bits_uni16(&self) -> u16;
fn skip_bits_uni(&mut self, count: u32);
}
#[deriving(Copy)]
pub struct Foo {
f: [u32,..256]
}
impl Clone for Foo {
fn clone(&self) -> Foo {
Foo {
f: self.f
}
}
}
macro_rules! define_polar_model(
($name: ident, $symbol_count: expr) => {
//#[deriving(Clone)]
pub struct $name {
freq: [u32,..$symbol_count],
codes: [::prefix_code::PrefixCode,..$symbol_count],
decoder_table: [u16,..(1 << 9)],
sum: u32,
next_rebuild: u32,
dec_max_code: [u32,..18],
dec_first_offset: [u16,..17],
dec_offset_to_sym: [u16,..$symbol_count]
}
impl Clone for $name {
fn clone(&self) -> $name {
$name {
freq: self.freq,
codes: self.codes,
decoder_table: self.decoder_table,
sum: self.sum,
next_rebuild: self.next_rebuild,
dec_max_code: self.dec_max_code,
dec_first_offset: self.dec_first_offset,
dec_offset_to_sym: self.dec_offset_to_sym
}
}
}
impl $name {
pub fn new() -> $name {
$name {
freq: [1u32,..$symbol_count],
codes: [::prefix_code::PrefixCode::new(0, 0),..$symbol_count],
decoder_table: unsafe { ::std::mem::uninitialized() },
sum: $symbol_count,
next_rebuild: $symbol_count,
dec_max_code: unsafe { ::std::mem::uninitialized() },
dec_first_offset: unsafe { ::std::mem::uninitialized() },
dec_offset_to_sym: unsafe { ::std::mem::uninitialized() }
}
}
pub fn print_codes(&self) {
for i in range(0, self.codes.len()) {
let c = self.codes[i];
print!("{} ->", i);
for b in range(0, c.size() as uint) {
print!("{}", (c.code() >> b) & 1);
}
println!("");
}
for p in range(0u, 256) {
let i = self.decoder_table[p];
for b in range(0u, 16).rev() {
print!("{}", (p >> b) & 1);
}
println!(" -> {}", i);
}
}
}
impl ::prefix_code::PrefixModel for $name {
fn incr(&mut self, sym: u32) {
self.freq[sym as uint] += 1;
self.sum += 1;
}
fn update(&mut self, for_encoding: bool) {
if self.sum >= self.next_rebuild {
//println!("Rebuilding at {}", self.sum);
let mut lengths = [0u8,..$symbol_count];
let mut symbols: [::prefix_code::OrdFreq,..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let mut symbols2: [::prefix_code::OrdFreq,..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let shift = unsafe { (32 - ::std::intrinsics::ctlz32(self.sum >> 16)) as uint };
let offset = (1 << shift) - 1;
for i in range(0u, $symbol_count) {
symbols[i] = ::prefix_code::OrdFreq::new(
i as u32,
(self.freq[i] + offset) >> shift);
}
let sorted_symbols = ::prefix_code::sort_symbols2(symbols, symbols2);
::prefix_code::polar_code_lengths(sorted_symbols, lengths);
if!for_encoding {
::prefix_code::generate_codes_for_decode(
lengths,
self.codes,
&mut self.dec_first_offset,
&mut self.dec_max_code,
self.dec_offset_to_sym,
self.decoder_table,
9);
} else {
::prefix_code::generate_codes(lengths, self.codes);
}
//if self.sum <= 10 * ($symbol_count) {
self.next_rebuild = self.sum * 3;
/*
} else {
self.next_rebuild = self.sum + ($symbol_count) * 20;
}*/
}
}
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32) {
let c = self.codes[sym as uint];
bw.push_bits_uni(c.code(), c.size());
}
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32 {
let peek = br.peek_bits_uni16();
let mut sym = self.decoder_table[(peek & 0x1ff) as uint] as u32;
if sym < 0xffff {
br.skip_bits_uni(self.codes[sym as uint].size());
sym
} else {
let k = ::prefix_code::reverse_u16(peek as u32);
let mut s = 10;
while k >= self.dec_max_code[s] {
s += 1;
}
assert!(s!= 17);
let offset = ((k >> (16 - s)) as u16 + self.dec_first_offset[s]) as uint;
sym = self.dec_offset_to_sym[offset] as u32;
br.skip_bits_uni(s as u32);
sym
}
}
}
}
)
#[cfg(test)]
mod test {
use std::intrinsics::ctlz32;
use prefix_code::{OrdFreq, PrefixCode, PrefixModel, sort_symbols, polar_code_lengths, generate_codes};
use std::io::{MemWriter, MemReader, BufReader, File};
use std::path::Path;
use hybrid_coder::{HybridWriter, HybridReader};
use bit_models::{BitModelFast};
use test::Bencher;
define_polar_model!(TestModel, 10)
define_polar_model!(ByteModel, 256)
#[test]
fn test_ctlz32() {
unsafe {
assert_eq!(5, ctlz32(0xffffffff >> 5));
}
}
#[test]
fn polar_small() {
let mut lengths = [0u8,..10];
let mut codes = [PrefixCode::new(0, 0),..10];
let mut symbols = [OrdFreq::new(0, 0),..10];
for i in range(0u32, 10) {
symbols[i as uint] = OrdFreq::new(i, (i * 2 + 1));
}
sort_symbols(symbols);
polar_code_lengths(symbols, lengths);
generate_codes(lengths, codes);
println!("lengths: {}", lengths.as_slice());
}
fn number(mut x: u32) -> u32 {
x *= 1362650787;
let mut sum = 0;
for i in range(3u, 12) {
if x < (1 << i) {
sum += 1;
}
}
sum
}
#[test]
fn polar_model() {
let mut w = MemWriter::new();
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hw = HybridWriter::new(&mut w);
for i in range(0u32, 1000) {
model.update(true);
model.write(&mut hw, number(i));
hw.push_bit_model(0, &mut bm);
model.incr(number(i));
}
hw.finalize();
}
let mut r = MemReader::new(w.unwrap());
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hr = HybridReader::new(&mut r);
for i in range(0u32, 1000) {
model.update(false);
assert_eq!(number(i), model.read(&mut hr));
let res = hr.pull_bit_model(&mut bm);
if res!= 0 {
println!("at {}", i);
fail!();
}
model.incr(number(i));
}
}
}
#[bench]
fn bench_decode(b: &mut Bencher) {
let mut w = MemWriter::new();
let contents = File::open(&Path::new("/home/glip/enwik8")).unwrap().read_exact(1000000).unwrap();
{
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hw = HybridWriter::new(&mut w);
let mut context = 0u8;
for &c in contents.iter() {
let mut m = models.get_mut(context as uint);
m.update(true);
m.write(&mut hw, c as u32);
m.incr(c as u32);
context = c;
}
hw.finalize();
}
//println!("Written {} bytes / {}", w.get_ref().len(), contents.len());
let compressed = w.unwrap();
b.iter(|| {
let mut r = BufReader::new(compressed.as_slice());
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hr = HybridReader::new(&mut r);
let mut context = 0u8;
for i in range(0, contents.len()) {
let mut m = models.get_mut(context as uint);
m.update(false);
let read = m.read(&mut hr) as u8;
m.incr(read as u32);
context = read;
}
});
}
}
| sym | identifier_name |
prefix_code.rs | #![macro_escape]
use std::mem;
use std::intrinsics::ctlz32;
use std::cmp::max;
use std::iter::range_step;
static MAX_SUPPORTED_SYMS: u32 = 1024;
static MAX_EVER_CODE_SIZE: u32 = 34;
static MAX_EXPECTED_CODE_SIZE: uint = 16;
pub struct OrdFreq {
f: u16,
s: u16
}
impl OrdFreq {
pub fn new(sym: u32, freq: u32) -> OrdFreq {
OrdFreq { s: sym as u16, f: freq as u16 }
}
pub fn freq(self) -> u32 {
self.f as u32
}
pub fn sym(self) -> u16 {
self.s
}
}
pub fn sort_symbols2<'a>(mut first: &'a mut [OrdFreq], mut second: &'a mut [OrdFreq]) -> &'a mut [OrdFreq] {
let mut hist = [0u32,..256 * 2];
for &s in first.iter() {
let f = s.freq();
hist[ (f & 0xff) as uint] += 1;
hist[256 + ((f >> 8) & 0xff) as uint] += 1;
}
let num_syms = first.len();
// If all radix-1 digits are zero, we only need one pass
let passes = if hist[256] == num_syms as u32 { 1 } else { 2 };
for pass in range(0, passes) {
let c = &mut first[0] as *mut _;
let n = &mut second[0] as *mut _;
let histp = &mut hist[pass << 8] as *mut _;
let mut offsets: [u32,..256] = unsafe { mem::uninitialized() };
let mut cur_ofs = 0;
for i in range_step(0u, 256, 2) {
offsets[i] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int) };
offsets[i + 1] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int + 1) };
}
let pass_shift = pass << 3;
let mut p = c;
let endp = unsafe { c.offset(num_syms as int) };
while p!= endp {
let mut f = unsafe { *p }.freq();
f = (f >> pass_shift) & 0xff;
let dst_offset = offsets[f as uint];
offsets[f as uint] += 1;
unsafe {
*n.offset(dst_offset as int) = *p;
p = p.offset(1);
}
}
mem::swap(&mut first, &mut second);
}
let mut prev = 0;
for i in range(0, num_syms) {
assert!(first[i].freq() >= prev);
prev = first[i].freq();
}
first
}
#[deriving(Clone)]
pub struct PrefixCode(u32);
impl PrefixCode {
#[inline]
pub fn new(code: u32, size: u8) -> PrefixCode {
PrefixCode(code + (size as u32 << 16))
}
pub fn code(self) -> u32 {
let PrefixCode(v) = self;
v & 0xffff
}
pub fn size(self) -> u32 {
let PrefixCode(v) = self;
v >> 16
}
}
#[inline]
pub fn reverse_u16(mut v: u32) -> u32 {
v = (v & 0xff00) >> 8 | (v & 0x00ff) << 8;
v = (v & 0xf0f0) >> 4 | (v & 0x0f0f) << 4;
v = (v & 0xcccc) >> 2 | (v & 0x3333) << 2;
v = (v & 0xaaaa) >> 1 | (v & 0x5555) << 1;
v
}
pub fn generate_codes(sizes: &[u8], codes: &mut [PrefixCode]) -> bool {
let mut num_codes: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
code += num_codes[i];
code <<= 1;
}
if code!= (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for i in range(0, sizes.len()) {
let c = sizes[i];
let code = next_code[c as uint];
next_code[c as uint] += 1;
let rev_code = reverse_u16(code) >> (16 - c as uint);
codes[i] = PrefixCode::new(rev_code, c);
}
true
}
pub fn generate_codes_for_decode(
sizes: &[u8],
codes: &mut [PrefixCode],
dec_first_offset: &mut [u16,..17],
dec_max_code: &mut [u32,..18],
dec_offset_to_sym: &mut [u16],
decoder_table: &mut [u16],
max_code_size: u32) -> bool {
let mut num_codes: [u32,..MAX_EXPECTED_CODE_SIZE + 1] = [0,..MAX_EXPECTED_CODE_SIZE + 1]; |
let mut code = 0u32;
let mut offset = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
dec_first_offset[i] = offset as u16 - code as u16;
code += num_codes[i];
dec_max_code[i] = code << (16 - i);
code <<= 1;
offset += num_codes[i];
}
dec_max_code[17] = 0x10000;
if code!= (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, sizes.len()) {
let s = sizes[i] as uint;
let code = next_code[s];
next_code[s] += 1;
let offset = (code as u16 + dec_first_offset[s]) as uint;
dec_offset_to_sym[offset] = i as u16;
let rev_code = reverse_u16(code) >> (16 - s);
codes[i] = PrefixCode::new(rev_code, s as u8);
if s as u32 <= max_code_size {
let step = 1 << s;
let code = rev_code;
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
true
}
pub fn generate_decoder_table(codes: &[PrefixCode], decoder_table: &mut [u16], max_code_size: u32) {
assert!(decoder_table.len() == (1 << max_code_size as uint));
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, codes.len()) {
if codes[i].size() as u32 <= max_code_size {
assert!(codes[i].size() > 0);
let step = 1 << codes[i].size() as uint;
let code = codes[i].code();
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
}
static POLAR_MAX_SYMBOLS: u32 = 256;
pub fn polar_code_lengths(symbols: &[OrdFreq], sizes: &mut [u8]) -> u32 {
unsafe {
let mut tmp_freq: [u32,..POLAR_MAX_SYMBOLS] = mem::uninitialized();
let mut orig_total_freq = 0;
let mut cur_total = 0;
let mut start_index = 0;
let mut max_code_size = 0;
let num_syms = symbols.len() as u32;
for i in range(0, symbols.len()) {
let sym_freq = symbols[symbols.len() - 1 - i].freq();
//let sym_freq = symbols[i].freq();
let sym_len = 31 - ctlz32(sym_freq);
let adjusted_sym_freq = 1 << sym_len as uint;
orig_total_freq += sym_freq;
tmp_freq[i] = adjusted_sym_freq;
cur_total += adjusted_sym_freq;
}
let mut tree_total = 1 << (31 - ctlz32(orig_total_freq)) as uint;
if tree_total < orig_total_freq {
tree_total <<= 1;
}
while cur_total < tree_total && start_index < num_syms {
let mut i = start_index;
while i < num_syms {
let freq = tmp_freq[i as uint];
if cur_total + freq <= tree_total {
tmp_freq[i as uint] += freq;
cur_total += freq;
if cur_total == tree_total {
break;
}
} else {
start_index = i + 1;
}
i += 1;
}
}
assert_eq!(cur_total, tree_total);
let tree_total_bits = 32 - ctlz32(tree_total);
for i in range(0, symbols.len()) {
let codesize = tree_total_bits - (32 - ctlz32(tmp_freq[i]));
max_code_size = max(max_code_size, codesize);
sizes[symbols[symbols.len() - 1 - i].sym() as uint] = codesize as u8;
//sizes[symbols[i].sym() as uint] = codesize as u8;
}
max_code_size
}
}
pub trait PrefixModel {
fn incr(&mut self, sym: u32);
fn update(&mut self, for_encoding: bool);
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32);
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32;
}
pub trait BitWriter {
fn push_bits_uni(&mut self, bits: u32, count: u32);
}
pub trait BitReader {
fn pull_bits_uni(&mut self, count: u32) -> u32;
fn peek_bits_uni16(&self) -> u16;
fn skip_bits_uni(&mut self, count: u32);
}
#[deriving(Copy)]
pub struct Foo {
f: [u32,..256]
}
impl Clone for Foo {
fn clone(&self) -> Foo {
Foo {
f: self.f
}
}
}
macro_rules! define_polar_model(
($name: ident, $symbol_count: expr) => {
//#[deriving(Clone)]
pub struct $name {
freq: [u32,..$symbol_count],
codes: [::prefix_code::PrefixCode,..$symbol_count],
decoder_table: [u16,..(1 << 9)],
sum: u32,
next_rebuild: u32,
dec_max_code: [u32,..18],
dec_first_offset: [u16,..17],
dec_offset_to_sym: [u16,..$symbol_count]
}
impl Clone for $name {
fn clone(&self) -> $name {
$name {
freq: self.freq,
codes: self.codes,
decoder_table: self.decoder_table,
sum: self.sum,
next_rebuild: self.next_rebuild,
dec_max_code: self.dec_max_code,
dec_first_offset: self.dec_first_offset,
dec_offset_to_sym: self.dec_offset_to_sym
}
}
}
impl $name {
pub fn new() -> $name {
$name {
freq: [1u32,..$symbol_count],
codes: [::prefix_code::PrefixCode::new(0, 0),..$symbol_count],
decoder_table: unsafe { ::std::mem::uninitialized() },
sum: $symbol_count,
next_rebuild: $symbol_count,
dec_max_code: unsafe { ::std::mem::uninitialized() },
dec_first_offset: unsafe { ::std::mem::uninitialized() },
dec_offset_to_sym: unsafe { ::std::mem::uninitialized() }
}
}
pub fn print_codes(&self) {
for i in range(0, self.codes.len()) {
let c = self.codes[i];
print!("{} ->", i);
for b in range(0, c.size() as uint) {
print!("{}", (c.code() >> b) & 1);
}
println!("");
}
for p in range(0u, 256) {
let i = self.decoder_table[p];
for b in range(0u, 16).rev() {
print!("{}", (p >> b) & 1);
}
println!(" -> {}", i);
}
}
}
impl ::prefix_code::PrefixModel for $name {
fn incr(&mut self, sym: u32) {
self.freq[sym as uint] += 1;
self.sum += 1;
}
fn update(&mut self, for_encoding: bool) {
if self.sum >= self.next_rebuild {
//println!("Rebuilding at {}", self.sum);
let mut lengths = [0u8,..$symbol_count];
let mut symbols: [::prefix_code::OrdFreq,..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let mut symbols2: [::prefix_code::OrdFreq,..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let shift = unsafe { (32 - ::std::intrinsics::ctlz32(self.sum >> 16)) as uint };
let offset = (1 << shift) - 1;
for i in range(0u, $symbol_count) {
symbols[i] = ::prefix_code::OrdFreq::new(
i as u32,
(self.freq[i] + offset) >> shift);
}
let sorted_symbols = ::prefix_code::sort_symbols2(symbols, symbols2);
::prefix_code::polar_code_lengths(sorted_symbols, lengths);
if!for_encoding {
::prefix_code::generate_codes_for_decode(
lengths,
self.codes,
&mut self.dec_first_offset,
&mut self.dec_max_code,
self.dec_offset_to_sym,
self.decoder_table,
9);
} else {
::prefix_code::generate_codes(lengths, self.codes);
}
//if self.sum <= 10 * ($symbol_count) {
self.next_rebuild = self.sum * 3;
/*
} else {
self.next_rebuild = self.sum + ($symbol_count) * 20;
}*/
}
}
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32) {
let c = self.codes[sym as uint];
bw.push_bits_uni(c.code(), c.size());
}
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32 {
let peek = br.peek_bits_uni16();
let mut sym = self.decoder_table[(peek & 0x1ff) as uint] as u32;
if sym < 0xffff {
br.skip_bits_uni(self.codes[sym as uint].size());
sym
} else {
let k = ::prefix_code::reverse_u16(peek as u32);
let mut s = 10;
while k >= self.dec_max_code[s] {
s += 1;
}
assert!(s!= 17);
let offset = ((k >> (16 - s)) as u16 + self.dec_first_offset[s]) as uint;
sym = self.dec_offset_to_sym[offset] as u32;
br.skip_bits_uni(s as u32);
sym
}
}
}
}
)
#[cfg(test)]
mod test {
use std::intrinsics::ctlz32;
use prefix_code::{OrdFreq, PrefixCode, PrefixModel, sort_symbols, polar_code_lengths, generate_codes};
use std::io::{MemWriter, MemReader, BufReader, File};
use std::path::Path;
use hybrid_coder::{HybridWriter, HybridReader};
use bit_models::{BitModelFast};
use test::Bencher;
define_polar_model!(TestModel, 10)
define_polar_model!(ByteModel, 256)
#[test]
fn test_ctlz32() {
unsafe {
assert_eq!(5, ctlz32(0xffffffff >> 5));
}
}
#[test]
fn polar_small() {
let mut lengths = [0u8,..10];
let mut codes = [PrefixCode::new(0, 0),..10];
let mut symbols = [OrdFreq::new(0, 0),..10];
for i in range(0u32, 10) {
symbols[i as uint] = OrdFreq::new(i, (i * 2 + 1));
}
sort_symbols(symbols);
polar_code_lengths(symbols, lengths);
generate_codes(lengths, codes);
println!("lengths: {}", lengths.as_slice());
}
fn number(mut x: u32) -> u32 {
x *= 1362650787;
let mut sum = 0;
for i in range(3u, 12) {
if x < (1 << i) {
sum += 1;
}
}
sum
}
#[test]
fn polar_model() {
let mut w = MemWriter::new();
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hw = HybridWriter::new(&mut w);
for i in range(0u32, 1000) {
model.update(true);
model.write(&mut hw, number(i));
hw.push_bit_model(0, &mut bm);
model.incr(number(i));
}
hw.finalize();
}
let mut r = MemReader::new(w.unwrap());
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hr = HybridReader::new(&mut r);
for i in range(0u32, 1000) {
model.update(false);
assert_eq!(number(i), model.read(&mut hr));
let res = hr.pull_bit_model(&mut bm);
if res!= 0 {
println!("at {}", i);
fail!();
}
model.incr(number(i));
}
}
}
#[bench]
fn bench_decode(b: &mut Bencher) {
let mut w = MemWriter::new();
let contents = File::open(&Path::new("/home/glip/enwik8")).unwrap().read_exact(1000000).unwrap();
{
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hw = HybridWriter::new(&mut w);
let mut context = 0u8;
for &c in contents.iter() {
let mut m = models.get_mut(context as uint);
m.update(true);
m.write(&mut hw, c as u32);
m.incr(c as u32);
context = c;
}
hw.finalize();
}
//println!("Written {} bytes / {}", w.get_ref().len(), contents.len());
let compressed = w.unwrap();
b.iter(|| {
let mut r = BufReader::new(compressed.as_slice());
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hr = HybridReader::new(&mut r);
let mut context = 0u8;
for i in range(0, contents.len()) {
let mut m = models.get_mut(context as uint);
m.update(false);
let read = m.read(&mut hr) as u8;
m.incr(read as u32);
context = read;
}
});
}
} | let mut next_code: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
} | random_line_split |
main.rs | 5);
/// ```
pub fn diverges() -> ! {
panic!("This function never returns!");
}
/// # Panics
fn test(){}
/// # Errors
fn test2(){}
/// # Safety
fn test3(){}
}
fn main() {
//variable
let (a,b) = (1,2);
println!("{} {}", a, b);
let x:i32 = 5;
println!("{}", x);
let mut x = "foo";
println!("{}", x);
x = "bar";
println!("{}", x);
println!("{}", sandbox::add(1,2));
let f: fn(i32, i32) -> i32 = sandbox::add;
println!("{}", f(1,2));
let x = true;
let y: bool = false;
let x = 'x';
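// `slice` below is a fixed-size array; `&slice[1..4]` borrows a slice
// viewing its middle three elements without copying them.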
let slice = [0, 1, 2, 3, 4];
let middle = &slice[1..4];
println!("{}", middle[0]);
let x: (i32, &str) = (1, "hello");
let mut x = (1, 2);
let y = (2, 3);
x = y;
let (_x,_y) = x;
println!("{}", _x);
println!("{}", x.0);
assert_eq!(6, sandbox::add(5,1));
let x = 5;
if x == 5 {
println!("x is five!");
} else if x == 6 {
println!("x is six!");
} else {
println!("x is not five or six :(");
}
let y = if x == 5 { 10 } else { 15 };
println!("{}", y);
let mut done = false;
while !done {
println!("loop");
done = true;
}
for x in 0..10 {
println!("{}", x);
}
for (index, value) in (5..10).enumerate() {
println!("index = {} and value = {}", index, value);
}
let lines = "hello\nworld".lines();
for(n, line) in lines.enumerate(){
println!("{} : {}", n, line);
}
'loop1: loop{
'loop2: loop{
println!("loop infinite");
break 'loop1;
}
}
let v = vec![1, 2, 3, 4, 5];
println!("The third element of v is {}", v[2]);
match v.get(7) {
Some(x) => println!("Item 7 is {}", x),
None => println!("Sorry, this vector is too short.")
}
for i in &v {
println!("This is a reference to {}", i);
}
//ownership
let v2 = v;
//println!("v[0] {}", v[0]);
let own = 1;
let own2 = own;
println!("{}", own);
fn sum_vec(v: &Vec<i32>) -> i32 {
return v.iter().fold(0, |a, &b| a + b);
}
// Borrow two vectors and sum them.
// This kind of borrowing does not allow mutation through the borrowed reference.
fn foo(v1: &Vec<i32>, v2: &Vec<i32>) -> i32 {
// Do stuff with `v1` and `v2`.
let s1 = sum_vec(v1);
let s2 = sum_vec(v2);
// Return the answer.
s1 + s2
}
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5, 6];
let answer = foo(&v1, &v2);
println!("{}", answer);
let mut x = 5;
{
let y = &mut x;
*y += 1;
}
println!("{}", x);
/* The borrowing rules: at any time you may have either any number of
   immutable references (&T) to a resource, or exactly one mutable
   reference (&mut T), but never both at once. */
//let y: &i32;
let x = 5;
let y: &i32;
y = &x;
println!("{}", y);
//lifetimes
fn skip_prefix<'a, 'b>(line: &'a str, prefix: &'b str) -> &'a str {
return line;
}
let line = "lang:en=Hello World!";
let lang = "en";
let v;
{
let p = format!("lang:{}=", lang); // -+ `p` comes into scope.
v = skip_prefix(line, p.as_str()); // |
} // -+ `p` goes out of scope.
println!("{}", v);
struct Foo<'a> {
x: &'a i32,
}
impl<'a> Foo<'a> {
fn x(&self) -> &'a i32 { self.x }
}
let y = &5; // This is the same as `let _y = 5; let y = &_y;`.
let f = Foo { x: y };
println!("{}", f.x);
let x: &'static str = "Hello, world.";
let mut x = 5;
//mutable binding to a mutable ref
let mut y = &mut x;
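// RefCell moves borrow checking from compile time to runtime: taking a
// second borrow_mut() while the first guard `y` is alive would panic,
// which is why the `z` line stays commented out.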
use std::cell::RefCell;
let x = RefCell::new(42);
let y = x.borrow_mut();
//let z = x.borrow_mut();
struct Point {
x: i32,
y: i32,
}
struct PointRef<'a> {
x: &'a mut i32,
y: &'a mut i32,
}
let mut point = Point { x: 0, y: 0 };
{
let r = PointRef { x: &mut point.x, y: &mut point.y };
*r.x = 5;
*r.y = 6;
}
assert_eq!(5, point.x);
assert_eq!(6, point.y);
point = Point { x: 0, ..point };
assert_eq!(6, point.y);
struct Color(i32, i32, i32);
let black = Color(17, 0, 0);
let Color(r, _, _) = black;
println!("{}", r);
enum Message {
Quit,
ChangeColor(i32, i32, i32),
Move { x: i32, y: i32 },
Write(String),
}
let v = vec!["Hello".to_string(), "World".to_string()];
let v1: Vec<Message> = v.into_iter().map(Message::Write).collect();
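// match arms are tried in order and must be exhaustive; `_` is the
// catch-all for every remaining value.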
let x = 5;
match x {
1 => println!("one"),
2 => println!("two"),
3 => println!("three"),
4 => println!("four"),
5 => println!("five"),
6 | 7 => println!("six or seven"),
_ => println!("something else"),
}
let number = match x {
1 => "one",
2 => "two",
3 => "three",
4 => "four",
5 => "five",
_ => "something else",
};
let message = Message::Quit;
match message {
Message::Quit => println!("quit"),
Message::ChangeColor(r, g, b) => println!("color"),
Message::Move { x, y: new_name_for_y } => println!("move"),
Message::Write(s) => println!("write"),
};
let x = 1;
let c = 'c';
match c {
x => println!("x: {} c: {}", x, c),
}
println!("x: {}", x);
let origin = Point { x: 0, y: 0 };
let Point { x, y } = origin;
let tuple = (5, String::from("five"));
let (x, _) = tuple;
//string is not moved thanks to _
println!("Tuple is: {:?}", tuple);
let (x, ..) = tuple;
let mut x = 5;
match x {
ref name @ 1 ... 5 if *name < 5 => println!("one through four {}", name),
ref name @ 1 ... 5 if *name >= 5 => println!("five {}", name),
ref mut mr => println!("Got a mutable reference to {}", mr),
}
struct Circle {
x: f64,
y: f64,
radius: f64,
}
| impl Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn reference(&self) -> &Circle{
println!("taking self by reference!");
self
}
fn mutable_reference(&mut self) {
println!("taking self by mutable reference!");
}
fn takes_ownership(self) {
println!("taking ownership of self!");
}
fn new(x: f64, y: f64, radius: f64) -> Circle {
Circle {
x: x,
y: y,
radius: radius,
}
}
}
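// Builder pattern: each setter returns &mut Self so the calls chain,
// and finalize() assembles the finished Circle.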
struct CircleBuilder {
x: f64,
y: f64,
radius: f64,
}
let mut c = Circle { x: 0.0, y: 0.0, radius: 2.0 };
c = Circle::new(0.0, 0.0, 2.0);
println!("{}", c.reference().area());
impl CircleBuilder {
fn new() -> CircleBuilder {
CircleBuilder { x: 0.0, y: 0.0, radius: 1.0, }
}
fn x(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.x = coordinate;
self
}
fn y(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.y = coordinate;
self
}
fn radius(&mut self, radius: f64) -> &mut CircleBuilder {
self.radius = radius;
self
}
fn finalize(&self) -> Circle {
Circle { x: self.x, y: self.y, radius: self.radius }
}
}
c = CircleBuilder::new()
.x(1.0)
.y(2.0)
.radius(2.0)
.finalize();
println!("{}", c.reference().area());
let greeting = "Hello there."; // greeting: &'static str
let mut s = "Hello".to_string(); // mut s: String
fn takes_slice(slice: &str) {
println!("Got: {}", slice);
}
takes_slice(&s);
for c in s.chars() {
print!("{}, ", c);
}
let c = s.chars().nth(0);
let sl = {
let tmp = &s[0..5];
println!("{}", tmp);
};
let mut concat = s + "foo";
println!("{}", concat);
let concat2 = "bar".to_string() + &concat;
println!("{}", concat2);
let x: Option<i32> = Some(5);
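// Generics: takes_anything is monomorphized for each concrete T it is
// called with.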
fn takes_anything<T>(x: T) {
// Do something with `x`.
}
takes_anything(concat2);
struct PointGeneric<T> {
x: T,
y: T,
}
impl<T> PointGeneric<T> {
fn swap(&mut self) {
std::mem::swap(&mut self.x, &mut self.y);
}
}
let int_origin = PointGeneric { x: 0, y: 0 };
let float_origin = PointGeneric { x: 0.0, y: 0.0 };
trait HasArea {
fn area(&self) -> f64;
fn is_larger(&self, &Self) -> bool;
}
impl HasArea for Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn is_larger(&self, other: &Self) -> bool {
self.area() > other.area()
}
}
use std::fmt::Debug;
fn print_area<T: HasArea>(shape: T) {
println!("This shape has an area of {}", shape.area());
}
fn test<T: HasArea + Debug>() {
}
fn test2<T>() where T: HasArea + Debug {
}
let c = Circle {
x: 0.0f64,
y: 0.0f64,
radius: 1.0f64,
};
print_area(c);
trait bar: HasArea {
fn is_valid(&self) -> bool;
fn is_invalid(&self) -> bool { !self.is_valid() }
}
#[derive(Debug)]
struct deriving;
impl Drop for Circle {
fn drop(&mut self) {
println!("Dropping!");
}
}
fn main() {
let x = Circle { x: 0.0, y: 0.0, radius: 2.0 };
// Do stuff.
}
let option: Option<i32> = Some(5);
match option {
Some(x) => { println!("match!"); },
None => {},
}
if option.is_some() {
let x = option.unwrap();
println!("match!");
}
if let Some(x) = option {
println!("match!");
}
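// Static vs dynamic dispatch: the generic do_something below is
// monomorphized per type, while do_something2 takes a &FooBar trait
// object and resolves the method at runtime through a vtable.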
trait FooBar {
fn method(&self) -> String;
}
impl FooBar for u8 {
fn method(&self) -> String { format!("u8: {}", *self) }
}
impl FooBar for String {
fn method(&self) -> String { format!("string: {}", *self) }
}
fn do_something<T: FooBar>(x: T) {
x.method();
}
let x = 5u8;
let y = "Hello".to_string();
do_something(x);
do_something(y);
fn do_something2(x: &FooBar) {
x.method();
}
let x = 5u8;
//casting
do_something2(&x as &FooBar);
//coercing
do_something2(&x);
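// Closures: parameter and return types are inferred, and by default the
// environment is captured by reference.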
let add = |x| x + 1;
println!("{}", add(2));
let mut num = 5;
{
let mut add_num = |x: i32| num += x;
add_num(5);
}
assert_eq!(10, num);
//move closure
let mut num = 5;
{
let mut add_num = move |x: i32| num += x;
add_num(5);
}
assert_eq!(5, num);
fn call_with_one<F>(closure: F) -> i32
where F: Fn(i32) -> i32 {
closure(1)
}
let answer = call_with_one(|x| x + 2);
assert_eq!(3, answer);
fn call_with_one2(some_closure: &Fn(i32) -> i32) -> i32 {
some_closure(1)
}
let answer = call_with_one2(&|x| x + 2);
assert_eq!(3, answer);
fn call_with_ref<F>(some_closure:F) -> i32
where F: for<'a> Fn(&'a i32) -> i32 {
let value = 0;
some_closure(&value)
}
fn add_one(i: i32) -> i32 {
i + 1
}
let f = add_one;
call_with_one2(&f);
fn factory() -> Box<Fn(i32) -> i32> {
let num = 5;
Box::new(move |x| x + num)
}
let f = factory();
let answer = f(1);
assert_eq!(6, answer);
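// Two traits may define methods with the same name; universal function
// call syntax, Trait::method(&value), picks which implementation runs.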
trait Foo2 {
fn f(&self);
}
trait Bar2 {
fn f(&self);
}
struct Baz;
impl Foo2 for Baz {
fn f(&self) { println!("Baz’s impl of Foo"); }
}
impl Bar2 for Baz {
fn f(&self) { println!("Baz’s impl of Bar"); }
}
let b = Baz;
Foo2::f(&b);
Bar2::f(&b);
println!("Hello in English: {}", phrases::english::greetings::hello());
println!("Hello in English: {}", hi());
// A `const` is inlined at each use site, so it has no single fixed address;
// prefer `const` over `static` unless you need one shared location.
const TOTO: i32 = 12;
// A `static` has exactly one address for the whole program; a `static mut`
// can only be read or written inside `unsafe`.
static mut TOTO2: i32 = 12;
unsafe {
TOTO2 = 2;
}
#[test]
fn check() {
assert_eq!(2, 1 + 1);
}
#[cfg(target_os = "macos")]
mod macos_only {
}
type mytype = String;
let s:mytype = "toto".to_string();
use std::result;
enum ConcreteError {
Foo,
Bar,
}
type Result<T> = result::Result<T, ConcreteError>;
let casty = TOTO as i64;
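// transmute reinterprets the raw bytes of one type as another; here the
// 4-byte array becomes a u32, printing 256 on a little-endian machine.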
use std::mem;
unsafe {
let a = [0u8, 1u8, 0u8, 0u8];
let b = mem::transmute::<[u8; 4], u32>(a);
println!("{}", b);
}
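// Associated types let a trait declare placeholder types (N, E) that
// every implementation pins down to concrete types.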
trait Graph {
type N;
type E;
fn has_edge(&self, &Self::N, &Self::N) -> bool;
fn edges(&self, &Self::N) -> Vec<Self::E>;
}
struct Node;
struct Edge;
struct MyGraph;
impl Graph for MyGraph {
type N = Node;
type E = Edge;
fn has_edge(&self, n1: &Node, n2: &Node) -> bool {
true
}
fn edges(&self, n: &Node) -> Vec<Edge> {
Vec::new()
}
}
let graph = MyGraph;
let obj = Box::new(graph) as Box<Graph<N=Node, E=Edge>>;
struct FooUnsized<T: ?Sized> {
f: T,
}
fn testUnsized() {
println!("unsized");
}
let mut fooUnsized = FooUnsized { f: testUnsized };
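// Operator overloading: implementing Add<i32> for Point makes
// `point + 2` compile and yield the declared Output type (f64 here).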
use std::ops::Add;
impl Add<i32> for Point {
type Output = f64;
fn add(self, rhs: i32) -> f64 {
// Add an i32 to a Point and get an f64.
50f64
}
}
let xa: f64 = point + 2;
println!("{}", xa);
use std::rc::Rc;
fn borrow(s: &str) {
// Borrow a string for a second.
}
// String implements Deref<Target=str>.
let owned = "Hello".to_string();
let counted = Rc::new(owned);
// Therefore, this works:
borrow(&counted);
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// macroTest! (x=>3);
/// # }
/// ```
macro_rules! macroTest {
(x => $e:expr) => (println!("mode X: {}", $e));
(y => $e:expr) => (println!("mode Y: {}", $e));
}
macroTest! (x=>3);
macro_rules! macroTest2 {
(x=> $($e:expr),*) => {{
let mut temp_vec = Vec::new();
$(
//println!("mode X: {}", $e)
temp_vec.push($e);
)*
}};
}
macroTest2!(x=>[3,4]);
let x: Option<i32> = None;
match x {
Some(_) => unreachable!(),
None => println!("I know x is None!"),
}
let x = 5;
let raw = &x as *const i32;
let mut y = 10;
let raw_mut = &mut y as *mut i32;
let points_at = unsafe { *raw };
println!("raw points at {}", points_at);
unsafe {
let ref_raw = &*raw;
}
if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
println!("Think Different!");
}
let mut range = 0..10;
loop {
match range.next() {
Some(x) => {
println!("{}", x);
},
None => { break }
}
}
let nums = vec![1, 2, 3];
for num in &nums {
println!("{}", num);
}
let one_to_one_hundred = (1..101).collect::<Vec<i32>>();
let one_to_one_hundred = (1..101).collect::<Vec<_>>();
let greater_than_forty_two = (0..100)
.find(|x| *x > 42);
match greater_than_forty_two {
Some(_) => println!("Found a match!"),
None => println!("No match found :("),
}
let sum = (1..4).fold(0, |sum, x| sum + x);
for num in nums.iter() {
println!("{}", num);
}
(1..100).map(|x| x + 1);
for i in (1..).take(5) {
println!("{}", i);
}
for i in (1..100).filter(|&x| x % 2 == 0) {
println!("{}", i);
}
(1..)
.filter(|&x| x % 2 == 0)
.filter(|&x| x % 3 == 0)
.take(5)
.collect::<Vec<i32>>();
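// Adaptors like the `map` a few lines up are lazy and do nothing until
// consumed; a minimal sketch of forcing evaluation with a consuming method:
let shifted_sum: i32 = (1..100).map(|x| x + 1).sum();
println!("{}", shifted_sum);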
let handle = thread::spawn(|| {
"Hello from a thread!"
});
println!("{}", handle.join().unwrap());
use std::sync::{Arc, Mutex, mpsc};
let data = Arc::new(Mutex::new(vec![1, 2, 3]));
for i in 0..3 {
let data_ref = data.clone();
thread::spawn(move || {
let mut data_ref = data_ref.lock().unwrap();
data_ref[0] += i;
});
}
use std::time::Duration;
thread::sleep(Duration::from_millis(50));
let data2 = Arc::new(Mutex::new(0));
// `tx` is the "transmitter" or "sender".
// `rx` is the "receiver".
let (tx2, rx2) = mpsc::channel();
for _ in 0..10 {
let (data, tx2) = (data2.clone(), tx2.clone());
thread::spawn(move || {
let mut data = data.lock().unwrap();
*data += 1;
tx2.send(()).unwrap();
});
}
for _ in 0..10 {
rx2.recv().unwrap();
}
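// Hedged sketch: a Receiver is also an iterator, so the drain loop above can
// be written as a `for`; iteration ends once every Sender has been dropped.
drop(tx2);
for _leftover in rx2 {
    // All ten messages were already received above, so this never runs here.
}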
use std::cell::Cell;
let x = Cell::new(1);
let y = &x;
let z = &x;
x.set(2);
y.set(3);
z.set(4);
println!("{}", x.get());
use libc::{c_int, size_t};
//#[link(name = "snappy")]
/*extern {
fn snappy_compress(input: *const u8,
input_length: size_t,
compressed: *mut u8,
compressed_length: *mut size_t) -> c_int;
fn snappy_uncompress(compressed: *const u8,
compressed_length: size_t,
uncompressed: *mut u8,
uncompressed_length: *mut size_t) -> c_int;
fn snappy_max_compressed_length(source_length: size_t) -> size_t;
fn snappy_uncompressed_length(compressed: *const u8,
compressed_length: size_t,
result: *mut size_t) -> c_int;
fn snappy_validate_compressed_buffer(compressed: *const u8,
compressed_length: size_t) -> c_int;
}
pub fn validate_compressed_buffer(src: &[u8]) -> bool {
unsafe {
snappy_validate_compressed_buffer(src.as_ptr(), src.len() as size_t) == 0
}
}*/
use std::collections::HashMap;
let mut map = HashMap::new();
map.insert("Foo".to_string(), 42);
assert_eq!(map.get("Foo"), Some(&42));
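// Hedged sketch: the Entry API does a lookup-or-insert in a single probe.
*map.entry("Foo".to_string()).or_insert(0) += 1;
assert_eq!(map.get("Foo"), Some(&43));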
use std::borrow::Borrow;
use std::fmt::Display;
fn foobis<T: Borrow<i32> + Display>(a: T) {
println!("a is borrowed: {}", a);
}
let mut i = 5;
foobis(&i);
foobis(&mut i);
let s = "Hello".to_string();
fn foocxxc<T: AsRef<str>>(s: T) {
let slice = s.as_ref();
}
//#[macro_use]
//extern crate hello_world_derive;
/*trait HelloWorld {
fn hello_world();
}
#[derive(HelloWorld)]
struct FrenchToast;
#[derive(HelloWorld)]
struct Waffles;
fn main() {
FrenchToast::hello_world();
Waffles::hello_world();
}*/
// Searches `haystack` for the Unicode character `needle`. If one is found, the
// byte offset of the character is returned. Otherwise, `None` is returned.
fn find(haystack: &str, needle: char) -> Option<usize> {
for (offset, c) in haystack.char_indices() {
if c == needle {
return Some(offset);
}
}
None
}
let file_name = "foobar.rs";
match find(file_name, '.') {
None => println!("No file extension found."),
Some(i) => println!("File extension: {}", &file_name[i+1..]),
}
fn extension_explicit(file_name: &str) -> Option<&str> {
match find(file_name, '.') {
None => None,
Some(i) => Some(&file_name[i+1..]),
}
}
fn map<F, T, A>(option: Option<T>, f: F) -> Option<A> where F: FnOnce(T) -> A {
match option {
None => None,
Some(value) => Some(f(value)),
}
}
fn extension(file_name: &str) -> Option<&str> {
find(file_name, '.').map(|i| &file_name[i+1..])
}
let filename : Option<&str> = extension("foobar.rs");
match filename {
None => println!("No file extension found."),
Some(ext) => println!("File extension 2 : {}", ext),
}
fn unwrap_or<T>(option: Option<T>, default: T) -> T {
match option {
None => default,
Some(value) => value,
}
}
assert_eq!(extension("foobar.csv").unwrap_or("rs"), "csv");
assert_eq!(extension("foobar").unwrap_or("rs"), "rs");
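// Hedged sketch: combinators keep dependent lookups flat; `and_then` chains
// a second Option-producing step without a nested match.
let first_char = extension("foobar.rs").and_then(|ext| ext.chars().next());
assert_eq!(first_char, Some('r'));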
fn double_number1(number_str: &str) -> i32 {
2 * number_str.parse::<i32>().unwrap()
}
let n: i32 = double_number1("10");
assert_eq!(n, 20);
use std::num::ParseIntError;
fn double_number(number_str: &str) -> result::Result<i32, ParseIntError> {
number_str.parse::<i32>().map(|n| 2 * n)
}
match double_number("10") {
Ok(n) => assert_eq!(n, 20),
Err(err) => println!("Error: {:?}", err),
}
use std::env;
fn double_arg(mut argv: env::Args) -> result::Result<i32, String> {
argv.nth(1)
.ok_or("Please give at least one argument".to_owned())
.and_then(|arg| arg.parse::<i32>().map_err(|err| err.to_string()))
.map(|n| 2 * n)
}
match double_arg(env::args()) {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
use std::fs::File;
use std::io::Read;
use std::path::Path;
fn file_double<P: AsRef<Path>>(file_path: P) -> result::Result<i32, String> {
let mut file = try | random_line_split |
|
main.rs | );
/// ```
pub fn diverges() -> ! {
panic!("This function never returns!");
}
/// # Panics
fn test(){}
/// # Errors
fn test2(){}
/// # Safety
fn test3(){}
}
fn main() {
//variable
let (a,b) = (1,2);
println!("{} {}", a, b);
let x:i32 = 5;
println!("{}", x);
let mut x = "foo";
println!("{}", x);
x = "bar";
println!("{}", x);
println!("{}", sandbox::add(1,2));
let f: fn(i32, i32) -> i32 = sandbox::add;
println!("{}", f(1,2));
let x = true;
let y: bool = false;
let x = 'x';
let slice = [0, 1, 2, 3, 4];
let middle = &slice[1..4];
println!("{}", middle[0]);
let x: (i32, &str) = (1, "hello");
let mut x = (1, 2);
let y = (2, 3);
x = y;
let (_x,_y) = x;
println!("{}", _x);
println!("{}", x.0);
assert_eq!(6, sandbox::add(5,1));
let x = 5;
if x == 5 {
println!("x is five!");
} else if x == 6 {
println!("x is six!");
} else {
println!("x is not five or six :(");
}
let y = if x == 5 { 10 } else { 15 };
println!("{}", y);
let mut done = false;
while !done {
println!("loop");
done = true;
}
for x in 0..10 {
println!("{}", x);
}
for (index, value) in (5..10).enumerate() {
println!("index = {} and value = {}", index, value);
}
let lines = "hello\nworld".lines();
for (n, line) in lines.enumerate() {
println!("{} : {}", n, line);
}
'loop1: loop {
'loop2: loop {
println!("loop infinite");
break 'loop1;
}
}
let v = vec![1, 2, 3, 4, 5];
println!("The third element of v is {}", v[2]);
match v.get(7) {
Some(x) => println!("Item 7 is {}", x),
None => println!("Sorry, this vector is too short.")
}
for i in &v {
println!("This is a reference to {}", i);
}
//ownership
let v2 = v;
//println!("v[0] {}", v[0]);
let own = 1;
let own2 = own;
println!("{}", own);
fn sum_vec(v: &Vec<i32>) -> i32 {
return v.iter().fold(0, |a, &b| a + b);
}
// Borrow two vectors and sum them.
// This kind of borrowing does not allow mutation through the borrowed reference.
fn foo(v1: &Vec<i32>, v2: &Vec<i32>) -> i32 {
// Do stuff with `v1` and `v2`.
let s1 = sum_vec(v1);
let s2 = sum_vec(v2);
// Return the answer.
s1 + s2
}
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5, 6];
let answer = foo(&v1, &v2);
println!("{}", answer);
let mut x = 5;
{
let y = &mut x;
*y += 1;
}
println!("{}", x);
/*one or more references (&T) to a resource,
exactly one mutable reference (&mut T).*/
//let y: &i32;
let x = 5;
let y: &i32;
y = &x;
println!("{}", y);
//lifetimes
fn skip_prefix<'a, 'b>(line: &'a str, prefix: &'b str) -> &'a str {
return line;
}
let line = "lang:en=Hello World!";
let lang = "en";
let v;
{
let p = format!("lang:{}=", lang); // -+ `p` comes into scope.
v = skip_prefix(line, p.as_str()); // |
} // -+ `p` goes out of scope.
println!("{}", v);
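// The 'a on skip_prefix's return type is what lets `v` outlive `p`: the
// result borrows from `line`, not from the temporary prefix. A hedged
// sketch with two static inputs behaves the same way:
let kept = skip_prefix("lang:fr=Bonjour", "lang:fr=");
println!("{}", kept);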
struct Foo<'a> {
x: &'a i32,
}
impl<'a> Foo<'a> {
fn x(&self) -> &'a i32 { self.x }
}
let y = &5; // This is the same as `let _y = 5; let y = &_y;`.
let f = Foo { x: y };
println!("{}", f.x);
let x: &'static str = "Hello, world.";
let mut x = 5;
//mutable binding to a mutable ref
let mut y = &mut x;
use std::cell::RefCell;
let x = RefCell::new(42);
let y = x.borrow_mut();
//let z = x.borrow_mut();
struct Point {
x: i32,
y: i32,
}
struct PointRef<'a> {
x: &'a mut i32,
y: &'a mut i32,
}
let mut point = Point { x: 0, y: 0 };
{
let r = PointRef { x: &mut point.x, y: &mut point.y };
*r.x = 5;
*r.y = 6;
}
assert_eq!(5, point.x);
assert_eq!(6, point.y);
point = Point { x: 0, ..point };
assert_eq!(6, point.y);
struct Color(i32, i32, i32);
let black = Color(17, 0, 0);
let Color(r, _, _) = black;
println!("{}", r);
enum Message {
Quit,
ChangeColor(i32, i32, i32),
Move { x: i32, y: i32 },
Write(String),
}
let v = vec!["Hello".to_string(), "World".to_string()];
let v1: Vec<Message> = v.into_iter().map(Message::Write).collect();
let x = 5;
match x {
1 => println!("one"),
2 => println!("two"),
3 => println!("three"),
4 => println!("four"),
5 => println!("five"),
6 | 7 => println!("six or seven"),
_ => println!("something else"),
}
let number = match x {
1 => "one",
2 => "two",
3 => "three",
4 => "four",
5 => "five",
_ => "something else",
};
let message = Message::Quit;
match message {
Message::Quit => println!("quit"),
Message::ChangeColor(r, g, b) => println!("color"),
Message::Move { x, y: new_name_for_y } => println!("move"),
Message::Write(s) => println!("write"),
};
let x = 1;
let c = 'c';
match c {
x => println!("x: {} c: {}", x, c),
}
println!("x: {}", x);
let origin = Point { x: 0, y: 0 };
let Point { x, y } = origin;
let tuple = (5, String::from("five"));
let (x, _) = tuple;
//string is not moved thanks to _
println!("Tuple is: {:?}", tuple);
let (x, ..) = tuple;
let mut x = 5;
match x {
ref name @ 1 ... 5 if *name < 5 => println!("one through four {}", name),
ref name @ 1 ... 5 if *name >= 5 => println!("five {}", name),
ref mut mr => println!("Got a mutable reference to {}", mr),
}
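// Hedged sketch: on newer compilers the same idea is usually written with
// the inclusive range `..=` and inferred binding modes.
match x {
    small @ 1..=5 => println!("small {}", small),
    other => println!("other {}", other),
}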
struct Circle {
x: f64,
y: f64,
radius: f64,
}
impl Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn reference(&self) -> &Circle{
println!("taking self by reference!");
self
}
fn mutable_reference(&mut self) {
println!("taking self by mutable reference!");
}
fn takes_ownership(self) {
println!("taking ownership of self!");
}
fn new(x: f64, y: f64, radius: f64) -> Circle {
Circle {
x: x,
y: y,
radius: radius,
}
}
}
struct CircleBuilder {
x: f64,
y: f64,
radius: f64,
}
let mut c = Circle { x: 0.0, y: 0.0, radius: 2.0 };
c = Circle::new(0.0, 0.0, 2.0);
println!("{}", c.reference().area());
impl CircleBuilder {
fn new() -> CircleBuilder {
CircleBuilder { x: 0.0, y: 0.0, radius: 1.0, }
}
fn x(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.x = coordinate;
self
}
fn y(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.y = coordinate;
self
}
fn radius(&mut self, radius: f64) -> &mut CircleBuilder {
self.radius = radius;
self
}
fn finalize(&self) -> Circle {
Circle { x: self.x, y: self.y, radius: self.radius }
}
}
c = CircleBuilder::new().x(1.0)
.y(2.0)
.radius(2.0)
.finalize();
println!("{}", c.reference().area());
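// Hedged sketch: because every field has a default in new(), a bare chain
// is also valid and yields the builder's unit circle.
let unit = CircleBuilder::new().finalize();
println!("{}", unit.area());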
let greeting = "Hello there."; // greeting: &'static str
let mut s = "Hello".to_string(); // mut s: String
fn takes_slice(slice: &str) {
println!("Got: {}", slice);
}
takes_slice(&s);
for c in s.chars() {
print!("{}, ", c);
}
let c = s.chars().nth(0);
let sl = {
let tmp = &s[0..5];
println!("{}", tmp);
};
let mut concat = s + "foo";
println!("{}", concat);
let concat2 = "bar".to_string() + &concat;
println!("{}", concat2);
let x: Option<i32> = Some(5);
fn takes_anything<T>(x: T) {
// Do something with `x`.
}
takes_anything(concat2);
struct PointGeneric<T> {
x: T,
y: T,
}
impl<T> PointGeneric<T> {
fn swap(&mut self) {
std::mem::swap(&mut self.x, &mut self.y);
}
}
let int_origin = PointGeneric { x: 0, y: 0 };
let float_origin = PointGeneric { x: 0.0, y: 0.0 };
trait HasArea {
fn area(&self) -> f64;
fn is_larger(&self, &Self) -> bool;
}
impl HasArea for Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn is_larger(&self, other: &Self) -> bool {
self.area() > other.area()
}
}
use std::fmt::Debug;
fn print_area<T: HasArea>(shape: T) {
println!("This shape has an area of {}", shape.area());
}
fn test<T: HasArea + Debug>() {
}
fn test2<T>() where T: HasArea + Debug {
}
let c = Circle {
x: 0.0f64,
y: 0.0f64,
radius: 1.0f64,
};
print_area(c);
trait bar: HasArea {
fn is_valid(&self) -> bool;
fn is_invalid(&self) -> bool { !self.is_valid() }
}
#[derive(Debug)]
struct deriving;
impl Drop for Circle {
fn drop(&mut self) {
println!("Dropping!");
}
}
fn main() {
let x = Circle { x: 0.0, y: 0.0, radius: 2.0 };
// Do stuff.
}
let option: Option<i32> = Some(5);
match option {
Some(x) => { println!("match!"); },
None => {},
}
if option.is_some() {
let x = option.unwrap();
println!("match!");
}
if let Some(x) = option {
println!("match!");
}
trait FooBar {
fn method(&self) -> String;
}
impl FooBar for u8 {
fn method(&self) -> String { format!("u8: {}", *self) }
}
impl FooBar for String {
fn | (&self) -> String { format!("string: {}", *self) }
}
fn do_something<T: FooBar>(x: T) {
x.method();
}
let x = 5u8;
let y = "Hello".to_string();
do_something(x);
do_something(y);
fn do_something2(x: &FooBar) {
x.method();
}
let x = 5u8;
//casting
do_something2(&x as &FooBar);
//coercing
do_something2(&x);
let add = |x| x + 1;
println!("{}", add(2));
let mut num = 5;
{
let mut add_num = |x: i32| num += x;
add_num(5);
}
assert_eq!(10, num);
//move closure
let mut num = 5;
{
let mut add_num = move |x: i32| num += x;
add_num(5);
}
assert_eq!(5, num);
fn call_with_one<F>(closure: F) -> i32
where F: Fn(i32) -> i32 {
closure(1)
}
let answer = call_with_one(|x| x + 2);
assert_eq!(3, answer);
fn call_with_one2(some_closure: &Fn(i32) -> i32) -> i32 {
some_closure(1)
}
let answer = call_with_one2(&|x| x + 2);
assert_eq!(3, answer);
fn call_with_ref<F>(some_closure: F) -> i32
where F: for<'a> Fn(&'a i32) -> i32 {
let value = 0;
some_closure(&value)
}
fn add_one(i: i32) -> i32 {
i + 1
}
let f = add_one;
call_with_one2(&f);
fn factory() -> Box<Fn(i32) -> i32> {
let num = 5;
Box::new(move |x| x + num)
}
let f = factory();
let answer = f(1);
assert_eq!(6, answer);
trait Foo2 {
fn f(&self);
}
trait Bar2 {
fn f(&self);
}
struct Baz;
impl Foo2 for Baz {
fn f(&self) { println!("Baz’s impl of Foo"); }
}
impl Bar2 for Baz {
fn f(&self) { println!("Baz’s impl of Bar"); }
}
let b = Baz;
Foo2::f(&b);
Bar2::f(&b);
println!("Hello in English: {}", phrases::english::greetings::hello());
println!("Hello in English: {}", hi());
//inline, several memory address
//better than static
const TOTO: i32 = 12;
//same address for all use
static mut TOTO2: i32 = 12;
unsafe {
TOTO2 = 2;
}
#[test]
fn check() {
assert_eq!(2, 1 + 1);
}
#[cfg(target_os = "macos")]
mod macos_only {
}
type mytype = String;
let s: mytype = "toto".to_string();
use std::result;
enum ConcreteError {
Foo,
Bar,
}
type Result<T> = result::Result<T, ConcreteError>;
let casty = TOTO as i64;
use std::mem;
unsafe {
let a = [0u8, 1u8, 0u8, 0u8];
let b = mem::transmute::<[u8; 4], u32>(a);
println!("{}", b);
}
trait Graph {
type N;
type E;
fn has_edge(&self, &Self::N, &Self::N) -> bool;
fn edges(&self, &Self::N) -> Vec<Self::E>;
}
struct Node;
struct Edge;
struct MyGraph;
impl Graph for MyGraph {
type N = Node;
type E = Edge;
fn has_edge(&self, n1: &Node, n2: &Node) -> bool {
true
}
fn edges(&self, n: &Node) -> Vec<Edge> {
Vec::new()
}
}
let graph = MyGraph;
let obj = Box::new(graph) as Box<Graph<N=Node, E=Edge>>;
struct FooUnsized<T: ?Sized> {
f: T,
}
fn testUnsized(){
println!("unsized");
}
let mut fooUnsized = FooUnsized { f: testUnsized };
use std::ops::Add;
impl Add<i32> for Point {
type Output = f64;
fn add(self, rhs: i32) -> f64 {
// Add an i32 to a Point and get an f64.
50f64
}
}
let xa: f64 = point + 2;
println!("{}", xa);
use std::rc::Rc;
fn borrow(s: &str) {
// Borrow a string for a second.
}
// String implements Deref<Target=str>.
let owned = "Hello".to_string();
let counted = Rc::new(owned);
// Therefore, this works:
borrow(&counted);
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// macroTest! (x=>3);
/// # }
/// ```
macro_rules! macroTest {
(x => $e:expr) => (println!("mode X: {}", $e));
(y => $e:expr) => (println!("mode Y: {}", $e));
}
macroTest! (x=>3);
macro_rules! macroTest2 {
(x=> $($e:expr),*) => {{
let mut temp_vec = Vec::new();
$(
//println!("mode X: {}", $e)
temp_vec.push($e);
)*
}};
}
macroTest2!(x=>[3,4]);
let x: Option<i32> = None;
match x {
Some(_) => unreachable!(),
None => println!("I know x is None!"),
}
let x = 5;
let raw = &x as *const i32;
let mut y = 10;
let raw_mut = &mut y as *mut i32;
let points_at = unsafe { *raw };
println!("raw points at {}", points_at);
unsafe {
let ref_raw = &*raw;
}
if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
println!("Think Different!");
}
let mut range = 0..10;
loop {
match range.next() {
Some(x) => {
println!("{}", x);
},
None => { break }
}
}
let nums = vec![1, 2, 3];
for num in &nums {
println!("{}", num);
}
let one_to_one_hundred = (1..101).collect::<Vec<i32>>();
let one_to_one_hundred = (1..101).collect::<Vec<_>>();
let greater_than_forty_two = (0..100)
.find(|x| *x > 42);
match greater_than_forty_two {
Some(_) => println!("Found a match!"),
None => println!("No match found :("),
}
let sum = (1..4).fold(0, |sum, x| sum + x);
for num in nums.iter() {
println!("{}", num);
}
(1..100).map(|x| x + 1);
for i in (1..).take(5) {
println!("{}", i);
}
for i in (1..100).filter(|&x| x % 2 == 0) {
println!("{}", i);
}
(1..)
.filter(|&x| x % 2 == 0)
.filter(|&x| x % 3 == 0)
.take(5)
.collect::<Vec<i32>>();
let handle = thread::spawn(|| {
"Hello from a thread!"
});
println!("{}", handle.join().unwrap());
use std::sync::{Arc, Mutex, mpsc};
let data = Arc::new(Mutex::new(vec![1, 2, 3]));
for i in 0..3 {
let data_ref = data.clone();
thread::spawn(move || {
let mut data_ref = data_ref.lock().unwrap();
data_ref[0] += i;
});
}
use std::time::Duration;
thread::sleep(Duration::from_millis(50));
let data2 = Arc::new(Mutex::new(0));
// `tx` is the "transmitter" or "sender".
// `rx` is the "receiver".
let (tx2, rx2) = mpsc::channel();
for _ in 0..10 {
let (data, tx2) = (data2.clone(), tx2.clone());
thread::spawn(move || {
let mut data = data.lock().unwrap();
*data += 1;
tx2.send(()).unwrap();
});
}
for _ in 0..10 {
rx2.recv().unwrap();
}
use std::cell::Cell;
let x = Cell::new(1);
let y = &x;
let z = &x;
x.set(2);
y.set(3);
z.set(4);
println!("{}", x.get());
use libc::{c_int, size_t};
//#[link(name = "snappy")]
/*extern {
fn snappy_compress(input: *const u8,
input_length: size_t,
compressed: *mut u8,
compressed_length: *mut size_t) -> c_int;
fn snappy_uncompress(compressed: *const u8,
compressed_length: size_t,
uncompressed: *mut u8,
uncompressed_length: *mut size_t) -> c_int;
fn snappy_max_compressed_length(source_length: size_t) -> size_t;
fn snappy_uncompressed_length(compressed: *const u8,
compressed_length: size_t,
result: *mut size_t) -> c_int;
fn snappy_validate_compressed_buffer(compressed: *const u8,
compressed_length: size_t) -> c_int;
}
pub fn validate_compressed_buffer(src: &[u8]) -> bool {
unsafe {
snappy_validate_compressed_buffer(src.as_ptr(), src.len() as size_t) == 0
}
}*/
use std::collections::HashMap;
let mut map = HashMap::new();
map.insert("Foo".to_string(), 42);
assert_eq!(map.get("Foo"), Some(&42));
use std::borrow::Borrow;
use std::fmt::Display;
fn foobis<T: Borrow<i32> + Display>(a: T) {
println!("a is borrowed: {}", a);
}
let mut i = 5;
foobis(&i);
foobis(&mut i);
let s = "Hello".to_string();
fn foocxxc<T: AsRef<str>>(s: T) {
let slice = s.as_ref();
}
//#[macro_use]
//extern crate hello_world_derive;
/*trait HelloWorld {
fn hello_world();
}
#[derive(HelloWorld)]
struct FrenchToast;
#[derive(HelloWorld)]
struct Waffles;
fn main() {
FrenchToast::hello_world();
Waffles::hello_world();
}*/
// Searches `haystack` for the Unicode character `needle`. If one is found, the
// byte offset of the character is returned. Otherwise, `None` is returned.
fn find(haystack: &str, needle: char) -> Option<usize> {
for (offset, c) in haystack.char_indices() {
if c == needle {
return Some(offset);
}
}
None
}
let file_name = "foobar.rs";
match find(file_name, '.') {
None => println!("No file extension found."),
Some(i) => println!("File extension: {}", &file_name[i+1..]),
}
fn extension_explicit(file_name: &str) -> Option<&str> {
match find(file_name, '.') {
None => None,
Some(i) => Some(&file_name[i+1..]),
}
}
fn map<F, T, A>(option: Option<T>, f: F) -> Option<A> where F: FnOnce(T) -> A {
match option {
None => None,
Some(value) => Some(f(value)),
}
}
fn extension(file_name: &str) -> Option<&str> {
find(file_name, '.').map(|i| &file_name[i+1..])
}
let filename : Option<&str> = extension("foobar.rs");
match filename {
None => println!("No file extension found."),
Some(ext) => println!("File extension 2 : {}", ext),
}
fn unwrap_or<T>(option: Option<T>, default: T) -> T {
match option {
None => default,
Some(value) => value,
}
}
assert_eq!(extension("foobar.csv").unwrap_or("rs"), "csv");
assert_eq!(extension("foobar").unwrap_or("rs"), "rs");
fn double_number1(number_str: &str) -> i32 {
2 * number_str.parse::<i32>().unwrap()
}
let n: i32 = double_number1("10");
assert_eq!(n, 20);
use std::num::ParseIntError;
fn double_number(number_str: &str) -> result::Result<i32, ParseIntError> {
number_str.parse::<i32>().map(|n| 2 * n)
}
match double_number("10") {
Ok(n) => assert_eq!(n, 20),
Err(err) => println!("Error: {:?}", err),
}
use std::env;
fn double_arg(mut argv: env::Args) -> result::Result<i32, String> {
argv.nth(1)
.ok_or("Please give at least one argument".to_owned())
.and_then(|arg| arg.parse::<i32>().map_err(|err| err.to_string()))
.map(|n| 2 * n)
}
match double_arg(env::args()) {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
use std::fs::File;
use std::io::Read;
use std::path::Path;
fn file_double<P: AsRef<Path>>(file_path: P) -> result::Result<i32, String> {
let mut file = | method | identifier_name |
main.rs | /// ```
pub fn diverges() -> ! {
panic!("This function never returns!");
}
/// # Panics
fn test(){}
/// # Errors
fn test2(){}
/// # Safety
fn test3(){}
}
fn main() {
//variable
let (a,b) = (1,2);
println!("{} {}", a, b);
let x:i32 = 5;
println!("{}", x);
let mut x = "foo";
println!("{}", x);
x = "bar";
println!("{}", x);
println!("{}", sandbox::add(1,2));
let f: fn(i32, i32) -> i32 = sandbox::add;
println!("{}", f(1,2));
let x = true;
let y: bool = false;
let x = 'x';
let slice = [0, 1, 2, 3, 4];
let middle = &slice[1..4];
println!("{}", middle[0]);
let x: (i32, &str) = (1, "hello");
let mut x = (1, 2);
let y = (2, 3);
x = y;
let (_x,_y) = x;
println!("{}", _x);
println!("{}", x.0);
assert_eq!(6, sandbox::add(5,1));
let x = 5;
if x == 5 {
println!("x is five!");
} else if x == 6 {
println!("x is six!");
} else {
println!("x is not five or six :(");
}
let y = if x == 5 { 10 } else { 15 };
println!("{}", y);
let mut done = false;
while !done {
println!("loop");
done = true;
}
for x in 0..10 {
println!("{}", x);
}
for (index, value) in (5..10).enumerate() {
println!("index = {} and value = {}", index, value);
}
let lines = "hello\nworld".lines();
for (n, line) in lines.enumerate() {
println!("{} : {}", n, line);
}
'loop1: loop {
'loop2: loop {
println!("loop infinite");
break 'loop1;
}
}
let v = vec![1, 2, 3, 4, 5];
println!("The third element of v is {}", v[2]);
match v.get(7) {
Some(x) => println!("Item 7 is {}", x),
None => println!("Sorry, this vector is too short.")
}
for i in &v {
println!("This is a reference to {}", i);
}
//ownership
let v2 = v;
//println!("v[0] {}", v[0]);
let own = 1;
let own2 = own;
println!("{}", own);
fn sum_vec(v: &Vec<i32>) -> i32 {
return v.iter().fold(0, |a, &b| a + b);
}
// Borrow two vectors and sum them.
// This kind of borrowing does not allow mutation through the borrowed reference.
fn foo(v1: &Vec<i32>, v2: &Vec<i32>) -> i32 {
// Do stuff with `v1` and `v2`.
let s1 = sum_vec(v1);
let s2 = sum_vec(v2);
// Return the answer.
s1 + s2
}
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5, 6];
let answer = foo(&v1, &v2);
println!("{}", answer);
let mut x = 5;
{
let y = &mut x;
*y += 1;
}
println!("{}", x);
/*one or more references (&T) to a resource,
exactly one mutable reference (&mut T).*/
//let y: &i32;
let x = 5;
let y: &i32;
y = &x;
println!("{}", y);
//lifetimes
fn skip_prefix<'a, 'b>(line: &'a str, prefix: &'b str) -> &'a str {
return line;
}
let line = "lang:en=Hello World!";
let lang = "en";
let v;
{
let p = format!("lang:{}=", lang); // -+ `p` comes into scope.
v = skip_prefix(line, p.as_str()); // |
} // -+ `p` goes out of scope.
println!("{}", v);
struct Foo<'a> {
x: &'a i32,
}
impl<'a> Foo<'a> {
fn x(&self) -> &'a i32 { self.x }
}
let y = &5; // This is the same as `let _y = 5; let y = &_y;`.
let f = Foo { x: y };
println!("{}", f.x);
let x: &'static str = "Hello, world.";
let mut x = 5;
//mutable binding to a mutable ref
let mut y = &mut x;
use std::cell::RefCell;
let x = RefCell::new(42);
let y = x.borrow_mut();
//let z = x.borrow_mut();
struct Point {
x: i32,
y: i32,
}
struct PointRef<'a> {
x: &'a mut i32,
y: &'a mut i32,
}
let mut point = Point { x: 0, y: 0 };
{
let r = PointRef { x: &mut point.x, y: &mut point.y };
*r.x = 5;
*r.y = 6;
}
assert_eq!(5, point.x);
assert_eq!(6, point.y);
point = Point { x: 0, ..point };
assert_eq!(6, point.y);
struct Color(i32, i32, i32);
let black = Color(17, 0, 0);
let Color(r, _, _) = black;
println!("{}", r);
enum Message {
Quit,
ChangeColor(i32, i32, i32),
Move { x: i32, y: i32 },
Write(String),
}
let v = vec!["Hello".to_string(), "World".to_string()];
let v1: Vec<Message> = v.into_iter().map(Message::Write).collect();
let x = 5;
match x {
1 => println!("one"),
2 => println!("two"),
3 => println!("three"),
4 => println!("four"),
5 => println!("five"),
6 | 7 => println!("six or seven"),
_ => println!("something else"),
}
let number = match x {
1 => "one",
2 => "two",
3 => "three",
4 => "four",
5 => "five",
_ => "something else",
};
let message = Message::Quit;
match message {
Message::Quit => println!("quit"),
Message::ChangeColor(r, g, b) => println!("color"),
Message::Move { x, y: new_name_for_y } => println!("move"),
Message::Write(s) => println!("write"),
};
let x = 1;
let c = 'c';
match c {
x => println!("x: {} c: {}", x, c),
}
println!("x: {}", x);
let origin = Point { x: 0, y: 0 };
let Point { x, y } = origin;
let tuple = (5, String::from("five"));
let (x, _) = tuple;
//string is not moved thanks to _
println!("Tuple is: {:?}", tuple);
let (x, ..) = tuple;
let mut x = 5;
match x {
ref name @ 1 ... 5 if *name < 5 => println!("one through four {}", name),
ref name @ 1 ... 5 if *name >= 5 => println!("five {}", name),
ref mut mr => println!("Got a mutable reference to {}", mr),
}
struct Circle {
x: f64,
y: f64,
radius: f64,
}
impl Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn reference(&self) -> &Circle{
println!("taking self by reference!");
self
}
fn mutable_reference(&mut self) {
println!("taking self by mutable reference!");
}
fn takes_ownership(self) {
println!("taking ownership of self!");
}
fn new(x: f64, y: f64, radius: f64) -> Circle {
Circle {
x: x,
y: y,
radius: radius,
}
}
}
struct CircleBuilder {
x: f64,
y: f64,
radius: f64,
}
let mut c = Circle { x: 0.0, y: 0.0, radius: 2.0 };
c = Circle::new(0.0, 0.0, 2.0);
println!("{}", c.reference().area());
impl CircleBuilder {
fn new() -> CircleBuilder {
CircleBuilder { x: 0.0, y: 0.0, radius: 1.0, }
}
fn x(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.x = coordinate;
self
}
fn y(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.y = coordinate;
self
}
fn radius(&mut self, radius: f64) -> &mut CircleBuilder {
self.radius = radius;
self
}
fn finalize(&self) -> Circle {
Circle { x: self.x, y: self.y, radius: self.radius }
}
}
c = CircleBuilder::new().x(1.0)
.y(2.0)
.radius(2.0)
.finalize();
println!("{}", c.reference().area());
let greeting = "Hello there."; // greeting: &'static str
let mut s = "Hello".to_string(); // mut s: String
fn takes_slice(slice: &str) {
println!("Got: {}", slice);
}
takes_slice(&s);
for c in s.chars() {
print!("{}, ", c);
}
let c = s.chars().nth(0);
let sl = {
let tmp = &s[0..5];
println!("{}", tmp);
};
let mut concat = s + "foo";
println!("{}", concat);
let concat2 = "bar".to_string() + &concat;
println!("{}", concat2);
let x: Option<i32> = Some(5);
fn takes_anything<T>(x: T) {
// Do something with `x`.
}
takes_anything(concat2);
struct PointGeneric<T> {
x: T,
y: T,
}
impl<T> PointGeneric<T> {
fn swap(&mut self) {
std::mem::swap(&mut self.x, &mut self.y);
}
}
let int_origin = PointGeneric { x: 0, y: 0 };
let float_origin = PointGeneric { x: 0.0, y: 0.0 };
trait HasArea {
fn area(&self) -> f64;
fn is_larger(&self, &Self) -> bool;
}
impl HasArea for Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn is_larger(&self, other: &Self) -> bool {
self.area() > other.area()
}
}
use std::fmt::Debug;
fn print_area<T: HasArea>(shape: T) {
println!("This shape has an area of {}", shape.area());
}
fn test<T: HasArea + Debug>() {
}
fn test2<T>() where T: HasArea + Debug {
}
let c = Circle {
x: 0.0f64,
y: 0.0f64,
radius: 1.0f64,
};
print_area(c);
trait bar: HasArea {
fn is_valid(&self) -> bool;
fn is_invalid(&self) -> bool { !self.is_valid() }
}
#[derive(Debug)]
struct deriving;
impl Drop for Circle {
fn drop(&mut self) {
println!("Dropping!");
}
}
fn main() {
let x = Circle { x: 0.0, y: 0.0, radius: 2.0 };
// Do stuff.
}
let option: Option<i32> = Some(5);
match option {
Some(x) => { println!("match!"); },
None => {},
}
if option.is_some() {
let x = option.unwrap();
println!("match!");
}
if let Some(x) = option {
println!("match!");
}
trait FooBar {
fn method(&self) -> String;
}
impl FooBar for u8 {
fn method(&self) -> String { format!("u8: {}", *self) }
}
impl FooBar for String {
fn method(&self) -> String { format!("string: {}", *self) }
}
fn do_something<T: FooBar>(x: T) {
x.method();
}
let x = 5u8;
let y = "Hello".to_string();
do_something(x);
do_something(y);
fn do_something2(x: &FooBar) {
x.method();
}
let x = 5u8;
//casting
do_something2(&x as &FooBar);
//coercing
do_something2(&x);
let add = |x| x + 1;
println!("{}", add(2));
let mut num = 5;
{
let mut add_num = |x: i32| num += x;
add_num(5);
}
assert_eq!(10, num);
//move closure
let mut num = 5;
{
let mut add_num = move |x: i32| num += x;
add_num(5);
}
assert_eq!(5, num);
fn call_with_one<F>(closure: F) -> i32
where F: Fn(i32) -> i32 {
closure(1)
}
let answer = call_with_one(|x| x + 2);
assert_eq!(3, answer);
fn call_with_one2(some_closure: &Fn(i32) -> i32) -> i32 {
some_closure(1)
}
let answer = call_with_one2(&|x| x + 2);
assert_eq!(3, answer);
fn call_with_ref<F>(some_closure: F) -> i32
where F: for<'a> Fn(&'a i32) -> i32 {
let value = 0;
some_closure(&value)
}
fn add_one(i: i32) -> i32 {
i + 1
}
let f = add_one;
call_with_one2(&f);
fn factory() -> Box<Fn(i32) -> i32> {
let num = 5;
Box::new(move |x| x + num)
}
let f = factory();
let answer = f(1);
assert_eq!(6, answer);
trait Foo2 {
fn f(&self);
}
trait Bar2 {
fn f(&self);
}
struct Baz;
impl Foo2 for Baz {
fn f(&self) { println!("Baz’s impl of Foo"); }
}
impl Bar2 for Baz {
fn f(&self) { println!("Baz’s impl of Bar"); }
}
let b = Baz;
Foo2::f(&b);
Bar2::f(&b);
println!("Hello in English: {}", phrases::english::greetings::hello());
println!("Hello in English: {}", hi());
//inline, several memory address
//better than static
const TOTO: i32 = 12;
//same address for all use
static mut TOTO2: i32 = 12;
unsafe {
TOTO2 = 2;
}
#[test]
fn check() {
assert_eq!(2, 1 + 1);
}
#[cfg(target_os = "macos")]
mod macos_only {
}
type mytype = String;
let s: mytype = "toto".to_string();
use std::result;
enum ConcreteError {
Foo,
Bar,
}
type Result<T> = result::Result<T, ConcreteError>;
let casty = TOTO as i64;
use std::mem;
unsafe {
let a = [0u8, 1u8, 0u8, 0u8];
let b = mem::transmute::<[u8; 4], u32>(a);
println!("{}", b);
}
trait Graph {
type N;
type E;
fn has_edge(&self, &Self::N, &Self::N) -> bool;
fn edges(&self, &Self::N) -> Vec<Self::E>;
}
struct Node;
struct Edge;
struct MyGraph;
impl Graph for MyGraph {
type N = Node;
type E = Edge;
fn has_edge(&self, n1: &Node, n2: &Node) -> bool {
true
}
fn edges(&self, n: &Node) -> Vec<Edge> {
|
let graph = MyGraph;
let obj = Box::new(graph) as Box<Graph<N=Node, E=Edge>>;
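// Hedged sketch: generic code can name the associated types through `G`,
// so callers never spell out Node or Edge (the helper name is ours).
fn edge_count<G: Graph>(g: &G, from: &G::N) -> usize {
    g.edges(from).len()
}
println!("{}", edge_count(&MyGraph, &Node));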
struct FooUnsized<T: ?Sized> {
f: T,
}
fn testUnsized(){
println!("unsized");
}
let mut fooUnsized = FooUnsized { f: testUnsized };
use std::ops::Add;
impl Add<i32> for Point {
type Output = f64;
fn add(self, rhs: i32) -> f64 {
// Add an i32 to a Point and get an f64.
50f64
}
}
let xa: f64 = point + 2;
println!("{}", xa);
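// Hedged sketch: each RHS type gets its own impl, and each can pick its own
// Output; adding f64 support is a second, independent impl.
impl Add<f64> for Point {
    type Output = f64;
    fn add(self, rhs: f64) -> f64 { self.x as f64 + rhs }
}
println!("{}", Point { x: 3, y: 4 } + 0.5);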
use std::rc::Rc;
fn borrow(s: &str) {
// Borrow a string for a second.
}
// String implements Deref<Target=str>.
let owned = "Hello".to_string();
let counted = Rc::new(owned);
// Therefore, this works:
borrow(&counted);
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// macroTest! (x=>3);
/// # }
/// ```
macro_rules! macroTest {
(x => $e:expr) => (println!("mode X: {}", $e));
(y => $e:expr) => (println!("mode Y: {}", $e));
}
macroTest! (x=>3);
macro_rules! macroTest2 {
(x=> $($e:expr),*) => {{
let mut temp_vec = Vec::new();
$(
//println!("mode X: {}", $e)
temp_vec.push($e);
)*
}};
}
macroTest2!(x=>[3,4]);
let x: Option<i32> = None;
match x {
Some(_) => unreachable!(),
None => println!("I know x is None!"),
}
let x = 5;
let raw = &x as *const i32;
let mut y = 10;
let raw_mut = &mut y as *mut i32;
let points_at = unsafe { *raw };
println!("raw points at {}", points_at);
unsafe {
let ref_raw = &*raw;
}
if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
println!("Think Different!");
}
let mut range = 0..10;
loop {
match range.next() {
Some(x) => {
println!("{}", x);
},
None => { break }
}
}
let nums = vec![1, 2, 3];
for num in &nums {
println!("{}", num);
}
let one_to_one_hundred = (1..101).collect::<Vec<i32>>();
let one_to_one_hundred = (1..101).collect::<Vec<_>>();
let greater_than_forty_two = (0..100)
.find(|x| *x > 42);
match greater_than_forty_two {
Some(_) => println!("Found a match!"),
None => println!("No match found :("),
}
let sum = (1..4).fold(0, |sum, x| sum + x);
for num in nums.iter() {
println!("{}", num);
}
(1..100).map(|x| x + 1);
for i in (1..).take(5) {
println!("{}", i);
}
for i in (1..100).filter(|&x| x % 2 == 0) {
println!("{}", i);
}
(1..)
.filter(|&x| x % 2 == 0)
.filter(|&x| x % 3 == 0)
.take(5)
.collect::<Vec<i32>>();
let handle = thread::spawn(|| {
"Hello from a thread!"
});
println!("{}", handle.join().unwrap());
use std::sync::{Arc, Mutex, mpsc};
let data = Arc::new(Mutex::new(vec![1, 2, 3]));
for i in 0..3 {
let data_ref = data.clone();
thread::spawn(move || {
let mut data_ref = data_ref.lock().unwrap();
data_ref[0] += i;
});
}
use std::time::Duration;
thread::sleep(Duration::from_millis(50));
let data2 = Arc::new(Mutex::new(0));
// `tx` is the "transmitter" or "sender".
// `rx` is the "receiver".
let (tx2, rx2) = mpsc::channel();
for _ in 0..10 {
let (data, tx2) = (data2.clone(), tx2.clone());
thread::spawn(move || {
let mut data = data.lock().unwrap();
*data += 1;
tx2.send(()).unwrap();
});
}
for _ in 0..10 {
rx2.recv().unwrap();
}
use std::cell::Cell;
let x = Cell::new(1);
let y = &x;
let z = &x;
x.set(2);
y.set(3);
z.set(4);
println!("{}", x.get());
use libc::{c_int, size_t};
//#[link(name = "snappy")]
/*extern {
fn snappy_compress(input: *const u8,
input_length: size_t,
compressed: *mut u8,
compressed_length: *mut size_t) -> c_int;
fn snappy_uncompress(compressed: *const u8,
compressed_length: size_t,
uncompressed: *mut u8,
uncompressed_length: *mut size_t) -> c_int;
fn snappy_max_compressed_length(source_length: size_t) -> size_t;
fn snappy_uncompressed_length(compressed: *const u8,
compressed_length: size_t,
result: *mut size_t) -> c_int;
fn snappy_validate_compressed_buffer(compressed: *const u8,
compressed_length: size_t) -> c_int;
}
pub fn validate_compressed_buffer(src: &[u8]) -> bool {
unsafe {
snappy_validate_compressed_buffer(src.as_ptr(), src.len() as size_t) == 0
}
}*/
use std::collections::HashMap;
let mut map = HashMap::new();
map.insert("Foo".to_string(), 42);
assert_eq!(map.get("Foo"), Some(&42));
use std::borrow::Borrow;
use std::fmt::Display;
fn foobis<T: Borrow<i32> + Display>(a: T) {
println!("a is borrowed: {}", a);
}
let mut i = 5;
foobis(&i);
foobis(&mut i);
let s = "Hello".to_string();
fn foocxxc<T: AsRef<str>>(s: T) {
let slice = s.as_ref();
}
//#[macro_use]
//extern crate hello_world_derive;
/*trait HelloWorld {
fn hello_world();
}
#[derive(HelloWorld)]
struct FrenchToast;
#[derive(HelloWorld)]
struct Waffles;
fn main() {
FrenchToast::hello_world();
Waffles::hello_world();
}*/
// Searches `haystack` for the Unicode character `needle`. If one is found, the
// byte offset of the character is returned. Otherwise, `None` is returned.
fn find(haystack: &str, needle: char) -> Option<usize> {
for (offset, c) in haystack.char_indices() {
if c == needle {
return Some(offset);
}
}
None
}
let file_name = "foobar.rs";
match find(file_name, '.') {
None => println!("No file extension found."),
Some(i) => println!("File extension: {}", &file_name[i+1..]),
}
fn extension_explicit(file_name: &str) -> Option<&str> {
match find(file_name, '.') {
None => None,
Some(i) => Some(&file_name[i+1..]),
}
}
fn map<F, T, A>(option: Option<T>, f: F) -> Option<A> where F: FnOnce(T) -> A {
match option {
None => None,
Some(value) => Some(f(value)),
}
}
fn extension(file_name: &str) -> Option<&str> {
find(file_name, '.').map(|i| &file_name[i+1..])
}
let filename : Option<&str> = extension("foobar.rs");
match filename {
None => println!("No file extension found."),
Some(ext) => println!("File extension 2 : {}", ext),
}
fn unwrap_or<T>(option: Option<T>, default: T) -> T {
match option {
None => default,
Some(value) => value,
}
}
assert_eq!(extension("foobar.csv").unwrap_or("rs"), "csv");
assert_eq!(extension("foobar").unwrap_or("rs"), "rs");
fn double_number1(number_str: &str) -> i32 {
2 * number_str.parse::<i32>().unwrap()
}
let n: i32 = double_number1("10");
assert_eq!(n, 20);
use std::num::ParseIntError;
fn double_number(number_str: &str) -> result::Result<i32, ParseIntError> {
number_str.parse::<i32>().map(|n| 2 * n)
}
match double_number("10") {
Ok(n) => assert_eq!(n, 20),
Err(err) => println!("Error: {:?}", err),
}
use std::env;
fn double_arg(mut argv: env::Args) -> result::Result<i32, String> {
argv.nth(1)
.ok_or("Please give at least one argument".to_owned())
.and_then(|arg| arg.parse::<i32>().map_err(|err| err.to_string()))
.map(|n| 2 * n)
}
match double_arg(env::args()) {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
use std::fs::File;
use std::io::Read;
use std::path::Path;
fn file_double<P: AsRef<Path>>(file_path: P) -> result::Result<i32, String> {
let | Vec::new()
}
}
| identifier_body |
reader.rs | use std::cmp::Reverse;
use std::sync::Arc;
use aexecutor::SearcherExecutorPool;
use anyhow::{Error, Result};
use serde::{Deserialize, Serialize};
use tantivy::collector::{Count, TopDocs};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::{Query, TermQuery};
use tantivy::schema::{Field, FieldType, IndexRecordOption, Schema, Value};
use tantivy::{
DateTime,
DocAddress,
DocId,
Executor,
IndexReader,
ReloadPolicy,
Searcher,
SegmentReader,
Term,
};
use crate::helpers::{AsScore, Validate};
use crate::query::{DocumentId, QueryBuilder, QuerySelector};
use crate::structures::{DocumentHit, IndexContext};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ReaderContext {
/// The number of reader threads to use.
///
/// The current implementation is rather naive:
/// multithreading works by splitting a search into as many tasks as there are segments.
/// It cannot make search faster if your index consists of one large segment.
/// Also, keep in mind that multithreading a single query across several threads will not
/// improve your throughput; it can actually hurt it.
/// It will, however, decrease the average response time.
#[serde(default = "ReaderContext::default_reader_threads")]
reader_threads: usize,
/// The maximum searches that can be done at any one time.
max_concurrency: usize,
}
impl Validate for ReaderContext {
fn validate(&self) -> Result<()> {
if self.max_concurrency == 0 {
return Err(Error::msg("max concurrency must be at least 1."));
}
Ok(())
}
}
impl ReaderContext {
fn default_reader_threads() -> usize {
1
}
}
/// A given query payload that describes how the reader should
/// search the index.
#[derive(Debug, Deserialize)]
pub struct QueryPayload {
/// The query(s) itself.
query: QuerySelector,
/// The amount of results to limit by.
#[serde(default = "QueryPayload::default_limit")]
limit: usize,
/// The amount of documents to skip before getting the results.
#[serde(default)]
offset: usize,
/// A specified field to order results by; this defaults to the
/// score of the indexed documents (relevancy).
order_by: Option<String>,
/// How to sort the data (asc/desc).
#[serde(default)]
sort: Sort,
}
impl QueryPayload {
fn default_limit() -> usize {
20
}
}
/// What order to sort the returned data.
#[derive(Debug, Copy, Clone, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Sort {
/// Sort the data in ascending order.
Asc,
/// Sort the data in descending order. (Default)
Desc,
}
|
#[derive(Debug, Serialize)]
pub struct QueryResults {
/// The retrieved documents.
pub(crate) hits: Vec<DocumentHit>,
/// The total number of documents matching the search.
count: usize,
/// The amount of time taken to search in seconds.
time_taken: f32,
}
impl QueryResults {
#[inline]
pub fn len(&self) -> usize {
self.hits.len()
}
}
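/// A hedged convenience sketch, not part of the original API: mirrors std's
/// naming so callers can check for empty hit sets directly.
impl QueryResults {
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.hits.is_empty()
    }
}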
/// Attaches an order by clause to the collector.
///
/// The collected values will be returned in order according to the
/// given field's value.
fn order_and_search<R: AsScore + tantivy::fastfield::FastValue>(
searcher: &Searcher,
field: Field,
query: &dyn Query,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<(R, DocAddress)>, usize)> {
let collector = collector.order_by_fast_field(field);
searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)
}
/// Performs the search operation and processes the returned results.
fn process_search<S: AsScore>(
searcher: &Searcher,
schema: &Schema,
top_docs: Vec<(S, DocAddress)>,
) -> Result<Vec<DocumentHit>> {
let mut hits = Vec::with_capacity(top_docs.len());
for (ratio, ref_address) in top_docs {
let retrieved_doc = searcher.doc(ref_address)?;
let mut doc = schema.to_named_doc(&retrieved_doc);
let id = doc.0
.remove("_id")
.ok_or_else(|| Error::msg("document has been mislabeled (missing primary key '_id'), the dataset is invalid"))?;
if let Value::U64(v) = id[0] {
hits.push(DocumentHit {
doc,
document_id: v,
score: ratio.as_score(),
});
} else {
return Err(Error::msg("document has been mislabeled (missing identifier tag), the dataset is invalid"));
}
}
Ok(hits)
}
/// Orders the search results by the given field with a given sort (ASC, DESC)
///
/// This function is super messy just because of all the type inference
/// so any contributions to clean this up would be very appreciated.
fn order_or_sort(
sort: Sort,
field: Field,
query: &dyn Query,
schema: &Schema,
searcher: &Searcher,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<DocumentHit>, usize)> {
let field_type = schema.get_field_entry(field).field_type();
if let Sort::Desc = sort {
return match field_type {
FieldType::I64(_) => {
let out: (Vec<(i64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::U64(_) => {
let out: (Vec<(u64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::F64(_) => {
let out: (Vec<(f64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::Date(_) => {
let out: (Vec<(DateTime, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
_ => Err(Error::msg("field is not a fast field")),
};
}
let out = match field_type {
FieldType::I64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.i64(field)
.expect("field exists");
move |doc: DocId| {
let value: i64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<i64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::U64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.u64(field)
.expect("field exists");
move |doc: DocId| {
let value: u64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<u64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::F64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.f64(field)
.expect("field exists");
move |doc: DocId| {
let value: f64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<f64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::Date(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.date(field)
.expect("field exists");
move |doc: DocId| {
let value: DateTime = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<DateTime>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
_ => return Err(Error::msg("field is not a fast field")),
};
return Ok(out);
}
/// The reader of the given index.
///
/// This manages all searches on the index, covering the concurrency
/// limiters and thread pool execution.
///
/// Each index should have only one `Reader` instance.
pub(crate) struct Reader {
/// The executor pool.
pool: crate::ReaderExecutor,
/// The query factory system.
query_handler: QueryBuilder,
}
impl Reader {
/// Creates a new reader from the given index context.
pub(crate) async fn create(ctx: &IndexContext) -> Result<Self> {
let reader: IndexReader = ctx
.index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.num_searchers(ctx.reader_ctx.max_concurrency)
.try_into()?;
info!(
"[ READER @ {} ] index reader created with reload policy=OnCommit, num_searchers={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
);
let pool = {
let pool = SearcherExecutorPool::create(
reader,
ctx.reader_ctx.reader_threads,
ctx.reader_ctx.max_concurrency,
)
.await?;
Arc::new(pool)
};
info!(
"[ READER @ {} ] executor pool has successfully started! max_concurrency={}, total_threads={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
ctx.reader_ctx.max_concurrency * ctx.reader_ctx.reader_threads
);
let query_ctx = ctx.query_ctx.clone();
let query_handler = QueryBuilder::new(
query_ctx,
ctx.stop_words.clone(),
ctx.correction_manager.clone(),
&ctx.index,
pool.clone(),
);
info!(
"[ QUERY-BUILDER @ {} ] query builder constructed with config: fast-fuzzy={} strip-stop-words={}.",
&ctx.name,
ctx.query_ctx.use_fast_fuzzy,
ctx.query_ctx.strip_stop_words,
);
Ok(Self {
pool,
query_handler,
})
}
/// Gets a singular document from the given id.
///
/// If no document is found an error is raised without context.
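///
/// A hedged usage sketch (the calling context is assumed, not shown in
/// this file):
///
/// ```ignore
/// let hit = reader.get_document(42).await?;
/// assert_eq!(hit.document_id, 42);
/// ```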
pub(crate) async fn get_document(&self, id: DocumentId) -> Result<DocumentHit> {
let id_field = self.query_handler.id_field();
let document = self
.pool
.spawn(move |searcher, executor| {
let qry = TermQuery::new(
Term::from_field_u64(id_field, id),
IndexRecordOption::Basic,
);
let mut results = searcher.search_with_executor(
&qry,
&TopDocs::with_limit(1),
executor,
)?;
if results.is_empty() {
return Err(Error::msg(format!(
"no document exists with id: '{}'",
id
)));
}
let (_, addr) = results.remove(0);
let doc = searcher.doc(addr)?;
let schema = searcher.schema();
Ok(schema.to_named_doc(&doc))
})
.await??;
Ok(DocumentHit {
doc: document,
document_id: id,
score: Some(1.0),
})
}
/// Searches the index reader with the given query payload.
///
/// The payload determines the behaviour of the query results.
/// The actual behaviour of how a query is built is upto the query handler
/// which will parse and interpret the given data.
pub(crate) async fn search(&self, qry: QueryPayload) -> Result<QueryResults> {
let start = std::time::Instant::now();
let limit = qry.limit;
let sort = qry.sort;
let order_by = qry.order_by;
let offset = qry.offset;
let query = self.query_handler.build_query(qry.query).await?;
let (hits, count) = self
.pool
.spawn(move |searcher, executor| {
let schema = searcher.schema();
let collector = TopDocs::with_limit(limit).and_offset(offset);
let order_by = order_by.map(|v| schema.get_field(&v));
let (hits, count) = if let Some(Some(field)) = order_by {
order_or_sort(
sort, field, &query, schema, &searcher, collector, executor,
)?
} else {
let (out, count) = searcher.search_with_executor(
&query,
&(collector, Count),
executor,
)?;
(process_search(&searcher, schema, out)?, count)
};
Ok::<_, Error>((hits, count))
})
.await??;
let elapsed = start.elapsed();
Ok(QueryResults {
time_taken: elapsed.as_secs_f32(),
hits,
count,
})
}
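// A hedged usage sketch for the method above (names assumed):
//
//     let results = reader.search(payload).await?;
//     println!("{} hits in {:.3}s", results.count, results.time_taken);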
} | impl Default for Sort {
fn default() -> Self {
Self::Desc
}
} | random_line_split |
reader.rs | use std::cmp::Reverse;
use std::sync::Arc;
use aexecutor::SearcherExecutorPool;
use anyhow::{Error, Result};
use serde::{Deserialize, Serialize};
use tantivy::collector::{Count, TopDocs};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::{Query, TermQuery};
use tantivy::schema::{Field, FieldType, IndexRecordOption, Schema, Value};
use tantivy::{
DateTime,
DocAddress,
DocId,
Executor,
IndexReader,
ReloadPolicy,
Searcher,
SegmentReader,
Term,
};
use crate::helpers::{AsScore, Validate};
use crate::query::{DocumentId, QueryBuilder, QuerySelector};
use crate::structures::{DocumentHit, IndexContext};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ReaderContext {
/// The number of reader threads to use.
///
/// The current implementation is rather naive:
/// multithreading works by splitting a search into as many tasks as there are segments.
/// It cannot make search faster if your index consists of one large segment.
/// Also, keep in mind that multithreading a single query across several threads will not
/// improve your throughput; it can actually hurt it.
/// It will, however, decrease the average response time.
#[serde(default = "ReaderContext::default_reader_threads")]
reader_threads: usize,
/// The maximum searches that can be done at any one time.
max_concurrency: usize,
}
impl Validate for ReaderContext {
fn validate(&self) -> Result<()> {
if self.max_concurrency == 0 {
return Err(Error::msg("max concurrency must be at least 1."));
}
Ok(())
}
}
impl ReaderContext {
fn default_reader_threads() -> usize {
1
}
}
/// A given query payload that describes how the reader should
/// search the index.
#[derive(Debug, Deserialize)]
pub struct QueryPayload {
/// The query(s) itself.
query: QuerySelector,
/// The amount of results to limit by.
#[serde(default = "QueryPayload::default_limit")]
limit: usize,
/// The amount of documents to skip before getting the results.
#[serde(default)]
offset: usize,
/// A specified field to order results by; this defaults to the
/// score of the indexed documents (relevancy).
order_by: Option<String>,
/// How to sort the data (asc/desc).
#[serde(default)]
sort: Sort,
}
impl QueryPayload {
fn default_limit() -> usize {
20
}
}
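// A hedged example of the JSON this payload deserializes from (the query
// object's shape depends on QuerySelector and is elided here):
//
//     { "query": { ... }, "limit": 20, "offset": 0, "order_by": "id", "sort": "asc" }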
/// What order to sort the returned data.
#[derive(Debug, Copy, Clone, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Sort {
/// Sort the data in ascending order.
Asc,
/// Sort the data in descending order. (Default)
Desc,
}
impl Default for Sort {
fn default() -> Self {
Self::Desc
}
}
#[derive(Debug, Serialize)]
pub struct QueryResults {
/// The retrieved documents.
pub(crate) hits: Vec<DocumentHit>,
/// The total number of documents matching the search.
count: usize,
/// The amount of time taken to search in seconds.
time_taken: f32,
}
impl QueryResults {
#[inline]
pub fn len(&self) -> usize {
self.hits.len()
}
}
/// Attaches an order by clause to the collector.
///
/// The collected values will be returned in order according to the
/// given field's value.
fn order_and_search<R: AsScore + tantivy::fastfield::FastValue>(
searcher: &Searcher,
field: Field,
query: &dyn Query,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<(R, DocAddress)>, usize)> {
let collector = collector.order_by_fast_field(field);
searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)
}
/// Performs the search operation and processes the returned results.
fn process_search<S: AsScore>(
searcher: &Searcher,
schema: &Schema,
top_docs: Vec<(S, DocAddress)>,
) -> Result<Vec<DocumentHit>> {
let mut hits = Vec::with_capacity(top_docs.len());
for (ratio, ref_address) in top_docs {
let retrieved_doc = searcher.doc(ref_address)?;
let mut doc = schema.to_named_doc(&retrieved_doc);
let id = doc.0
.remove("_id")
.ok_or_else(|| Error::msg("document has been mislabeled (missing primary key '_id'), the dataset is invalid"))?;
if let Value::U64(v) = id[0] {
hits.push(DocumentHit {
doc,
document_id: v,
score: ratio.as_score(),
});
} else {
return Err(Error::msg("document has been mislabeled (missing identifier tag), the dataset is invalid"));
}
}
Ok(hits)
}
/// Orders the search results by the given field with a given sort (ASC, DESC)
///
/// This function is super messy just because of all the type inference
/// so any contributions to clean this up would be very appreciated.
fn order_or_sort(
sort: Sort,
field: Field,
query: &dyn Query,
schema: &Schema,
searcher: &Searcher,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<DocumentHit>, usize)> {
let field_type = schema.get_field_entry(field).field_type();
if let Sort::Desc = sort {
return match field_type {
FieldType::I64(_) => {
let out: (Vec<(i64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::U64(_) => {
let out: (Vec<(u64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::F64(_) => {
let out: (Vec<(f64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::Date(_) => | ,
_ => Err(Error::msg("field is not a fast field")),
};
}
let out = match field_type {
FieldType::I64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.i64(field)
.expect("field exists");
move |doc: DocId| {
let value: i64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<i64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::U64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.u64(field)
.expect("field exists");
move |doc: DocId| {
let value: u64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<u64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::F64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.f64(field)
.expect("field exists");
move |doc: DocId| {
let value: f64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<f64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::Date(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.date(field)
.expect("field exists");
move |doc: DocId| {
let value: DateTime = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<DateTime>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
_ => return Err(Error::msg("field is not a fast field")),
};
return Ok(out);
}
/// The reader of the given index.
///
/// This manages all searches on the index which encompasses the concurrency
/// limiters and thread pool execution.
///
/// Each index should only have one `Reader` instance.
pub(crate) struct Reader {
/// The executor pool.
pool: crate::ReaderExecutor,
/// The query factory system.
query_handler: QueryBuilder,
}
impl Reader {
/// Creates a new reader from the given index context.
pub(crate) async fn create(ctx: &IndexContext) -> Result<Self> {
let reader: IndexReader = ctx
.index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.num_searchers(ctx.reader_ctx.max_concurrency)
.try_into()?;
info!(
"[ READER @ {} ] index reader created with reload policy=OnCommit, num_searchers={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
);
let pool = {
let pool = SearcherExecutorPool::create(
reader,
ctx.reader_ctx.reader_threads,
ctx.reader_ctx.max_concurrency,
)
.await?;
Arc::new(pool)
};
info!(
"[ READER @ {} ] executor pool has successfully started! max_concurrency={}, total_threads={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
ctx.reader_ctx.max_concurrency * ctx.reader_ctx.reader_threads
);
let query_ctx = ctx.query_ctx.clone();
let query_handler = QueryBuilder::new(
query_ctx,
ctx.stop_words.clone(),
ctx.correction_manager.clone(),
&ctx.index,
pool.clone(),
);
info!(
"[ QUERY-BUILDER @ {} ] query builder constructed with config: fast-fuzzy={} strip-stop-words={}.",
&ctx.name,
ctx.query_ctx.use_fast_fuzzy,
ctx.query_ctx.strip_stop_words,
);
Ok(Self {
pool,
query_handler,
})
}
/// Gets a singular document from the given id.
///
/// If no document is found an error is raised without context.
pub(crate) async fn get_document(&self, id: DocumentId) -> Result<DocumentHit> {
let id_field = self.query_handler.id_field();
let document = self
.pool
.spawn(move |searcher, executor| {
let qry = TermQuery::new(
Term::from_field_u64(id_field, id),
IndexRecordOption::Basic,
);
let mut results = searcher.search_with_executor(
&qry,
&TopDocs::with_limit(1),
executor,
)?;
if results.is_empty() {
return Err(Error::msg(format!(
"no document exists with id: '{}'",
id
)));
}
let (_, addr) = results.remove(0);
let doc = searcher.doc(addr)?;
let schema = searcher.schema();
Ok(schema.to_named_doc(&doc))
})
.await??;
Ok(DocumentHit {
doc: document,
document_id: id,
score: Some(1.0),
})
}
/// Searches the index reader with the given query payload.
///
/// The payload determines the behaviour of the query results.
/// The actual behaviour of how a query is built is up to the query handler
/// which will parse and interpret the given data.
pub(crate) async fn search(&self, qry: QueryPayload) -> Result<QueryResults> {
let start = std::time::Instant::now();
let limit = qry.limit;
let sort = qry.sort;
let order_by = qry.order_by;
let offset = qry.offset;
let query = self.query_handler.build_query(qry.query).await?;
let (hits, count) = self
.pool
.spawn(move |searcher, executor| {
let schema = searcher.schema();
let collector = TopDocs::with_limit(limit).and_offset(offset);
let order_by = order_by.map(|v| schema.get_field(&v));
let (hits, count) = if let Some(Some(field)) = order_by {
order_or_sort(
sort, field, &query, schema, &searcher, collector, executor,
)?
} else {
let (out, count) = searcher.search_with_executor(
&query,
&(collector, Count),
executor,
)?;
(process_search(&searcher, schema, out)?, count)
};
Ok::<_, Error>((hits, count))
})
.await??;
let elapsed = start.elapsed();
Ok(QueryResults {
time_taken: elapsed.as_secs_f32(), // filled in by handler later
hits,
count,
})
}
}
| {
let out: (Vec<(DateTime, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
} | conditional_block |
reader.rs | use std::cmp::Reverse;
use std::sync::Arc;
use aexecutor::SearcherExecutorPool;
use anyhow::{Error, Result};
use serde::{Deserialize, Serialize};
use tantivy::collector::{Count, TopDocs};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::{Query, TermQuery};
use tantivy::schema::{Field, FieldType, IndexRecordOption, Schema, Value};
use tantivy::{
DateTime,
DocAddress,
DocId,
Executor,
IndexReader,
ReloadPolicy,
Searcher,
SegmentReader,
Term,
};
use crate::helpers::{AsScore, Validate};
use crate::query::{DocumentId, QueryBuilder, QuerySelector};
use crate::structures::{DocumentHit, IndexContext};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ReaderContext {
/// The number of reader threads to use.
///
/// The current implementation is rather naive:
/// multithreading is done by splitting search into as many tasks as there are segments.
/// It is powerless at making search faster if your index consists of one large segment.
/// Also, keep in mind that multithreading a single query across several threads will not
/// improve your throughput. It can actually hurt it.
/// It will, however, decrease the average response time.
#[serde(default = "ReaderContext::default_reader_threads")]
reader_threads: usize,
/// The maximum number of searches that can be run at any one time.
max_concurrency: usize,
}
impl Validate for ReaderContext {
fn | (&self) -> Result<()> {
if self.max_concurrency == 0 {
return Err(Error::msg("max concurrency must be at least 1."));
}
Ok(())
}
}
impl ReaderContext {
fn default_reader_threads() -> usize {
1
}
}
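// A hypothetical JSON config fragment that deserializes into this struct
// (field names match the serde attributes above):
//
//     { "reader_threads": 2, "max_concurrency": 8 }
//
// With these values the searcher executor pool runs 8 * 2 = 16 threads in
// total (see the `total_threads` log line in `Reader::create` below).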
/// A given query payload that describes how the reader should
/// search the index.
#[derive(Debug, Deserialize)]
pub struct QueryPayload {
/// The query(s) itself.
query: QuerySelector,
/// The amount of results to limit by.
#[serde(default = "QueryPayload::default_limit")]
limit: usize,
/// The amount of documents to skip before getting the results.
#[serde(default)]
offset: usize,
/// A specified field to order results by; this defaults to the
/// score of the indexed documents (relevancy).
order_by: Option<String>,
/// How to sort the data (asc/desc).
#[serde(default)]
sort: Sort,
}
impl QueryPayload {
fn default_limit() -> usize {
20
}
}
/// What order to sort the returned data.
#[derive(Debug, Copy, Clone, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Sort {
/// Sort the data in ascending order.
Asc,
/// Sort the data in descending order. (Default)
Desc,
}
impl Default for Sort {
fn default() -> Self {
Self::Desc
}
}
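// A minimal sketch of the wire format (assumes `serde_json` is available as a
// dev-dependency); `rename_all = "lowercase"` maps the variants to "asc"/"desc":
#[cfg(test)]
mod sort_format_tests {
use super::*;
#[test]
fn deserializes_lowercase_variants() {
let asc: Sort = serde_json::from_str("\"asc\"").unwrap();
let desc: Sort = serde_json::from_str("\"desc\"").unwrap();
assert!(matches!(asc, Sort::Asc));
assert!(matches!(desc, Sort::Desc));
}
}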
#[derive(Debug, Serialize)]
pub struct QueryResults {
/// The retrieved documents.
pub(crate) hits: Vec<DocumentHit>,
/// The total amount of documents matching the search
count: usize,
/// The amount of time taken to search in seconds.
time_taken: f32,
}
impl QueryResults {
#[inline]
pub fn len(&self) -> usize {
self.hits.len()
}
}
/// Attaches an order by clause to the collector.
///
/// The collected values will be returned in the order according to the
/// given field value.
fn order_and_search<R: AsScore + tantivy::fastfield::FastValue>(
searcher: &Searcher,
field: Field,
query: &dyn Query,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<(R, DocAddress)>, usize)> {
let collector = collector.order_by_fast_field(field);
searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)
}
/// Performs the search operation and processes the returned results.
fn process_search<S: AsScore>(
searcher: &Searcher,
schema: &Schema,
top_docs: Vec<(S, DocAddress)>,
) -> Result<Vec<DocumentHit>> {
let mut hits = Vec::with_capacity(top_docs.len());
for (ratio, ref_address) in top_docs {
let retrieved_doc = searcher.doc(ref_address)?;
let mut doc = schema.to_named_doc(&retrieved_doc);
let id = doc.0
.remove("_id")
.ok_or_else(|| Error::msg("document has been mislabeled (missing primary key '_id'), the dataset is invalid"))?;
if let Value::U64(v) = id[0] {
hits.push(DocumentHit {
doc,
document_id: v,
score: ratio.as_score(),
});
} else {
return Err(Error::msg("document has been mislabeled (missing identifier tag), the dataset is invalid"));
}
}
Ok(hits)
}
/// Orders the search results by the given field with a given sort (ASC, DESC)
///
/// This function is super messy just because of all the type inference
/// so any contributions to clean this up would be very appreciated.
fn order_or_sort(
sort: Sort,
field: Field,
query: &dyn Query,
schema: &Schema,
searcher: &Searcher,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<DocumentHit>, usize)> {
let field_type = schema.get_field_entry(field).field_type();
if let Sort::Desc = sort {
return match field_type {
FieldType::I64(_) => {
let out: (Vec<(i64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::U64(_) => {
let out: (Vec<(u64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::F64(_) => {
let out: (Vec<(f64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::Date(_) => {
let out: (Vec<(DateTime, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
_ => Err(Error::msg("field is not a fast field")),
};
}
let out = match field_type {
FieldType::I64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.i64(field)
.expect("field exists");
move |doc: DocId| {
let value: i64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<i64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::U64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.u64(field)
.expect("field exists");
move |doc: DocId| {
let value: u64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<u64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::F64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.f64(field)
.expect("field exists");
move |doc: DocId| {
let value: f64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<f64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::Date(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.date(field)
.expect("field exists");
move |doc: DocId| {
let value: DateTime = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<DateTime>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
_ => return Err(Error::msg("field is not a fast field")),
};
return Ok(out);
}
/// The reader of the given index.
///
/// This manages all searches on the index which encompasses the concurrency
/// limiters and thread pool execution.
///
/// Each index should only have one `Reader` instance.
pub(crate) struct Reader {
/// The executor pool.
pool: crate::ReaderExecutor,
/// The query factory system.
query_handler: QueryBuilder,
}
impl Reader {
/// Creates a new reader from the given index context.
pub(crate) async fn create(ctx: &IndexContext) -> Result<Self> {
let reader: IndexReader = ctx
.index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.num_searchers(ctx.reader_ctx.max_concurrency)
.try_into()?;
info!(
"[ READER @ {} ] index reader created with reload policy=OnCommit, num_searchers={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
);
let pool = {
let pool = SearcherExecutorPool::create(
reader,
ctx.reader_ctx.reader_threads,
ctx.reader_ctx.max_concurrency,
)
.await?;
Arc::new(pool)
};
info!(
"[ READER @ {} ] executor pool has successfully started! max_concurrency={}, total_threads={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
ctx.reader_ctx.max_concurrency * ctx.reader_ctx.reader_threads
);
let query_ctx = ctx.query_ctx.clone();
let query_handler = QueryBuilder::new(
query_ctx,
ctx.stop_words.clone(),
ctx.correction_manager.clone(),
&ctx.index,
pool.clone(),
);
info!(
"[ QUERY-BUILDER @ {} ] query builder constructed with config: fast-fuzzy={} strip-stop-words={}.",
&ctx.name,
ctx.query_ctx.use_fast_fuzzy,
ctx.query_ctx.strip_stop_words,
);
Ok(Self {
pool,
query_handler,
})
}
/// Gets a singular document from the given id.
///
/// If no document is found an error is raised without context.
pub(crate) async fn get_document(&self, id: DocumentId) -> Result<DocumentHit> {
let id_field = self.query_handler.id_field();
let document = self
.pool
.spawn(move |searcher, executor| {
let qry = TermQuery::new(
Term::from_field_u64(id_field, id),
IndexRecordOption::Basic,
);
let mut results = searcher.search_with_executor(
&qry,
&TopDocs::with_limit(1),
executor,
)?;
if results.is_empty() {
return Err(Error::msg(format!(
"no document exists with id: '{}'",
id
)));
}
let (_, addr) = results.remove(0);
let doc = searcher.doc(addr)?;
let schema = searcher.schema();
Ok(schema.to_named_doc(&doc))
})
.await??;
Ok(DocumentHit {
doc: document,
document_id: id,
score: Some(1.0),
})
}
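// Call-side sketch (hypothetical id; as noted above, the error carries no
// extra context when the document is missing):
//
//     let hit = reader.get_document(42).await?;
//     assert_eq!(hit.document_id, 42);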
/// Searches the index reader with the given query payload.
///
/// The payload determines the behaviour of the query results.
/// The actual behaviour of how a query is built is up to the query handler
/// which will parse and interpret the given data.
pub(crate) async fn search(&self, qry: QueryPayload) -> Result<QueryResults> {
let start = std::time::Instant::now();
let limit = qry.limit;
let sort = qry.sort;
let order_by = qry.order_by;
let offset = qry.offset;
let query = self.query_handler.build_query(qry.query).await?;
let (hits, count) = self
.pool
.spawn(move |searcher, executor| {
let schema = searcher.schema();
let collector = TopDocs::with_limit(limit).and_offset(offset);
let order_by = order_by.map(|v| schema.get_field(&v));
let (hits, count) = if let Some(Some(field)) = order_by {
order_or_sort(
sort, field, &query, schema, &searcher, collector, executor,
)?
} else {
let (out, count) = searcher.search_with_executor(
&query,
&(collector, Count),
executor,
)?;
(process_search(&searcher, schema, out)?, count)
};
Ok::<_, Error>((hits, count))
})
.await??;
let elapsed = start.elapsed();
Ok(QueryResults {
time_taken: elapsed.as_secs_f32(), // filled in by handler later
hits,
count,
})
}
}
| validate | identifier_name |
reader.rs | use std::cmp::Reverse;
use std::sync::Arc;
use aexecutor::SearcherExecutorPool;
use anyhow::{Error, Result};
use serde::{Deserialize, Serialize};
use tantivy::collector::{Count, TopDocs};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::{Query, TermQuery};
use tantivy::schema::{Field, FieldType, IndexRecordOption, Schema, Value};
use tantivy::{
DateTime,
DocAddress,
DocId,
Executor,
IndexReader,
ReloadPolicy,
Searcher,
SegmentReader,
Term,
};
use crate::helpers::{AsScore, Validate};
use crate::query::{DocumentId, QueryBuilder, QuerySelector};
use crate::structures::{DocumentHit, IndexContext};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ReaderContext {
/// The number of reader threads to use.
///
/// The current implementation is rather naive:
/// multithreading is done by splitting search into as many tasks as there are segments.
/// It is powerless at making search faster if your index consists of one large segment.
/// Also, keep in mind that multithreading a single query across several threads will not
/// improve your throughput. It can actually hurt it.
/// It will, however, decrease the average response time.
#[serde(default = "ReaderContext::default_reader_threads")]
reader_threads: usize,
/// The maximum number of searches that can be run at any one time.
max_concurrency: usize,
}
impl Validate for ReaderContext {
fn validate(&self) -> Result<()> {
if self.max_concurrency == 0 {
return Err(Error::msg("max concurrency must be at least 1."));
}
Ok(())
}
}
impl ReaderContext {
fn default_reader_threads() -> usize {
1
}
}
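// Sketch of the invariant enforced by `validate` above (illustrative test;
// it constructs the context directly since both fields are visible within
// this module):
#[cfg(test)]
mod reader_context_tests {
use super::*;
#[test]
fn zero_max_concurrency_is_rejected() {
let ctx = ReaderContext { reader_threads: 1, max_concurrency: 0 };
assert!(ctx.validate().is_err());
}
}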
/// A given query payload that describes how the reader should
/// search the index.
#[derive(Debug, Deserialize)]
pub struct QueryPayload {
/// The query(s) itself.
query: QuerySelector,
/// The amount of results to limit by.
#[serde(default = "QueryPayload::default_limit")]
limit: usize,
/// The amount of documents to skip before getting the results.
#[serde(default)]
offset: usize,
/// A specified field to order results by; this defaults to the
/// score of the indexed documents (relevancy).
order_by: Option<String>,
/// How to sort the data (asc/desc).
#[serde(default)]
sort: Sort,
}
impl QueryPayload {
fn default_limit() -> usize {
20
}
}
/// What order to sort the returned data.
#[derive(Debug, Copy, Clone, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Sort {
/// Sort the data in ascending order.
Asc,
/// Sort the data in descending order. (Default)
Desc,
}
impl Default for Sort {
fn default() -> Self {
Self::Desc
}
}
#[derive(Debug, Serialize)]
pub struct QueryResults {
/// The retrieved documents.
pub(crate) hits: Vec<DocumentHit>,
/// The total amount of documents matching the search
count: usize,
/// The amount of time taken to search in seconds.
time_taken: f32,
}
impl QueryResults {
#[inline]
pub fn len(&self) -> usize {
self.hits.len()
}
}
/// Attaches an order by clause to the collector.
///
/// The collected values will be returned in the order according to the
/// given field value.
fn order_and_search<R: AsScore + tantivy::fastfield::FastValue>(
searcher: &Searcher,
field: Field,
query: &dyn Query,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<(R, DocAddress)>, usize)> {
let collector = collector.order_by_fast_field(field);
searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)
}
/// Performs the search operation and processes the returned results.
fn process_search<S: AsScore>(
searcher: &Searcher,
schema: &Schema,
top_docs: Vec<(S, DocAddress)>,
) -> Result<Vec<DocumentHit>> {
let mut hits = Vec::with_capacity(top_docs.len());
for (ratio, ref_address) in top_docs {
let retrieved_doc = searcher.doc(ref_address)?;
let mut doc = schema.to_named_doc(&retrieved_doc);
let id = doc.0
.remove("_id")
.ok_or_else(|| Error::msg("document has been mislabeled (missing primary key '_id'), the dataset is invalid"))?;
if let Value::U64(v) = id[0] {
hits.push(DocumentHit {
doc,
document_id: v,
score: ratio.as_score(),
});
} else {
return Err(Error::msg("document has been mislabeled (missing identifier tag), the dataset is invalid"));
}
}
Ok(hits)
}
/// Orders the search results by the given field with a given sort (ASC, DESC)
///
/// This function is super messy just because of all the type inference
/// so any contributions to clean this up would be very appreciated.
fn order_or_sort(
sort: Sort,
field: Field,
query: &dyn Query,
schema: &Schema,
searcher: &Searcher,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<DocumentHit>, usize)> {
let field_type = schema.get_field_entry(field).field_type();
if let Sort::Desc = sort {
return match field_type {
FieldType::I64(_) => {
let out: (Vec<(i64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::U64(_) => {
let out: (Vec<(u64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::F64(_) => {
let out: (Vec<(f64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::Date(_) => {
let out: (Vec<(DateTime, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
_ => Err(Error::msg("field is not a fast field")),
};
}
let out = match field_type {
FieldType::I64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.i64(field)
.expect("field exists");
move |doc: DocId| {
let value: i64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<i64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::U64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.u64(field)
.expect("field exists");
move |doc: DocId| {
let value: u64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<u64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::F64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.f64(field)
.expect("field exists");
move |doc: DocId| {
let value: f64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<f64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::Date(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.date(field)
.expect("field exists");
move |doc: DocId| {
let value: DateTime = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<DateTime>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
_ => return Err(Error::msg("field is not a fast field")),
};
return Ok(out);
}
/// The reader of the given index.
///
/// This manages all searches on the index which encompasses the concurrency
/// limiters and thread pool execution.
///
/// Each index should only have one `Reader` instance.
pub(crate) struct Reader {
/// The executor pool.
pool: crate::ReaderExecutor,
/// The query factory system.
query_handler: QueryBuilder,
}
impl Reader {
/// Creates a new reader from the given index context.
pub(crate) async fn create(ctx: &IndexContext) -> Result<Self> {
let reader: IndexReader = ctx
.index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.num_searchers(ctx.reader_ctx.max_concurrency)
.try_into()?;
info!(
"[ READER @ {} ] index reader created with reload policy=OnCommit, num_searchers={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
);
let pool = {
let pool = SearcherExecutorPool::create(
reader,
ctx.reader_ctx.reader_threads,
ctx.reader_ctx.max_concurrency,
)
.await?;
Arc::new(pool)
};
info!(
"[ READER @ {} ] executor pool has successfully started! max_concurrency={}, total_threads={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
ctx.reader_ctx.max_concurrency * ctx.reader_ctx.reader_threads
);
let query_ctx = ctx.query_ctx.clone();
let query_handler = QueryBuilder::new(
query_ctx,
ctx.stop_words.clone(),
ctx.correction_manager.clone(),
&ctx.index,
pool.clone(),
);
info!(
"[ QUERY-BUILDER @ {} ] query builder constructed with config: fast-fuzzy={} strip-stop-words={}.",
&ctx.name,
ctx.query_ctx.use_fast_fuzzy,
ctx.query_ctx.strip_stop_words,
);
Ok(Self {
pool,
query_handler,
})
}
/// Gets a singular document from the given id.
///
/// If no document is found an error is raised without context.
pub(crate) async fn get_document(&self, id: DocumentId) -> Result<DocumentHit> | }
let (_, addr) = results.remove(0);
let doc = searcher.doc(addr)?;
let schema = searcher.schema();
Ok(schema.to_named_doc(&doc))
})
.await??;
Ok(DocumentHit {
doc: document,
document_id: id,
score: Some(1.0),
})
}
/// Searches the index reader with the given query payload.
///
/// The payload determines the behaviour of the query results.
/// The actual behaviour of how a query is built is up to the query handler
/// which will parse and interpret the given data.
pub(crate) async fn search(&self, qry: QueryPayload) -> Result<QueryResults> {
let start = std::time::Instant::now();
let limit = qry.limit;
let sort = qry.sort;
let order_by = qry.order_by;
let offset = qry.offset;
let query = self.query_handler.build_query(qry.query).await?;
let (hits, count) = self
.pool
.spawn(move |searcher, executor| {
let schema = searcher.schema();
let collector = TopDocs::with_limit(limit).and_offset(offset);
let order_by = order_by.map(|v| schema.get_field(&v));
let (hits, count) = if let Some(Some(field)) = order_by {
order_or_sort(
sort, field, &query, schema, &searcher, collector, executor,
)?
} else {
let (out, count) = searcher.search_with_executor(
&query,
&(collector, Count),
executor,
)?;
(process_search(&searcher, schema, out)?, count)
};
Ok::<_, Error>((hits, count))
})
.await??;
let elapsed = start.elapsed();
Ok(QueryResults {
time_taken: elapsed.as_secs_f32(), // filled in by handler later
hits,
count,
})
}
}
| {
let id_field = self.query_handler.id_field();
let document = self
.pool
.spawn(move |searcher, executor| {
let qry = TermQuery::new(
Term::from_field_u64(id_field, id),
IndexRecordOption::Basic,
);
let mut results = searcher.search_with_executor(
&qry,
&TopDocs::with_limit(1),
executor,
)?;
if results.is_empty() {
return Err(Error::msg(format!(
"no document exists with id: '{}'",
id
))); | identifier_body |
lib.rs | _module::{self, Backend, DataId, FuncId, Linkage, Module};
use cranelift_object::{ObjectBackend, ObjectBuilder};
use saltwater_parser::arch::TARGET;
use saltwater_parser::{Opt, Program};
use saltwater_parser::data::{
hir::{Declaration, Initializer, Stmt, Symbol},
types::FunctionType,
StorageClass, *,
};
pub(crate) fn | (jit: bool) -> Box<dyn TargetIsa + 'static> {
let mut flags_builder = cranelift::codegen::settings::builder();
// `simplejit` requires non-PIC code
if !jit {
// allow creating shared libraries
flags_builder
.enable("is_pic")
.expect("is_pic should be a valid option");
}
// use debug assertions
flags_builder
.enable("enable_verifier")
.expect("enable_verifier should be a valid option");
// don't emit call to __cranelift_probestack
flags_builder
.set("enable_probestack", "false")
.expect("enable_probestack should be a valid option");
let flags = Flags::new(flags_builder);
cranelift::codegen::isa::lookup(TARGET)
.unwrap_or_else(|_| panic!("platform not supported: {}", TARGET))
.finish(flags)
}
pub fn initialize_aot_module(name: String) -> Module<ObjectBackend> {
let builder = ObjectBuilder::new(
get_isa(false),
name,
cranelift_module::default_libcall_names(),
);
Module::new(builder.expect("unsupported binary format or target architecture"))
}
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
declarations: HashMap<Symbol, Id>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
declarations: HashMap::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
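// For example, compiling the following C translation unit exercises the cases
// in order (hypothetical input):
//   int f(void);          // case 1: declared as an import
//   int f(void);          // case 2: no-op
//   int f(void) { ... }   // case 3: declared as an export (or local if static)
//   int f(void);          // case 4: no-op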
fn declare_func(&mut self, symbol: Symbol, is_definition: bool) -> CompileResult<FuncId> {
use saltwater_parser::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.declarations.get(&symbol) {
return Ok(*func_id);
}
}
let metadata = symbol.get();
let func_type = match &metadata.ctype {
Type::Function(func_type) => func_type,
_ => unreachable!("bug in backend: only functions should be passed to `declare_func`"),
};
let signature = func_type.signature(self.module.isa());
let linkage = match metadata.storage_class {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(metadata.id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.declarations.insert(symbol, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
let meta = decl.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
return Ok(());
}
if let Type::Function(_) = &meta.ctype {
self.declare_func(decl.symbol, false)?;
return Ok(());
}
let u64_size = match meta.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.declarations.insert(decl.symbol, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: &[Symbol],
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.get().ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (¶m, ir_val) in params.iter().zip(ir_vals) {
let u64_size = match param.get().ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.declarations.insert(param, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
symbol: Symbol,
func_type: &FunctionType,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let func_id = self.declare_func(symbol, true)?;
// TODO: maybe `declare_func` should take a `signature` after all?
// This just calculates it twice, it's probably fine
let signature = func_type.signature(self.module.isa());
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boiler plate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(
// TODO: get rid of this clone
&func_type.params,
func_start,
&location,
&mut builder,
)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
let id = symbol.get().id;
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("ir: {}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
let mut trap_sink = codegen::binemit::NullTrapSink {};
if let Err(err) = self
.module
.define_function(func_id, &mut ctx, &mut trap_sink)
{
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
pub type Product = <cranelift_object::ObjectBackend as Backend>::Product;
/// Compile and return the declarations and warnings.
pub fn compile<B: Backend>(module: Module<B>, buf: &str, opt: Opt) -> Program<Module<B>> {
use saltwater_parser::{check_semantics, vec_deque};
let debug_asm = opt.debug_asm;
let mut program = check_semantics(buf, opt);
let hir = match program.result {
Ok(hir) => hir,
Err(err) => {
return Program {
result: Err(err),
warnings: program.warnings,
files: program.files,
}
}
};
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug_asm);
for decl in hir {
let meta = decl.data.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
continue;
}
let current = match &meta.ctype {
Type::Function(func_type) => match decl.data.init {
Some(Initializer::FunctionBody(stmts)) => {
compiler.compile_func(decl.data.symbol, &func_type, stmts, decl.location)
}
None => compiler.declare_func(decl.data.symbol, false).map(|_| ()),
_ => unreachable!("functions can only be initialized by a FunctionBody"),
},
Type::Void | Type::Error => unreachable!("parser let an incomplete type through"),
_ => {
if let Some(Initializer::FunctionBody(_)) = &decl.data.init {
unreachable!("only functions should have a function body")
}
compiler.store_static(decl.data.symbol, decl.data.init, decl.location)
}
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
let (result, ir_warnings) = if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
};
program.warnings.extend(ir_warnings);
Program {
result: result.map_err(|errs| vec_deque![errs]),
warnings: program.warnings,
files: program.files,
}
}
pub fn assemble(product: Product, output: &Path) -> Result<(), saltwater_parser::Error> {
use std::fs::File;
use std::io::{self, Write};
let bytes = product.emit().map_err(saltwater_parser::Error::Platform)?;
File::create(output)?
.write_all(&bytes)
.map_err(io::Error::into)
}
pub fn link(obj_file: &Path, output: &Path) -> Result<(), std::io::Error> {
use std::io::{Error, ErrorKind};
use std::process::Command;
// link the .o file using the host linker
let status = Command::new("cc")
.args(&[&obj_file, Path::new("-o"), output])
.status()
.map_err(|err| {
if err.kind() == ErrorKind::NotFound {
Error::new(
ErrorKind::NotFound,
"could not find host cc (for linking). Is it on your PATH?",
)
} else {
err
}
})?;
if !status.success() {
Err(Error::new(ErrorKind::Other, "linking program failed"))
} else {
Ok(())
}
}
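// End-to-end usage sketch (hypothetical paths; assumes cranelift's
// `Module::finish` is how the `Product` is obtained from a compiled module):
//
//     let module = initialize_aot_module("example".to_owned());
//     let program = compile(module, "int main(void) { return 0; }", Opt::default());
//     let product = program.result.expect("compile failed").finish();
//     assemble(product, Path::new("out.o")).expect("emitting object file failed");
//     link(Path::new("out.o"), Path::new("a.out")).expect("linking failed");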
#[cfg(feature = "jit")]
pub use jit::*;
#[cfg(feature = "jit")]
mod jit {
use super::*;
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
use std::convert::TryFrom;
use std::rc::Rc;
pub fn initialize_jit_module() -> Module<SimpleJITBackend> {
let libcall_names = cranelift_module::default_libcall_names();
Module::new(SimpleJITBuilder::with_isa(get_isa(true), libcall_names))
}
/// Structure used to handle compiling C code to memory instead of to disk.
///
/// You can use [`from_string`] to create a JIT instance.
/// Alternatively, if you don't care about compile warnings, you can use `JIT::try_from` instead.
/// If you already have a `Module`, you can use `JIT::from` to avoid having to `unwrap()`.
///
/// JIT stands for 'Just In Time' compiled, the way that Java and JavaScript work.
///
/// [`from_string`]: #method.from_string
pub struct JIT {
module: Module<SimpleJITBackend>,
}
impl From<Module<SimpleJITBackend>> for JIT {
fn from(module: Module<SimpleJITBackend>) -> Self {
Self { module }
}
}
impl TryFrom<Rc<str>> for JIT {
type Error = saltwater_parser::Error;
fn try_from(source: Rc<str>) -> Result<JIT, Self::Error> {
JIT::from_string(source, Opt::default()).result
}
}
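// Usage sketch (hypothetical C source; casting the raw pointer assumes the
// Rust signature matches the compiled C signature):
//
//     let mut jit = JIT::try_from(Rc::from("int main(void) { return 42; }"))?;
//     jit.finalize();
//     let ptr = jit.get_compiled_function("main").expect("main not found");
//     let main: extern "C" fn() -> i32 = unsafe { std::mem::transmute(ptr) };
//     assert_eq!(main(), 42);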
impl JIT {
/// Compile string and return JITed code.
pub fn from_string<R: Into<Rc<str>>>(
source: R,
opt: Opt,
) -> Program<Self, saltwater_parser::Error> {
let source = source.into();
let module = initialize_jit_module();
let program = compile(module, &source, opt);
let result = match program.result {
Ok(module) => Ok(JIT::from(module)),
Err(errs) => Err(errs.into()),
};
Program {
result,
warnings: program.warnings,
files: program.files,
}
}
/// Invoke this function before trying to get access to "new" compiled functions.
pub fn finalize(&mut self) {
self.module.finalize_definitions();
}
/// Get a compiled function. If this function doesn't exist then `None` is returned, otherwise its address is returned.
///
/// # Panics
/// Panics if function is not compiled (finalized). Try to invoke `finalize` before using `get_compiled_function`.
pub fn get_compiled_function(&mut self, name: &str) -> Option<*const u8> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Func(id)) = name {
Some(self.module.get_finalized_function(id))
} else {
None
}
}
/// Get compiled static data. If this data doesn't exist then `None` is returned, otherwise its address and size are returned.
pub fn get_compiled_data(&mut self, name: &str) -> Option<(*mut u8, usize)> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Data(id)) = name {
Some(self.module.get_finalized_data(id))
} else {
None
}
}
/// Given a module, run the `main` function.
///
/// This automatically calls `self.finalize()`.
/// If `main()` does not exist in the module, returns `None`; otherwise returns the exit code.
///
| get_isa | identifier_name |
lib.rs | _module::{self, Backend, DataId, FuncId, Linkage, Module};
use cranelift_object::{ObjectBackend, ObjectBuilder};
use saltwater_parser::arch::TARGET;
use saltwater_parser::{Opt, Program};
use saltwater_parser::data::{
hir::{Declaration, Initializer, Stmt, Symbol},
types::FunctionType,
StorageClass, *,
};
pub(crate) fn get_isa(jit: bool) -> Box<dyn TargetIsa + 'static> {
let mut flags_builder = cranelift::codegen::settings::builder();
// `simplejit` requires non-PIC code
if !jit {
// allow creating shared libraries
flags_builder
.enable("is_pic")
.expect("is_pic should be a valid option");
}
// use debug assertions
flags_builder
.enable("enable_verifier")
.expect("enable_verifier should be a valid option");
// don't emit call to __cranelift_probestack
flags_builder
.set("enable_probestack", "false")
.expect("enable_probestack should be a valid option");
let flags = Flags::new(flags_builder);
cranelift::codegen::isa::lookup(TARGET)
.unwrap_or_else(|_| panic!("platform not supported: {}", TARGET))
.finish(flags)
}
pub fn initialize_aot_module(name: String) -> Module<ObjectBackend> {
let builder = ObjectBuilder::new(
get_isa(false),
name,
cranelift_module::default_libcall_names(),
);
Module::new(builder.expect("unsupported binary format or target architecture"))
}
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
declarations: HashMap<Symbol, Id>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
declarations: HashMap::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(&mut self, symbol: Symbol, is_definition: bool) -> CompileResult<FuncId> {
use saltwater_parser::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.declarations.get(&symbol) {
return Ok(*func_id);
}
}
let metadata = symbol.get();
let func_type = match &metadata.ctype {
Type::Function(func_type) => func_type,
_ => unreachable!("bug in backend: only functions should be passed to `declare_func`"),
};
let signature = func_type.signature(self.module.isa());
let linkage = match metadata.storage_class {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(metadata.id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.declarations.insert(symbol, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
let meta = decl.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
return Ok(());
}
if let Type::Function(_) = &meta.ctype {
self.declare_func(decl.symbol, false)?;
return Ok(());
}
let u64_size = match meta.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.declarations.insert(decl.symbol, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> |
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: &[Symbol],
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.get().ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (¶m, ir_val) in params.iter().zip(ir_vals) {
let u64_size = match param.get().ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.declarations.insert(param, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
symbol: Symbol,
func_type: &FunctionType,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let func_id = self.declare_func(symbol, true)?;
// TODO: maybe `declare_func` should take a `signature` after all?
// This just calculates it twice, it's probably fine
let signature = func_type.signature(self.module.isa());
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boiler plate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(
// TODO: get rid of this clone
&func_type.params,
func_start,
&location,
&mut builder,
)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
let id = symbol.get().id;
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("ir: {}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
let mut trap_sink = codegen::binemit::NullTrapSink {};
if let Err(err) = self
.module
.define_function(func_id, &mut ctx, &mut trap_sink)
{
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
pub type Product = <cranelift_object::ObjectBackend as Backend>::Product;
/// Compile and return the declarations and warnings.
pub fn compile<B: Backend>(module: Module<B>, buf: &str, opt: Opt) -> Program<Module<B>> {
use saltwater_parser::{check_semantics, vec_deque};
let debug_asm = opt.debug_asm;
let mut program = check_semantics(buf, opt);
let hir = match program.result {
Ok(hir) => hir,
Err(err) => {
return Program {
result: Err(err),
warnings: program.warnings,
files: program.files,
}
}
};
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug_asm);
for decl in hir {
let meta = decl.data.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
continue;
}
let current = match &meta.ctype {
Type::Function(func_type) => match decl.data.init {
Some(Initializer::FunctionBody(stmts)) => {
compiler.compile_func(decl.data.symbol, &func_type, stmts, decl.location)
}
None => compiler.declare_func(decl.data.symbol, false).map(|_| ()),
_ => unreachable!("functions can only be initialized by a FunctionBody"),
},
Type::Void | Type::Error => unreachable!("parser let an incomplete type through"),
_ => {
if let Some(Initializer::FunctionBody(_)) = &decl.data.init {
unreachable!("only functions should have a function body")
}
compiler.store_static(decl.data.symbol, decl.data.init, decl.location)
}
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
let (result, ir_warnings) = if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
};
program.warnings.extend(ir_warnings);
Program {
result: result.map_err(|errs| vec_deque![errs]),
warnings: program.warnings,
files: program.files,
}
}
pub fn assemble(product: Product, output: &Path) -> Result<(), saltwater_parser::Error> {
use std::fs::File;
use std::io::{self, Write};
let bytes = product.emit().map_err(saltwater_parser::Error::Platform)?;
File::create(output)?
.write_all(&bytes)
.map_err(io::Error::into)
}
pub fn link(obj_file: &Path, output: &Path) -> Result<(), std::io::Error> {
use std::io::{Error, ErrorKind};
use std::process::Command;
// link the .o file using the host linker
let status = Command::new("cc")
.args(&[&obj_file, Path::new("-o"), output])
.status()
.map_err(|err| {
if err.kind() == ErrorKind::NotFound {
Error::new(
ErrorKind::NotFound,
"could not find host cc (for linking). Is it on your PATH?",
)
} else {
err
}
})?;
if !status.success() {
Err(Error::new(ErrorKind::Other, "linking program failed"))
} else {
Ok(())
}
}
#[cfg(feature = "jit")]
pub use jit::*;
#[cfg(feature = "jit")]
mod jit {
use super::*;
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
use std::convert::TryFrom;
use std::rc::Rc;
pub fn initialize_jit_module() -> Module<SimpleJITBackend> {
let libcall_names = cranelift_module::default_libcall_names();
Module::new(SimpleJITBuilder::with_isa(get_isa(true), libcall_names))
}
/// Structure used to handle compiling C code to memory instead of to disk.
///
/// You can use [`from_string`] to create a JIT instance.
/// Alternatively, if you don't care about compile warnings, you can use `JIT::try_from` instead.
/// If you already have a `Module`, you can use `JIT::from` to avoid having to `unwrap()`.
///
/// JIT stands for 'Just In Time' compiled, the way that Java and JavaScript work.
///
/// [`from_string`]: #method.from_string
pub struct JIT {
module: Module<SimpleJITBackend>,
}
impl From<Module<SimpleJITBackend>> for JIT {
fn from(module: Module<SimpleJITBackend>) -> Self {
Self { module }
}
}
impl TryFrom<Rc<str>> for JIT {
type Error = saltwater_parser::Error;
fn try_from(source: Rc<str>) -> Result<JIT, Self::Error> {
JIT::from_string(source, Opt::default()).result
}
}
impl JIT {
/// Compile string and return JITed code.
pub fn from_string<R: Into<Rc<str>>>(
source: R,
opt: Opt,
) -> Program<Self, saltwater_parser::Error> {
let source = source.into();
let module = initialize_jit_module();
let program = compile(module, &source, opt);
let result = match program.result {
Ok(module) => Ok(JIT::from(module)),
Err(errs) => Err(errs.into()),
};
Program {
result,
warnings: program.warnings,
files: program.files,
}
}
/// Invoke this function before trying to get access to "new" compiled functions.
pub fn finalize(&mut self) {
self.module.finalize_definitions();
}
/// Get a compiled function. If this function doesn't exist then `None` is returned, otherwise its address is returned.
///
/// # Panics
/// Panics if function is not compiled (finalized). Try to invoke `finalize` before using `get_compiled_function`.
pub fn get_compiled_function(&mut self, name: &str) -> Option<*const u8> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Func(id)) = name {
Some(self.module.get_finalized_function(id))
} else {
None
}
}
/// Get compiled static data. If this data doesn't exist then `None` is returned, otherwise its address and size are returned.
pub fn get_compiled_data(&mut self, name: &str) -> Option<(*mut u8, usize)> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Data(id)) = name {
Some(self.module.get_finalized_data(id))
} else {
None
}
}
/// Given a module, run the `main` function.
///
/// This automatically calls `self.finalize()`.
/// If `main()` does not exist in the module, returns `None`; otherwise returns the exit code.
///
| {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
} | identifier_body |
lib.rs | lift_module::{self, Backend, DataId, FuncId, Linkage, Module};
use cranelift_object::{ObjectBackend, ObjectBuilder};
use saltwater_parser::arch::TARGET;
use saltwater_parser::{Opt, Program};
use saltwater_parser::data::{
hir::{Declaration, Initializer, Stmt, Symbol},
types::FunctionType,
StorageClass, *,
};
pub(crate) fn get_isa(jit: bool) -> Box<dyn TargetIsa + 'static> {
let mut flags_builder = cranelift::codegen::settings::builder();
// `simplejit` requires non-PIC code
if !jit {
// allow creating shared libraries
flags_builder
.enable("is_pic")
.expect("is_pic should be a valid option");
}
// use debug assertions
flags_builder
.enable("enable_verifier")
.expect("enable_verifier should be a valid option");
// don't emit call to __cranelift_probestack
flags_builder
.set("enable_probestack", "false")
.expect("enable_probestack should be a valid option");
let flags = Flags::new(flags_builder);
cranelift::codegen::isa::lookup(TARGET)
.unwrap_or_else(|_| panic!("platform not supported: {}", TARGET))
.finish(flags)
}
pub fn initialize_aot_module(name: String) -> Module<ObjectBackend> {
let builder = ObjectBuilder::new(
get_isa(false),
name,
cranelift_module::default_libcall_names(),
);
Module::new(builder.expect("unsupported binary format or target architecture"))
}
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
declarations: HashMap<Symbol, Id>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
declarations: HashMap::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` as an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(&mut self, symbol: Symbol, is_definition: bool) -> CompileResult<FuncId> {
use saltwater_parser::get_str;
if!is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.declarations.get(&symbol) {
return Ok(*func_id);
}
}
let metadata = symbol.get();
let func_type = match &metadata.ctype {
Type::Function(func_type) => func_type,
_ => unreachable!("bug in backend: only functions should be passed to `declare_func`"),
};
let signature = func_type.signature(self.module.isa());
let linkage = match metadata.storage_class {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(metadata.id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.declarations.insert(symbol, Id::Function(func_id));
Ok(func_id)
}
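// Editorial note (not in the original source): in terms of the four cases above,
// the `match` on `storage_class` makes a plain declaration an `Import` until its
// definition is seen (case 3 re-declares it as `Export`), while `static` symbols
// always stay `Local` to the object file.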
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
let meta = decl.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
return Ok(());
}
if let Type::Function(_) = &meta.ctype {
self.declare_func(decl.symbol, false)?;
return Ok(());
}
let u64_size = match meta.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.declarations.insert(decl.symbol, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: &[Symbol],
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.get().ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (&param, ir_val) in params.iter().zip(ir_vals) {
let u64_size = match param.get().ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.declarations.insert(param, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
symbol: Symbol,
func_type: &FunctionType,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let func_id = self.declare_func(symbol, true)?;
// TODO: maybe `declare_func` should take a `signature` after all?
// This just calculates it twice, it's probably fine
let signature = func_type.signature(self.module.isa());
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boilerplate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(
// TODO: get rid of this clone
&func_type.params,
func_start,
&location,
&mut builder,
)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
let id = symbol.get().id;
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("ir: {}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
let mut trap_sink = codegen::binemit::NullTrapSink {};
if let Err(err) = self
.module
.define_function(func_id, &mut ctx, &mut trap_sink)
{
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
pub type Product = <cranelift_object::ObjectBackend as Backend>::Product;
/// Compile and return the declarations and warnings.
pub fn compile<B: Backend>(module: Module<B>, buf: &str, opt: Opt) -> Program<Module<B>> {
use saltwater_parser::{check_semantics, vec_deque};
let debug_asm = opt.debug_asm;
let mut program = check_semantics(buf, opt);
let hir = match program.result {
Ok(hir) => hir,
Err(err) => {
return Program {
result: Err(err),
warnings: program.warnings,
files: program.files,
}
}
};
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug_asm);
for decl in hir {
let meta = decl.data.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
continue;
}
let current = match &meta.ctype {
Type::Function(func_type) => match decl.data.init {
Some(Initializer::FunctionBody(stmts)) => {
compiler.compile_func(decl.data.symbol, &func_type, stmts, decl.location)
}
None => compiler.declare_func(decl.data.symbol, false).map(|_| ()),
_ => unreachable!("functions can only be initialized by a FunctionBody"),
},
Type::Void | Type::Error => unreachable!("parser let an incomplete type through"),
_ => {
if let Some(Initializer::FunctionBody(_)) = &decl.data.init {
unreachable!("only functions should have a function body")
}
compiler.store_static(decl.data.symbol, decl.data.init, decl.location)
}
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
let (result, ir_warnings) = if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
};
program.warnings.extend(ir_warnings);
Program {
result: result.map_err(|errs| vec_deque![errs]),
warnings: program.warnings,
files: program.files,
}
}
pub fn assemble(product: Product, output: &Path) -> Result<(), saltwater_parser::Error> {
use std::fs::File;
use std::io::{self, Write};
let bytes = product.emit().map_err(saltwater_parser::Error::Platform)?;
File::create(output)?
.write_all(&bytes)
.map_err(io::Error::into)
}
pub fn link(obj_file: &Path, output: &Path) -> Result<(), std::io::Error> { | // link the .o file using the host linker
let status = Command::new("cc")
.args(&[&obj_file, Path::new("-o"), output])
.status()
.map_err(|err| {
if err.kind() == ErrorKind::NotFound {
Error::new(
ErrorKind::NotFound,
"could not find host cc (for linking). Is it on your PATH?",
)
} else {
err
}
})?;
if !status.success() {
Err(Error::new(ErrorKind::Other, "linking program failed"))
} else {
Ok(())
}
}
#[cfg(feature = "jit")]
pub use jit::*;
#[cfg(feature = "jit")]
mod jit {
use super::*;
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
use std::convert::TryFrom;
use std::rc::Rc;
pub fn initialize_jit_module() -> Module<SimpleJITBackend> {
let libcall_names = cranelift_module::default_libcall_names();
Module::new(SimpleJITBuilder::with_isa(get_isa(true), libcall_names))
}
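// Illustrative sketch (not part of the original source): compiling a C snippet
// and calling its `main` through the JIT. The snippet and the transmuted
// signature are assumptions; transmuting to the wrong signature is undefined
// behavior.
#[allow(dead_code)]
unsafe fn jit_example() -> Option<i32> {
    let mut jit = JIT::from_string("int main(void) { return 42; }", Opt::default())
        .result
        .ok()?;
    jit.finalize();
    let addr = jit.get_compiled_function("main")?;
    let main: unsafe extern "C" fn() -> i32 = std::mem::transmute(addr);
    Some(main())
}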
/// Structure used to handle compiling C code to memory instead of to disk.
///
/// You can use [`from_string`] to create a JIT instance.
/// Alternatively, if you don't care about compile warnings, you can use `JIT::try_from` instead.
/// If you already have a `Module`, you can use `JIT::from` to avoid having to `unwrap()`.
///
/// JIT stands for 'Just In Time' compilation, the way that Java and JavaScript work.
///
/// [`from_string`]: #method.from_string
pub struct JIT {
module: Module<SimpleJITBackend>,
}
impl From<Module<SimpleJITBackend>> for JIT {
fn from(module: Module<SimpleJITBackend>) -> Self {
Self { module }
}
}
impl TryFrom<Rc<str>> for JIT {
type Error = saltwater_parser::Error;
fn try_from(source: Rc<str>) -> Result<JIT, Self::Error> {
JIT::from_string(source, Opt::default()).result
}
}
impl JIT {
/// Compile string and return JITed code.
pub fn from_string<R: Into<Rc<str>>>(
source: R,
opt: Opt,
) -> Program<Self, saltwater_parser::Error> {
let source = source.into();
let module = initialize_jit_module();
let program = compile(module, &source, opt);
let result = match program.result {
Ok(module) => Ok(JIT::from(module)),
Err(errs) => Err(errs.into()),
};
Program {
result,
warnings: program.warnings,
files: program.files,
}
}
/// Invoke this function before trying to get access to "new" compiled functions.
pub fn finalize(&mut self) {
self.module.finalize_definitions();
}
/// Get a compiled function. If this function doesn't exist then `None` is returned, otherwise its address is returned.
///
/// # Panics
/// Panics if the function has not been finalized. Invoke `finalize` before calling `get_compiled_function`.
pub fn get_compiled_function(&mut self, name: &str) -> Option<*const u8> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Func(id)) = name {
Some(self.module.get_finalized_function(id))
} else {
None
}
}
/// Get compiled static data. If this data doesn't exist then `None` is returned, otherwise its address and size are returned.
pub fn get_compiled_data(&mut self, name: &str) -> Option<(*mut u8, usize)> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Data(id)) = name {
Some(self.module.get_finalized_data(id))
} else {
None
}
}
/// Given a module, run the `main` function.
///
/// This automatically calls `self.finalize()`.
/// If `main()` does not exist in the module, returns `None`; otherwise returns the exit code.
///
| use std::io::{Error, ErrorKind};
use std::process::Command;
| random_line_split |
tags.rs | use std::{str::FromStr, sync::Arc, time::Duration};
use eyre::Report;
use rand::RngCore;
use rosu_v2::model::GameMode;
use tokio::fs;
use tokio_stream::StreamExt;
use twilight_model::{channel::ReactionType, gateway::event::Event, http::attachment::Attachment};
use super::ReactionWrapper;
use crate::{
database::MapsetTagWrapper,
games::bg::MapsetTags,
util::{
constants::{
common_literals::{MANIA, OSU},
GENERAL_ISSUE, OSU_BASE, OWNER_USER_ID,
},
send_reaction, CowUtils, Emote,
},
Context, Result, CONFIG,
};
#[command]
#[short_desc("Help tagging backgrounds by tagging them manually")]
#[long_desc(
"Manage the tags of a background for the bg game.\n\
First argument must be the mapset id, second argument must be either \
`a` or `add` to add tags, or `r` or `remove` to remove them. \n\
After that provide any of these pre-selected keywords:\n\
`farm, streams, alternate, old, meme, hardname, easy, hard, tech, weeb, bluesky, english`\n\
By default, all tags are marked as **true**, so removing them will be more important."
)]
#[usage("[mapset id] [add/a/remove/r] [list of tags]")]
#[example("21662 r hard farm streams alternate hardname tech weeb bluesky")]
#[aliases("bgtm", "bgtagmanual")]
#[owner()]
async fn bgtagsmanual(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse mapset id
let mapset_id = match args.next().map(u32::from_str) {
Some(Ok(num)) => num,
Some(Err(_)) => {
let content = "Could not parse mapset id. Be sure to specify it as first argument";
return msg.error(&ctx, content).await;
}
None => {
let content = "Arguments: `[mapset id] [add/a/remove/r] [list of tags]`\n\
Example: `21662 r hard farm streams alternate hardname tech weeb bluesky`\n\
Tags: `farm, streams, alternate, old, meme, hardname, easy, hard, tech, \
weeb, bluesky, english`";
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
return Ok(());
}
};
// Check if there is background for the given mapset id
if ctx.psql().get_tags_mapset(mapset_id).await.is_err() {
let content = "No background entry found with this id";
return msg.error(&ctx, content).await;
}
// Parse action
let action = match args.next().map(Action::from_str) {
Some(Ok(action)) => action,
None | Some(Err(_)) => {
let content = "Could not parse action. \
Be sure to specify `r`, `remove`, `a`, or `add` as second argument";
return msg.error(&ctx, content).await;
}
};
// Parse tags
let mut tags = MapsetTags::empty();
while !args.is_empty() {
match args.next().map(MapsetTags::from_str) {
Some(Ok(tag)) => tags.insert(tag),
Some(Err(tag)) => {
let content = format!(
"Could not parse tag `{tag}`.\n\
Be sure to only give these tags:\n\
`farm, streams, alternate, old, meme, hardname, \
easy, hard, tech, weeb, bluesky, english`"
);
return msg.error(&ctx, content).await;
}
None => unreachable!(),
}
}
let result = if tags.is_empty() {
ctx.psql().get_tags_mapset(mapset_id).await
} else {
let db_result = match action {
Action::Add => ctx.psql().add_tags_mapset(mapset_id, tags).await,
Action::Remove => ctx.psql().remove_tags_mapset(mapset_id, tags).await,
};
match db_result {
Ok(_) => ctx.psql().get_tags_mapset(mapset_id).await,
Err(err) => Err(err),
}
};
// Then show the final tags
match result {
Ok(tags) => {
let content = format!("{OSU_BASE}beatmapsets/{mapset_id} is now tagged as:\n{tags}");
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
}
Ok(())
}
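// Sketch (not part of the original source): the tag keywords above map onto the
// bitflag-style `MapsetTags`; `from_str` is assumed to accept the lowercase
// keywords from the command description and to echo the bad token back as `Err`.
#[cfg(test)]
mod tag_parsing_example {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn parses_documented_keywords() {
        assert!(MapsetTags::from_str("farm").is_ok());
        assert!(MapsetTags::from_str("bluesky").is_ok());
        assert!(MapsetTags::from_str("not-a-tag").is_err());
    }
}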
// #[command]
// #[short_desc("Help out tagging backgrounds")]
// #[long_desc(
// "Let me give you mapsets that still need to be tagged.\n\
// React to them properly, then lock it in by reacting with ✅.\n\
// To leave the loop, react with ❌ or just wait 10 minutes.\n\
// Mode can be specified in the first argument, defaults to std.\n\
// **You need to be verified to use this command**, feel free to \
// let Badewanne3 know if you want to help out tagging backgrounds."
// )]
// #[usage("[std / mna]")]
// #[aliases("bgt", "bgtag")]
// #[owner()]
async fn bgtags(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse arguments as mode
let mode = match args.next() {
Some(arg) => match arg.cow_to_ascii_lowercase().as_ref() {
"mna" | "mania" | "m" => GameMode::Mania,
"osu" | "std" | "standard" | "o" => GameMode::Osu,
_ => {
let content = "Could not parse first argument as mode. \
Provide either `mna`, or `std`";
return msg.error(&ctx, content).await;
}
},
None => GameMode::Osu,
};
let mut untagged = match ctx.psql().get_all_tags_mapset(mode).await {
Ok(tags) => tags.iter().any(|tag| tag.untagged()),
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if !untagged {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
}
let mut owner = msg.author.id;
loop {
// Get all mapsets for which tags are missing
let tags_result = if untagged {
ctx.psql().get_all_tags_mapset(mode).await
} else {
ctx.psql()
.get_random_tags_mapset(mode)
.await
.map(|tags| vec![tags])
};
let mapsets = match tags_result {
Ok(mut tags) => {
if untagged {
if tags.iter().any(|tag| tag.untagged()) {
tags.retain(|tag| tag.untagged());
} else {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
untagged = false;
tags.truncate(1);
}
}
tags
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let (mapset_id, img) = get_random_image(mapsets, mode).await;
let content = format!(
"<@{owner}> Which tags should this mapsets get: {OSU_BASE}beatmapsets/{mapset_id}\n\
```\n\
🍋: Easy 🎨: Weeb 😱: Hard name 🗽: English 💯: Tech\n\
🤓: Hard 🍨: Kpop 🪀: Alternate 🌀: Streams ✅: Lock in\n\
🤡: Meme 👨🌾: Farm 🟦: Blue sky 👴: Old ❌: Exit loop\n\
```"
);
let img = Attachment::from_bytes("bg_img.png".to_owned(), img);
// Send response
let response = ctx
.http
.create_message(msg.channel_id)
.content(&content)?
.attachments(&[img])
.unwrap()
.exec()
.await?
.model()
.await?;
let msg_id = response.id;
// Setup collector
let reaction_stream = ctx
.standby
.wait_for_event_stream(move |event: &Event| match event {
Event::ReactionAdd(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
Event::ReactionRemove(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
_ => false,
})
.map(|event| match event {
Event::ReactionAdd(add) => ReactionWrapper::Add(add.0),
Event::ReactionRemove(remove) => ReactionWrapper::Remove(remove.0),
_ => unreachable!(),
})
.timeout(Duration::from_secs(600));
tokio::pin!(reaction_stream);
// Add reactions
let reactions = [
"🍋",
"🤓",
"🤡",
"🎨",
"🍨",
"👨🌾",
"😱",
"🪀",
"🟦",
"🗽",
"🌀",
"👴",
"💯",
"✅",
"❌",
];
for &reaction in reactions.iter() {
let emote = Emote::Custom(reaction);
send_reaction(&*ctx, &response, emote).await?;
}
let mut break_loop = true;
// Run collector
let mut add_tags = MapsetTags::empty();
let mut remove_tags = MapsetTags::empty();
while let Some(Ok(reaction)) = reaction_stream.next().await {
let tag = if let ReactionType::Unicode { ref name } = reaction.as_deref().emoji {
match name.as_str() {
"🍋" => MapsetTags::Easy,
"🤓" => MapsetTags::Hard,
"🤡" => MapsetTags::Meme,
"👴" => MapsetTags::Old,
"😱" => MapsetTags::HardName,
"🟦" => MapsetTags::BlueSky,
"🪀" => MapsetTags::Alternate,
"🗽" => MapsetTags::English,
"👨🌾" => MapsetTags::Farm,
"💯" => MapsetTags::Tech,
"🎨" => MapsetTags::Weeb,
"🌀" => MapsetTags::Streams,
"🍨" => MapsetTags::Kpop,
"✅" => {
owner = reaction.as_deref().user_id;
break_loop = false;
break;
}
"❌" => break,
_ => continue,
}
} else {
continue;
};
match reaction {
ReactionWrapper::Add(_) => {
add_tags.insert(tag);
}
ReactionWrapper::Remove(_) => {
remove_tags.insert(tag);
}
}
}
if !add_tags.is_empty() {
if let Err(err) = ctx.psql().add_tags_mapset(mapset_id, add_tags).await {
warn!(?err, "Failed to add tags");
}
}
if !remove_tags.is_empty() {
if let Err(err) = ctx.psql().remove_tags_mapset(mapset_id, remove_tags).await {
warn!(?err, "Failed to remove tags");
}
}
// Then show the final tags
match ctx.psql().get_tags_mapset(mapset_id).await {
Ok(tags) => {
if !tags.is_empty() {
let content = format!(
"{}beatmapsets/{} is now tagged as:\n{}",
OSU_BASE, mapset_id, tags,
);
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if break_loop {
| &ctx, builder).await?;
break;
}
}
Ok(())
}
async fn get_random_image(mut mapsets: Vec<MapsetTagWrapper>, mode: GameMode) -> (u32, Vec<u8>) {
let mut path = CONFIG.get().unwrap().paths.backgrounds.clone();
match mode {
GameMode::Osu => path.push(OSU),
GameMode::Mania => path.push(MANIA),
_ => unreachable!(),
}
loop {
let random_idx = {
let mut rng = rand::thread_rng();
rng.next_u32() as usize % mapsets.len()
};
let mapset = mapsets.swap_remove(random_idx);
path.push(&mapset.filename);
match fs::read(&path).await {
Ok(bytes) => return (mapset.mapset_id, bytes),
Err(err) => {
warn!(path = path.display(), ?err, "Failed to read file");
path.pop();
}
}
}
}
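// Editorial note (not in the original source): `swap_remove` discards a mapset
// whose file failed to read, so each candidate is tried at most once; if every
// file is unreadable, `mapsets.len()` reaches zero and the modulo above panics.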
#[derive(Copy, Clone, Eq, PartialEq)]
enum Action {
Add,
Remove,
}
impl FromStr for Action {
type Err = ();
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.cow_to_ascii_lowercase().as_ref() {
"r" | "remove" => Ok(Self::Remove),
"a" | "add" => Ok(Self::Add),
_ => Err(()),
}
}
}
| let builder = MessageBuilder::new().content("Exiting loop :wave:");
msg.create_message( | conditional_block |
tags.rs | use std::{str::FromStr, sync::Arc, time::Duration};
use eyre::Report;
use rand::RngCore;
use rosu_v2::model::GameMode;
use tokio::fs;
use tokio_stream::StreamExt;
use twilight_model::{channel::ReactionType, gateway::event::Event, http::attachment::Attachment};
use super::ReactionWrapper;
use crate::{
database::MapsetTagWrapper,
games::bg::MapsetTags,
util::{
constants::{
common_literals::{MANIA, OSU},
GENERAL_ISSUE, OSU_BASE, OWNER_USER_ID,
},
send_reaction, CowUtils, Emote,
},
Context, Result, CONFIG,
};
#[command]
#[short_desc("Help tagging backgrounds by tagging them manually")]
#[long_desc(
"Manage the tags of a background for the bg game.\n\
First argument must be the mapset id, second argument must be either \
`a` or `add` to add tags, or `r` or `remove` to remove them. \n\
After that provide any of these pre-selected keywords:\n\
`farm, streams, alternate, old, meme, hardname, easy, hard, tech, weeb, bluesky, english`\n\
By default, all tags are marked as **true**, so removing them will be more important."
)]
#[usage("[mapset id] [add/a/remove/r] [list of tags]")]
#[example("21662 r hard farm streams alternate hardname tech weeb bluesky")]
#[aliases("bgtm", "bgtagmanual")]
#[owner()]
async fn bgtagsmanual(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse mapset id
let mapset_id = match args.next().map(u32::from_str) {
Some(Ok(num)) => num,
Some(Err(_)) => {
let content = "Could not parse mapset id. Be sure to specify it as first argument";
return msg.error(&ctx, content).await;
}
None => {
let content = "Arguments: `[mapset id] [add/a/remove/r] [list of tags]`\n\
Example: `21662 r hard farm streams alternate hardname tech weeb bluesky`\n\
Tags: `farm, streams, alternate, old, meme, hardname, easy, hard, tech, \
weeb, bluesky, english`";
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
return Ok(());
}
};
// Check if there is background for the given mapset id
if ctx.psql().get_tags_mapset(mapset_id).await.is_err() {
let content = "No background entry found with this id";
return msg.error(&ctx, content).await;
}
// Parse action
let action = match args.next().map(Action::from_str) {
Some(Ok(action)) => action,
None | Some(Err(_)) => {
let content = "Could not parse action. \
Be sure to specify `r`, `remove`, `a`, or `add` as second argument";
return msg.error(&ctx, content).await;
}
};
// Parse tags
let mut tags = MapsetTags::empty();
while !args.is_empty() {
match args.next().map(MapsetTags::from_str) {
Some(Ok(tag)) => tags.insert(tag),
Some(Err(tag)) => {
let content = format!(
"Could not parse tag `{tag}`.\n\
Be sure to only give these tags:\n\
`farm, streams, alternate, old, meme, hardname, \
easy, hard, tech, weeb, bluesky, english`"
);
return msg.error(&ctx, content).await;
}
None => unreachable!(),
}
}
let result = if tags.is_empty() {
ctx.psql().get_tags_mapset(mapset_id).await
} else {
let db_result = match action {
Action::Add => ctx.psql().add_tags_mapset(mapset_id, tags).await,
Action::Remove => ctx.psql().remove_tags_mapset(mapset_id, tags).await,
};
match db_result {
Ok(_) => ctx.psql().get_tags_mapset(mapset_id).await,
Err(err) => Err(err),
}
};
// Then show the final tags
match result {
Ok(tags) => {
let content = format!("{OSU_BASE}beatmapsets/{mapset_id} is now tagged as:\n{tags}");
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
}
Ok(())
}
// #[command]
// #[short_desc("Help out tagging backgrounds")]
// #[long_desc(
// "Let me give you mapsets that still need to be tagged.\n\
// React to them properly, then lock it in by reacting with ✅.\n\
// To leave the loop, react with ❌ or just wait 10 minutes.\n\
// Mode can be specified in the first argument, defaults to std.\n\
// **You need to be verified to use this command**, feel free to \
// let Badewanne3 know if you want to help out tagging backgrounds."
// )]
// #[usage("[std / mna]")]
// #[aliases("bgt", "bgtag")]
// #[owner()]
async fn bgtags(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args,.. } => (msg, args), | let mode = match args.next() {
Some(arg) => match arg.cow_to_ascii_lowercase().as_ref() {
"mna" | "mania" | "m" => GameMode::Mania,
"osu" | "std" | "standard" | "o" => GameMode::Osu,
_ => {
let content = "Could not parse first argument as mode. \
Provide either `mna`, or `std`";
return msg.error(&ctx, content).await;
}
},
None => GameMode::Osu,
};
let mut untagged = match ctx.psql().get_all_tags_mapset(mode).await {
Ok(tags) => tags.iter().any(|tag| tag.untagged()),
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if !untagged {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
}
let mut owner = msg.author.id;
loop {
// Get all mapsets for which tags are missing
let tags_result = if untagged {
ctx.psql().get_all_tags_mapset(mode).await
} else {
ctx.psql()
.get_random_tags_mapset(mode)
.await
.map(|tags| vec![tags])
};
let mapsets = match tags_result {
Ok(mut tags) => {
if untagged {
if tags.iter().any(|tag| tag.untagged()) {
tags.retain(|tag| tag.untagged());
} else {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
untagged = false;
tags.truncate(1);
}
}
tags
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let (mapset_id, img) = get_random_image(mapsets, mode).await;
let content = format!(
"<@{owner}> Which tags should this mapsets get: {OSU_BASE}beatmapsets/{mapset_id}\n\
```\n\
🍋: Easy 🎨: Weeb 😱: Hard name 🗽: English 💯: Tech\n\
🤓: Hard 🍨: Kpop 🪀: Alternate 🌀: Streams ✅: Lock in\n\
🤡: Meme 👨🌾: Farm 🟦: Blue sky 👴: Old ❌: Exit loop\n\
```"
);
let img = Attachment::from_bytes("bg_img.png".to_owned(), img);
// Send response
let response = ctx
.http
.create_message(msg.channel_id)
.content(&content)?
.attachments(&[img])
.unwrap()
.exec()
.await?
.model()
.await?;
let msg_id = response.id;
// Setup collector
let reaction_stream = ctx
.standby
.wait_for_event_stream(move |event: &Event| match event {
Event::ReactionAdd(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
Event::ReactionRemove(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
_ => false,
})
.map(|event| match event {
Event::ReactionAdd(add) => ReactionWrapper::Add(add.0),
Event::ReactionRemove(remove) => ReactionWrapper::Remove(remove.0),
_ => unreachable!(),
})
.timeout(Duration::from_secs(600));
tokio::pin!(reaction_stream);
// Add reactions
let reactions = [
"🍋",
"🤓",
"🤡",
"🎨",
"🍨",
"👨🌾",
"😱",
"🪀",
"🟦",
"🗽",
"🌀",
"👴",
"💯",
"✅",
"❌",
];
for &reaction in reactions.iter() {
let emote = Emote::Custom(reaction);
send_reaction(&*ctx, &response, emote).await?;
}
let mut break_loop = true;
// Run collector
let mut add_tags = MapsetTags::empty();
let mut remove_tags = MapsetTags::empty();
while let Some(Ok(reaction)) = reaction_stream.next().await {
let tag = if let ReactionType::Unicode { ref name } = reaction.as_deref().emoji {
match name.as_str() {
"🍋" => MapsetTags::Easy,
"🤓" => MapsetTags::Hard,
"🤡" => MapsetTags::Meme,
"👴" => MapsetTags::Old,
"😱" => MapsetTags::HardName,
"🟦" => MapsetTags::BlueSky,
"🪀" => MapsetTags::Alternate,
"🗽" => MapsetTags::English,
"👨🌾" => MapsetTags::Farm,
"💯" => MapsetTags::Tech,
"🎨" => MapsetTags::Weeb,
"🌀" => MapsetTags::Streams,
"🍨" => MapsetTags::Kpop,
"✅" => {
owner = reaction.as_deref().user_id;
break_loop = false;
break;
}
"❌" => break,
_ => continue,
}
} else {
continue;
};
match reaction {
ReactionWrapper::Add(_) => {
add_tags.insert(tag);
}
ReactionWrapper::Remove(_) => {
remove_tags.insert(tag);
}
}
}
if !add_tags.is_empty() {
if let Err(err) = ctx.psql().add_tags_mapset(mapset_id, add_tags).await {
warn!(?err, "Failed to add tags");
}
}
if !remove_tags.is_empty() {
if let Err(err) = ctx.psql().remove_tags_mapset(mapset_id, remove_tags).await {
warn!(?err, "Failed to remove tags");
}
}
// Then show the final tags
match ctx.psql().get_tags_mapset(mapset_id).await {
Ok(tags) => {
if !tags.is_empty() {
let content = format!(
"{}beatmapsets/{} is now tagged as:\n{}",
OSU_BASE, mapset_id, tags,
);
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if break_loop {
let builder = MessageBuilder::new().content("Exiting loop :wave:");
msg.create_message(&ctx, builder).await?;
break;
}
}
Ok(())
}
async fn get_random_image(mut mapsets: Vec<MapsetTagWrapper>, mode: GameMode) -> (u32, Vec<u8>) {
let mut path = CONFIG.get().unwrap().paths.backgrounds.clone();
match mode {
GameMode::Osu => path.push(OSU),
GameMode::Mania => path.push(MANIA),
_ => unreachable!(),
}
loop {
let random_idx = {
let mut rng = rand::thread_rng();
rng.next_u32() as usize % mapsets.len()
};
let mapset = mapsets.swap_remove(random_idx);
path.push(&mapset.filename);
match fs::read(&path).await {
Ok(bytes) => return (mapset.mapset_id, bytes),
Err(err) => {
warn!(path = path.display(), ?err, "Failed to read file");
path.pop();
}
}
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
enum Action {
Add,
Remove,
}
impl FromStr for Action {
type Err = ();
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.cow_to_ascii_lowercase().as_ref() {
"r" | "remove" => Ok(Self::Remove),
"a" | "add" => Ok(Self::Add),
_ => Err(()),
}
}
} | CommandData::Interaction { .. } => unreachable!(),
};
// Parse arguments as mode | random_line_split |
tags.rs | use std::{str::FromStr, sync::Arc, time::Duration};
use eyre::Report;
use rand::RngCore;
use rosu_v2::model::GameMode;
use tokio::fs;
use tokio_stream::StreamExt;
use twilight_model::{channel::ReactionType, gateway::event::Event, http::attachment::Attachment};
use super::ReactionWrapper;
use crate::{
database::MapsetTagWrapper,
games::bg::MapsetTags,
util::{
constants::{
common_literals::{MANIA, OSU},
GENERAL_ISSUE, OSU_BASE, OWNER_USER_ID,
},
send_reaction, CowUtils, Emote,
},
Context, Result, CONFIG,
};
#[command]
#[short_desc("Help tagging backgrounds by tagging them manually")]
#[long_desc(
"Manage the tags of a background for the bg game.\n\
First argument must be the mapset id, second argument must be either \
`a` or `add` to add tags, or `r` or `remove` to remove them. \n\
After that provide any of these pre-selected keywords:\n\
`farm, streams, alternate, old, meme, hardname, easy, hard, tech, weeb, bluesky, english`\n\
By default, all tags are marked as **true**, so removing them will be more important."
)]
#[usage("[mapset id] [add/a/remove/r] [list of tags]")]
#[example("21662 r hard farm streams alternate hardname tech weeb bluesky")]
#[aliases("bgtm", "bgtagmanual")]
#[owner()]
async fn bgtagsmanual(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse mapset id
let mapset_id = match args.next().map(u32::from_str) {
Some(Ok(num)) => num,
Some(Err(_)) => {
let content = "Could not parse mapset id. Be sure to specify it as first argument";
return msg.error(&ctx, content).await;
}
None => {
let content = "Arguments: `[mapset id] [add/a/remove/r] [list of tags]`\n\
Example: `21662 r hard farm streams alternate hardname tech weeb bluesky`\n\
Tags: `farm, streams, alternate, old, meme, hardname, easy, hard, tech, \
weeb, bluesky, english`";
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
return Ok(());
}
};
// Check if there is background for the given mapset id
if ctx.psql().get_tags_mapset(mapset_id).await.is_err() {
let content = "No background entry found with this id";
return msg.error(&ctx, content).await;
}
// Parse action
let action = match args.next().map(Action::from_str) {
Some(Ok(action)) => action,
None | Some(Err(_)) => {
let content = "Could not parse action. \
Be sure to specify `r`, `remove`, `a`, or `add` as second argument";
return msg.error(&ctx, content).await;
}
};
// Parse tags
let mut tags = MapsetTags::empty();
while !args.is_empty() {
match args.next().map(MapsetTags::from_str) {
Some(Ok(tag)) => tags.insert(tag),
Some(Err(tag)) => {
let content = format!(
"Could not parse tag `{tag}`.\n\
Be sure to only give these tags:\n\
`farm, streams, alternate, old, meme, hardname, \
easy, hard, tech, weeb, bluesky, english`"
);
return msg.error(&ctx, content).await;
}
None => unreachable!(),
}
}
let result = if tags.is_empty() {
ctx.psql().get_tags_mapset(mapset_id).await
} else {
let db_result = match action {
Action::Add => ctx.psql().add_tags_mapset(mapset_id, tags).await,
Action::Remove => ctx.psql().remove_tags_mapset(mapset_id, tags).await,
};
match db_result {
Ok(_) => ctx.psql().get_tags_mapset(mapset_id).await,
Err(err) => Err(err),
}
};
// Then show the final tags
match result {
Ok(tags) => {
let content = format!("{OSU_BASE}beatmapsets/{mapset_id} is now tagged as:\n{tags}");
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
}
Ok(())
}
// #[command]
// #[short_desc("Help out tagging backgrounds")]
// #[long_desc(
// "Let me give you mapsets that still need to be tagged.\n\
// React to them properly, then lock it in by reacting with ✅.\n\
// To leave the loop, react with ❌ or just wait 10 minutes.\n\
// Mode can be specified in the first argument, defaults to std.\n\
// **You need to be verified to use this command**, feel free to \
// let Badewanne3 know if you want to help out tagging backgrounds."
// )]
// #[usage("[std / mna]")]
// #[aliases("bgt", "bgtag")]
// #[owner()]
async fn bgtags(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse arguments as mode
let mode = match args.next() {
Some(arg) => match arg.cow_to_ascii_lowercase().as_ref() {
"mna" | "mania" | "m" => GameMode::Mania,
"osu" | "std" | "standard" | "o" => GameMode::Osu,
_ => {
let content = "Could not parse first argument as mode. \
Provide either `mna`, or `std`";
return msg.error(&ctx, content).await;
}
},
None => GameMode::Osu,
};
let mut untagged = match ctx.psql().get_all_tags_mapset(mode).await {
Ok(tags) => tags.iter().any(|tag| tag.untagged()),
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if !untagged {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
}
let mut owner = msg.author.id;
loop {
// Get all mapsets for which tags are missing
let tags_result = if untagged {
ctx.psql().get_all_tags_mapset(mode).await
} else {
ctx.psql()
.get_random_tags_mapset(mode)
.await
.map(|tags| vec![tags])
};
let mapsets = match tags_result {
Ok(mut tags) => {
if untagged {
if tags.iter().any(|tag| tag.untagged()) {
tags.retain(|tag| tag.untagged());
} else {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
untagged = false;
tags.truncate(1);
}
}
tags
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let (mapset_id, img) = get_random_image(mapsets, mode).await;
let content = format!(
"<@{owner}> Which tags should this mapsets get: {OSU_BASE}beatmapsets/{mapset_id}\n\
```\n\
🍋: Easy 🎨: Weeb 😱: Hard name 🗽: English 💯: Tech\n\
🤓: Hard 🍨: Kpop 🪀: Alternate 🌀: Streams ✅: Lock in\n\
🤡: Meme 👨🌾: Farm 🟦: Blue sky 👴: Old ❌: Exit loop\n\
```"
);
let img = Attachment::from_bytes("bg_img.png".to_owned(), img);
// Send response
let response = ctx
.http
.create_message(msg.channel_id)
.content(&content)?
.attachments(&[img])
.unwrap()
.exec()
.await?
.model()
.await?;
let msg_id = response.id;
// Setup collector
let reaction_stream = ctx
.standby
.wait_for_event_stream(move |event: &Event| match event {
Event::ReactionAdd(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
Event::ReactionRemove(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
_ => false,
})
.map(|event| match event {
Event::ReactionAdd(add) => ReactionWrapper::Add(add.0),
Event::ReactionRemove(remove) => ReactionWrapper::Remove(remove.0),
_ => unreachable!(),
})
.timeout(Duration::from_secs(600));
tokio::pin!(reaction_stream);
// Add reactions
let reactions = [
"🍋",
"🤓",
"🤡",
"🎨",
"🍨",
"👨🌾",
"😱",
"🪀",
"🟦",
"🗽",
"🌀",
"👴",
"💯",
"✅",
"❌",
];
for &reaction in reactions.iter() {
let emote = Emote::Custom(reaction);
send_reaction(&*ctx, &response, emote).await?;
}
let mut break_loop = true;
// Run collector
let mut add_tags = MapsetTags::empty();
let mut remove_tags = MapsetTags::empty();
while let Some(Ok(reaction)) = reaction_stream.next().await {
let tag = if let ReactionType::Unicode { ref name } = reaction.as_deref().emoji {
match name.as_str() {
"🍋" => MapsetTags::Easy,
"🤓" => MapsetTags::Hard,
"🤡" => MapsetTags::Meme,
"👴" => MapsetTags::Old,
"😱" => MapsetTags::HardName,
"🟦" => MapsetTags::BlueSky,
"🪀" => MapsetTags::Alternate,
"🗽" => MapsetTags::English,
"👨🌾" => MapsetTags::Farm,
"💯" => MapsetTags::Tech,
"🎨" => MapsetTags::Weeb,
"🌀" => MapsetTags::Streams,
"🍨" => MapsetTags::Kpop,
"✅" => {
owner = reaction.as_deref().user_id;
break_loop = false;
break;
}
"❌" => break,
_ => continue,
}
} else {
continue;
};
match reaction {
ReactionWrapper::Add(_) => {
add_tags.insert(tag);
}
ReactionWrapper::Remove(_) => {
remove_tags.insert(tag);
}
}
}
if !add_tags.is_empty() {
if let Err(err) = ctx.psql().add_tags_mapset(mapset_id, add_tags).await {
warn!(?err, "Failed to add tags");
}
}
if !remove_tags.is_empty() {
if let Err(err) = ctx.psql().remove_tags_mapset(mapset_id, remove_tags).await {
warn!(?err, "Failed to remove tags");
}
}
// Then show the final tags
match ctx.psql().get_tags_mapset(mapset_id).await {
Ok(tags) => {
if !tags.is_empty() {
let content = format!(
"{}beatmapsets/{} is now tagged as:\n{}",
OSU_BASE, mapset_id, tags,
);
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if break_loop {
let builder = MessageBuilder::new().content("Exiting loop :wave:");
msg.create_message(&ctx, builder).await?;
break;
}
}
Ok(())
}
async fn get_random_image(mut mapsets: Vec<MapsetTagWrapper>, mode: GameMode) -> (u32, Vec<u8>) {
let mut path = CONFIG.get().unwrap().paths.backgrounds. | tch mode {
GameMode::Osu => path.push(OSU),
GameMode::Mania => path.push(MANIA),
_ => unreachable!(),
}
loop {
let random_idx = {
let mut rng = rand::thread_rng();
rng.next_u32() as usize % mapsets.len()
};
let mapset = mapsets.swap_remove(random_idx);
path.push(&mapset.filename);
match fs::read(&path).await {
Ok(bytes) => return (mapset.mapset_id, bytes),
Err(err) => {
warn!(path = path.display(), ?err, "Failed to read file");
path.pop();
}
}
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
enum Action {
Add,
Remove,
}
impl FromStr for Action {
type Err = ();
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.cow_to_ascii_lowercase().as_ref() {
"r" | "remove" => Ok(Self::Remove),
"a" | "add" => Ok(Self::Add),
_ => Err(()),
}
}
}
| clone();
ma | identifier_name |
tags.rs | use std::{str::FromStr, sync::Arc, time::Duration};
use eyre::Report;
use rand::RngCore;
use rosu_v2::model::GameMode;
use tokio::fs;
use tokio_stream::StreamExt;
use twilight_model::{channel::ReactionType, gateway::event::Event, http::attachment::Attachment};
use super::ReactionWrapper;
use crate::{
database::MapsetTagWrapper,
games::bg::MapsetTags,
util::{
constants::{
common_literals::{MANIA, OSU},
GENERAL_ISSUE, OSU_BASE, OWNER_USER_ID,
},
send_reaction, CowUtils, Emote,
},
Context, Result, CONFIG,
};
#[command]
#[short_desc("Help tagging backgrounds by tagging them manually")]
#[long_desc(
"Manage the tags of a background for the bg game.\n\
First argument must be the mapset id, second argument must be either \
`a` or `add` to add tags, or `r` or `remove` to remove them. \n\
After that provide any of these pre-selected keywords:\n\
`farm, streams, alternate, old, meme, hardname, easy, hard, tech, weeb, bluesky, english`\n\
By default, all tags are marked as **true**, so removing them will be more important."
)]
#[usage("[mapset id] [add/a/remove/r] [list of tags]")]
#[example("21662 r hard farm streams alternate hardname tech weeb bluesky")]
#[aliases("bgtm", "bgtagmanual")]
#[owner()]
async fn bgtagsmanual(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse mapset id
let mapset_id = match args.next().map(u32::from_str) {
Some(Ok(num)) => num,
Some(Err(_)) => {
let content = "Could not parse mapset id. Be sure to specify it as first argument";
return msg.error(&ctx, content).await;
}
None => {
let content = "Arguments: `[mapset id] [add/a/remove/r] [list of tags]`\n\
Example: `21662 r hard farm streams alternate hardname tech weeb bluesky`\n\
Tags: `farm, streams, alternate, old, meme, hardname, easy, hard, tech, \
weeb, bluesky, english`";
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
return Ok(());
}
};
// Check if there is background for the given mapset id
if ctx.psql().get_tags_mapset(mapset_id).await.is_err() {
let content = "No background entry found with this id";
return msg.error(&ctx, content).await;
}
// Parse action
let action = match args.next().map(Action::from_str) {
Some(Ok(action)) => action,
None | Some(Err(_)) => {
let content = "Could not parse action. \
Be sure to specify `r`, `remove`, `a`, or `add` as second argument";
return msg.error(&ctx, content).await;
}
};
// Parse tags
let mut tags = MapsetTags::empty();
while !args.is_empty() {
match args.next().map(MapsetTags::from_str) {
Some(Ok(tag)) => tags.insert(tag),
Some(Err(tag)) => {
let content = format!(
"Could not parse tag `{tag}`.\n\
Be sure to only give these tags:\n\
`farm, streams, alternate, old, meme, hardname, \
easy, hard, tech, weeb, bluesky, english`"
);
return msg.error(&ctx, content).await;
}
None => unreachable!(),
}
}
let result = if tags.is_empty() {
ctx.psql().get_tags_mapset(mapset_id).await
} else {
let db_result = match action {
Action::Add => ctx.psql().add_tags_mapset(mapset_id, tags).await,
Action::Remove => ctx.psql().remove_tags_mapset(mapset_id, tags).await,
};
match db_result {
Ok(_) => ctx.psql().get_tags_mapset(mapset_id).await,
Err(err) => Err(err),
}
};
// Then show the final tags
match result {
Ok(tags) => {
let content = format!("{OSU_BASE}beatmapsets/{mapset_id} is now tagged as:\n{tags}");
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
}
Ok(())
}
// #[command]
// #[short_desc("Help out tagging backgrounds")]
// #[long_desc(
// "Let me give you mapsets that still need to be tagged.\n\
// React to them properly, then lock it in by reacting with ✅.\n\
// To leave the loop, react with ❌ or just wait 10 minutes.\n\
// Mode can be specified in the first argument, defaults to std.\n\
// **You need to be verified to use this command**, feel free to \
// let Badewanne3 know if you want to help out tagging backgrounds."
// )]
// #[usage("[std / mna]")]
// #[aliases("bgt", "bgtag")]
// #[owner()]
async fn bgtags(ctx: Arc<Context>, data: CommandData) -> Result<()> {
| let mut untagged = match ctx.psql().get_all_tags_mapset(mode).await {
Ok(tags) => tags.iter().any(|tag| tag.untagged()),
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if !untagged {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
}
let mut owner = msg.author.id;
loop {
// Get all mapsets for which tags are missing
let tags_result = if untagged {
ctx.psql().get_all_tags_mapset(mode).await
} else {
ctx.psql()
.get_random_tags_mapset(mode)
.await
.map(|tags| vec![tags])
};
let mapsets = match tags_result {
Ok(mut tags) => {
if untagged {
if tags.iter().any(|tag| tag.untagged()) {
tags.retain(|tag| tag.untagged());
} else {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
untagged = false;
tags.truncate(1);
}
}
tags
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let (mapset_id, img) = get_random_image(mapsets, mode).await;
let content = format!(
"<@{owner}> Which tags should this mapsets get: {OSU_BASE}beatmapsets/{mapset_id}\n\
```\n\
🍋: Easy 🎨: Weeb 😱: Hard name 🗽: English 💯: Tech\n\
🤓: Hard 🍨: Kpop 🪀: Alternate 🌀: Streams ✅: Lock in\n\
🤡: Meme 👨🌾: Farm 🟦: Blue sky 👴: Old ❌: Exit loop\n\
```"
);
let img = Attachment::from_bytes("bg_img.png".to_owned(), img);
// Send response
let response = ctx
.http
.create_message(msg.channel_id)
.content(&content)?
.attachments(&[img])
.unwrap()
.exec()
.await?
.model()
.await?;
let msg_id = response.id;
// Setup collector
let reaction_stream = ctx
.standby
.wait_for_event_stream(move |event: &Event| match event {
Event::ReactionAdd(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
Event::ReactionRemove(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
_ => false,
})
.map(|event| match event {
Event::ReactionAdd(add) => ReactionWrapper::Add(add.0),
Event::ReactionRemove(remove) => ReactionWrapper::Remove(remove.0),
_ => unreachable!(),
})
.timeout(Duration::from_secs(600));
tokio::pin!(reaction_stream);
// Add reactions
let reactions = [
"🍋",
"🤓",
"🤡",
"🎨",
"🍨",
"👨🌾",
"😱",
"🪀",
"🟦",
"🗽",
"🌀",
"👴",
"💯",
"✅",
"❌",
];
for &reaction in reactions.iter() {
let emote = Emote::Custom(reaction);
send_reaction(&*ctx, &response, emote).await?;
}
let mut break_loop = true;
// Run collector
let mut add_tags = MapsetTags::empty();
let mut remove_tags = MapsetTags::empty();
while let Some(Ok(reaction)) = reaction_stream.next().await {
let tag = if let ReactionType::Unicode { ref name } = reaction.as_deref().emoji {
match name.as_str() {
"🍋" => MapsetTags::Easy,
"🤓" => MapsetTags::Hard,
"🤡" => MapsetTags::Meme,
"👴" => MapsetTags::Old,
"😱" => MapsetTags::HardName,
"🟦" => MapsetTags::BlueSky,
"🪀" => MapsetTags::Alternate,
"🗽" => MapsetTags::English,
"👨🌾" => MapsetTags::Farm,
"💯" => MapsetTags::Tech,
"🎨" => MapsetTags::Weeb,
"🌀" => MapsetTags::Streams,
"🍨" => MapsetTags::Kpop,
"✅" => {
owner = reaction.as_deref().user_id;
break_loop = false;
break;
}
"❌" => break,
_ => continue,
}
} else {
continue;
};
match reaction {
ReactionWrapper::Add(_) => {
add_tags.insert(tag);
}
ReactionWrapper::Remove(_) => {
remove_tags.insert(tag);
}
}
}
if !add_tags.is_empty() {
if let Err(err) = ctx.psql().add_tags_mapset(mapset_id, add_tags).await {
warn!(?err, "Failed to add tags");
}
}
if !remove_tags.is_empty() {
if let Err(err) = ctx.psql().remove_tags_mapset(mapset_id, remove_tags).await {
warn!(?err, "Failed to remove tags");
}
}
// Then show the final tags
match ctx.psql().get_tags_mapset(mapset_id).await {
Ok(tags) => {
if !tags.is_empty() {
let content = format!(
"{}beatmapsets/{} is now tagged as:\n{}",
OSU_BASE, mapset_id, tags,
);
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if break_loop {
let builder = MessageBuilder::new().content("Exiting loop :wave:");
msg.create_message(&ctx, builder).await?;
break;
}
}
Ok(())
}
async fn get_random_image(mut mapsets: Vec<MapsetTagWrapper>, mode: GameMode) -> (u32, Vec<u8>) {
let mut path = CONFIG.get().unwrap().paths.backgrounds.clone();
match mode {
GameMode::Osu => path.push(OSU),
GameMode::Mania => path.push(MANIA),
_ => unreachable!(),
}
loop {
let random_idx = {
let mut rng = rand::thread_rng();
rng.next_u32() as usize % mapsets.len()
};
let mapset = mapsets.swap_remove(random_idx);
path.push(&mapset.filename);
match fs::read(&path).await {
Ok(bytes) => return (mapset.mapset_id, bytes),
Err(err) => {
warn!(path = path.display(), ?err, "Failed to read file");
path.pop();
}
}
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
enum Action {
Add,
Remove,
}
impl FromStr for Action {
type Err = ();
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.cow_to_ascii_lowercase().as_ref() {
"r" | "remove" => Ok(Self::Remove),
"a" | "add" => Ok(Self::Add),
_ => Err(()),
}
}
}
| let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse arguments as mode
let mode = match args.next() {
Some(arg) => match arg.cow_to_ascii_lowercase().as_ref() {
"mna" | "mania" | "m" => GameMode::Mania,
"osu" | "std" | "standard" | "o" => GameMode::Osu,
_ => {
let content = "Could not parse first argument as mode. \
Provide either `mna`, or `std`";
return msg.error(&ctx, content).await;
}
},
None => GameMode::Osu,
};
| identifier_body |
golem_renderer.rs | use crate::*;
use golem::*;
use std::cell::RefCell;
use std::collections::HashMap;
impl Renderer {
/// Constructs a new `Renderer` that will draw onto the given golem `Context` within the given
/// *initial_viewport*. Normally, only the *wrapper* should use this function.
pub fn new(context: Context, initial_viewport: RenderRegion) -> Self {
Self {
storage: GolemRenderStorage::new(&context).expect("Should be able to init storage"),
context,
text_renderer: TextRenderer::new(),
viewport_stack: RefCell::new(vec![initial_viewport]),
scissor_stack: RefCell::new(vec![initial_viewport]),
}
}
/// Sets the color of all pixels within the current viewport and scissor to the given `Color`.
pub fn clear(&self, color: Color) {
self.context.set_clear_color(
color.get_red_float(),
color.get_green_float(),
color.get_blue_float(),
color.get_alpha_float(),
);
self.context.clear();
}
/// Uses the given *FragmentOnlyShader* to fill the rectangular region defined by *min_x*,
/// *min_y*, *max_x*, and *max_y* (each of them should be between 0.0 and 1.0) using the given
/// *parameters* (typically uniform variables). If you don't want to draw on the entire
/// rectangular region, you can let the fragment shader *discard* those pixels.
pub fn apply_fragment_shader(
&self, min_x: f32, min_y: f32, max_x: f32, max_y: f32,
shader: &FragmentOnlyShader, parameters: FragmentOnlyDrawParameters
) {
let shader_name = format!("FragmentOnlyShader {:?}", shader.hash.as_slice());
self.use_cached_shader(
&ShaderId::from_strings("knukki".to_string(), shader_name),
|golem| {
let mut uniforms = Vec::new();
uniforms.push(Uniform::new(
"vertexBounds",
UniformType::Vector(NumberType::Float, Dimension::D4)
));
for matrix_counter in 1..=shader.description.num_float_matrices {
uniforms.push(Uniform::new(
MATRIX_VARIABLE_NAMES[matrix_counter as usize],
UniformType::Matrix(Dimension::D4)
));
}
for color_counter in 1..=shader.description.num_colors {
uniforms.push(Uniform::new(
COLOR_VARIABLE_NAMES[color_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1..=shader.description.num_float_vectors {
uniforms.push(Uniform::new(
FLOAT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1..=shader.description.num_int_vectors {
uniforms.push(Uniform::new(
INT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Int, Dimension::D4)
));
}
for float_counter in 1..=shader.description.num_floats {
uniforms.push(Uniform::new(
FLOAT_VARIABLE_NAMES[float_counter as usize],
UniformType::Scalar(NumberType::Float)
));
}
for int_counter in 1..=shader.description.num_ints {
uniforms.push(Uniform::new(
INT_VARIABLE_NAMES[int_counter as usize],
UniformType::Scalar(NumberType::Int)
));
}
let shader_description = ShaderDescription {
vertex_input: &[
Attribute::new("vertexInnerPosition", AttributeType::Vector(Dimension::D2))
],
fragment_input: &[
Attribute::new("innerPosition", AttributeType::Vector(Dimension::D2)),
Attribute::new("outerPosition", AttributeType::Vector(Dimension::D2))
],
uniforms: &uniforms,
vertex_shader: "
void main() {
innerPosition = 0.5 * vertexInnerPosition + 0.5;
vec2 bottomLeftBounds = vertexBounds.xy;
vec2 topRightBounds = vertexBounds.zw;
outerPosition = bottomLeftBounds + innerPosition * (topRightBounds - bottomLeftBounds);
gl_Position = vec4(2.0 * outerPosition - vec2(1.0, 1.0), 0.0, 1.0);
}
",
fragment_shader: &shader.description.source_code
};
ShaderProgram::new(golem, shader_description)
}, |shader_program| {
shader_program.set_uniform("vertexBounds", UniformValue::Vector4([min_x, min_y, max_x, max_y]))?;
for matrix_counter in 1..=shader.description.num_float_matrices {
let _result = shader_program.set_uniform(
&format!("matrix{}", matrix_counter),
UniformValue::Matrix4(parameters.float_matrices[matrix_counter as usize - 1])
);
}
for color_counter in 1..=shader.description.num_colors {
let _result = shader_program.set_uniform(
&format!("color{}", color_counter),
UniformValue::Vector4(parameters.colors[color_counter as usize - 1].to_float_array())
);
}
for vector_counter in 1..=shader.description.num_float_vectors {
let _result = shader_program.set_uniform(
&format!("floatVector{}", vector_counter),
UniformValue::Vector4(parameters.float_vectors[vector_counter as usize - 1])
);
}
for vector_counter in 1..= shader.description.num_int_vectors {
let _result = shader_program.set_uniform(
&format!("intVector{}", vector_counter),
UniformValue::IVector4(parameters.int_vectors[vector_counter as usize - 1])
);
}
for float_counter in 1..= shader.description.num_floats {
let _result = shader_program.set_uniform(
&format!("float{}", float_counter),
UniformValue::Float(parameters.floats[float_counter as usize - 1])
);
}
for int_counter in 1..= shader.description.num_ints {
let _result = shader_program.set_uniform(
&format!("int{}", int_counter),
UniformValue::Int(parameters.ints[int_counter as usize - 1])
);
}
unsafe {
shader_program.draw(
self.get_quad_vertices(),
self.get_quad_indices(),
0.. self.get_num_quad_indices(),
GeometryMode::Triangles
)
}
}
).expect("Shader shouldn't fail");
}
/// Gets the golem `Context` of this `Renderer`. Use this context to perform drawing operations
/// that are not covered by the other methods of `Renderer`. Note that using this will damage
/// the portability of the application since this will only work when a Golem renderer is used.
pub fn get_context(&self) -> &Context {
&self.context
}
// This will be handled internally.
pub(super) fn apply_viewport_and_scissor(&self) {
self.get_viewport().set_viewport(&self.context);
self.get_scissor().set_scissor(&self.context);
}
/// Gets a reference to a `VertexBuffer` representing the basic `quad` model (simply the
/// positions [(-1.0, -1.0), (1.0, -1.0), (1.0, 1.0), (-1.0, 1.0)] ).
///
/// This model can be surprisingly useful for `Component`s because this simple model can be
/// quite powerful in combination with the right (fragment) shader: by discarding the right
/// pixels, it is easy to construct other shapes like circles. It is also great for drawing
/// basic images.
///
/// As explained above, it can be useful for many `Component`s. It would be a slight waste of
/// resources to let every component create its own quad `VertexBuffer`. To solve this issue,
/// all components in need of the quad model can simply share this one.
pub fn get_quad_vertices(&self) -> &VertexBuffer {
&self.storage.quad_vertices
}
/// Gets a reference to the corresponding `ElementBuffer` of the `VertexBuffer` given by the
/// `get_quad_vertices` method. (These indices are just [(0, 1, 2), (2, 3, 0)].)
pub fn get_quad_indices(&self) -> &ElementBuffer {
&self.storage.quad_indices
}
/// Gets the number of indices in the `ElementBuffer` given by the `get_quad_indices`
/// method (which is just 6).
pub fn get_num_quad_indices(&self) -> usize {
6
}
/// Checks if the shader with the given *id* has been cached by this `Renderer`. If so, `bind`s
/// that shader and calls the given *use_shader* closure.
///
/// If the shader with the given *id* is **not** found in the cache, the given `create_shader`
/// closure will be called to create this. Then, it will be stored in the cache and its `bind`
/// function will be called. And finally, the given *use_shader* closure will be called.
///
/// ## Motivation
/// Caching shaders can make the implementation of the `render` methods of `Component`s easier
/// while also improving performance: `Component`s often need shaders for rendering, and they
/// would otherwise need to create them at the start of every call of their `render` method
/// (which is very bad for performance). Alternatively, they could create each shader lazily
/// during their first `render` call and store it for later (which is annoying to program
/// because it requires adding an extra `Option<ShaderProgram>` field and maintaining it).
/// That would be better for performance, but is still suboptimal because every `Component`
/// would need its **own** instance of the shaders it needs, even if many other `Component`s
/// need that exact same shader.
///
/// When `Component`s use this method, they no longer need to worry about storing the shader
/// (because the `Renderer` will take care of that), and it will automatically be shared by all
/// other `Component`s that use this method with the same shader **id**.
pub fn use_cached_shader(
&self,
id: &ShaderId,
create_shader: impl FnOnce(&golem::Context) -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
let mut cache = self.storage.shader_cache.borrow_mut();
cache.use_shader(id, || create_shader(&self.context), use_shader)
}
pub fn load_texture(&self, cpu_texture: &crate::Texture) -> Result<golem::Texture, GolemError> {
let mut gpu_texture = golem::Texture::new(&self.context)?;
let pixel_buffer = cpu_texture.create_pixel_buffer();
gpu_texture.set_image(
Some(&pixel_buffer),
cpu_texture.get_width(),
cpu_texture.get_height(),
ColorFormat::RGBA,
);
gpu_texture.set_wrap_h(TextureWrap::ClampToEdge)?;
gpu_texture.set_wrap_v(TextureWrap::ClampToEdge)?;
gpu_texture.set_magnification(TextureFilter::Linear)?;
gpu_texture.set_minification(TextureFilter::Linear)?;
Ok(gpu_texture)
}
}
pub(super) struct GolemRenderStorage {
// Frequently used and cheap buffers
quad_vertices: VertexBuffer,
quad_indices: ElementBuffer,
shader_cache: RefCell<ShaderCache>,
}
impl GolemRenderStorage {
fn new(context: &Context) -> Result<Self, GolemError> {
let mut quad_vertices = VertexBuffer::new(context)?;
#[rustfmt::skip]
quad_vertices.set_data(&[-1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0]);
let mut quad_indices = ElementBuffer::new(context)?;
quad_indices.set_data(&[0, 1, 2, 2, 3, 0]);
// Practice will have to tell whether 200 is good.
let max_cached_shaders = 200;
Ok(Self {
quad_vertices,
quad_indices,
shader_cache: RefCell::new(ShaderCache::new(max_cached_shaders)),
})
}
}
struct ShaderCache {
map: HashMap<ShaderId, CachedShader>,
max_cached_shaders: usize,
current_time: u64,
}
impl ShaderCache {
fn new(max_cached_shaders: usize) -> Self {
assert!(max_cached_shaders > 0);
Self {
map: HashMap::new(),
max_cached_shaders,
current_time: 0,
}
}
fn get_existing(&mut self, id: &ShaderId) -> &mut ShaderProgram {
let cached = self.map.get_mut(id).unwrap();
cached.last_used = self.current_time;
return &mut cached.shader;
}
fn use_shader(
&mut self,
id: &ShaderId,
create_shader: impl FnOnce() -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
self.current_time += 1;
// If we have the value already, update its last_used and return it
// Unfortunately, we do 2 hash lookups. I tried using only 1, but couldn't convince the compiler
let has_already = self.map.contains_key(id);
if has_already {
let shader = self.get_existing(id);
shader.bind();
return use_shader(shader);
}
// If we reach this line, we didn't have the shader yet
let new_length = self.map.len() + 1;
// If we would exceed the maximum number of cached shaders, we remove the least recently used half
if new_length > self.max_cached_shaders {
let mut last_used_times: Vec<u64> = self
.map
.values()
.map(|cached_shader| cached_shader.last_used)
.collect();
last_used_times.sort();
let median = last_used_times[last_used_times.len() / 2];
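// Example: if the recorded times are [3, 5, 8, 9], the median is 8, so only
// entries used after time 8 survive the `retain` below.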
// Remove at least half of the cached shaders
self.map
.retain(|_id, cached_shader| cached_shader.last_used > median);
}
// Now that we are sure we won't exceed the maximum number of shaders, we can insert the
// new shader, and return a reference to it.
let value = self.map.entry(id.clone()).or_insert(CachedShader {
last_used: self.current_time,
shader: create_shader()?,
});
value.shader.bind();
use_shader(&mut value.shader)
}
}
struct CachedShader {
last_used: u64,
shader: ShaderProgram,
}
/// Represents a unique identifier for a pair of a vertex shader and fragment shader. This struct
/// has a `crate_name` and a `shader_name`. This struct is used for the `use_cached_shader` method
/// of `Renderer` to identify shaders.
///
/// ## Crate name
/// The `crate_name` should be the name of the crate that defines the corresponding shader.
///
/// ## Shader name
/// The `shader_name` should be used to distinguish shaders that are defined by the same crate. All
/// shaders defined by the same crate must have a distinct `shader_name`.
#[derive(Eq, PartialEq, Hash, Clone)]
pub struct ShaderId {
crate_name: String,
shader_name: String,
}
impl ShaderId {
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strings(crate_name: String, shader_name: String) -> Self {
Self {
crate_name,
shader_name,
}
}
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strs(crate_name: &str, shader_name: &str) -> Self {
Self {
crate_name: crate_name.to_string(),
shader_name: shader_name.to_string(),
}
}
}
// golem_renderer.rs
use crate::*;
use golem::*;
use std::cell::RefCell;
use std::collections::HashMap;
impl Renderer {
/// Constructs a new `Renderer` that will draw onto the given golem `Context` within the given
/// *initial_viewport*. Normally, only the *wrapper* should use this function.
pub fn new(context: Context, initial_viewport: RenderRegion) -> Self {
Self {
storage: GolemRenderStorage::new(&context).expect("Should be able to init storage"),
context,
text_renderer: TextRenderer::new(),
viewport_stack: RefCell::new(vec![initial_viewport]),
scissor_stack: RefCell::new(vec![initial_viewport]),
}
}
/// Sets the color of all pixels within the current viewport and scissor to the given `Color`.
pub fn clear(&self, color: Color) {
self.context.set_clear_color(
color.get_red_float(),
color.get_green_float(),
color.get_blue_float(),
color.get_alpha_float(),
);
self.context.clear();
}
/// Uses the given *FragmentOnlyShader* to fill the rectangular region defined by *min_x*,
/// *min_y*, *max_x*, and *max_y* (each of them should be between 0.0 and 1.0) using the given
/// *parameters* (typically uniform variables). If you don't want to draw on the entire
/// rectangular region, you can let the fragment shader *discard* those pixels.
pub fn apply_fragment_shader(
&self, min_x: f32, min_y: f32, max_x: f32, max_y: f32,
shader: &FragmentOnlyShader, parameters: FragmentOnlyDrawParameters
) {
let shader_name = format!("FragmentOnlyShader {:?}", shader.hash.as_slice());
self.use_cached_shader(
&ShaderId::from_strings("knukki".to_string(), shader_name),
|golem| {
let mut uniforms = Vec::new();
uniforms.push(Uniform::new(
"vertexBounds",
UniformType::Vector(NumberType::Float, Dimension::D4)
));
for matrix_counter in 1..= shader.description.num_float_matrices {
uniforms.push(Uniform::new(
MATRIX_VARIABLE_NAMES[matrix_counter as usize],
UniformType::Matrix(Dimension::D4)
));
}
for color_counter in 1..= shader.description.num_colors {
uniforms.push(Uniform::new(
COLOR_VARIABLE_NAMES[color_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1..= shader.description.num_float_vectors {
uniforms.push(Uniform::new(
FLOAT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1..= shader.description.num_int_vectors {
uniforms.push(Uniform::new(
INT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Int, Dimension::D4)
));
}
for float_counter in 1..= shader.description.num_floats {
uniforms.push(Uniform::new(
FLOAT_VARIABLE_NAMES[float_counter as usize],
UniformType::Scalar(NumberType::Float)
));
}
for int_counter in 1..= shader.description.num_ints {
uniforms.push(Uniform::new(
INT_VARIABLE_NAMES[int_counter as usize],
UniformType::Scalar(NumberType::Int)
));
}
let shader_description = ShaderDescription {
vertex_input: &[
Attribute::new("vertexInnerPosition", AttributeType::Vector(Dimension::D2))
],
fragment_input: &[
Attribute::new("innerPosition", AttributeType::Vector(Dimension::D2)),
Attribute::new("outerPosition", AttributeType::Vector(Dimension::D2))
],
uniforms: &uniforms,
vertex_shader: "
void main() {
innerPosition = 0.5 * vertexInnerPosition + 0.5;
vec2 bottomLeftBounds = vertexBounds.xy;
vec2 topRightBounds = vertexBounds.zw;
outerPosition = bottomLeftBounds + innerPosition * (topRightBounds - bottomLeftBounds);
gl_Position = vec4(2.0 * outerPosition - vec2(1.0, 1.0), 0.0, 1.0);
}
",
fragment_shader: &shader.description.source_code
};
ShaderProgram::new(golem, shader_description)
}, |shader_program| {
shader_program.set_uniform("vertexBounds", UniformValue::Vector4([min_x, min_y, max_x, max_y]))?;
for matrix_counter in 1..= shader.description.num_float_matrices {
let _result = shader_program.set_uniform(
&format!("matrix{}", matrix_counter),
UniformValue::Matrix4(parameters.float_matrices[matrix_counter as usize - 1])
);
}
for color_counter in 1..= shader.description.num_colors {
let _result = shader_program.set_uniform(
&format!("color{}", color_counter),
UniformValue::Vector4(parameters.colors[color_counter as usize - 1].to_float_array())
);
}
for vector_counter in 1..= shader.description.num_float_vectors {
let _result = shader_program.set_uniform(
&format!("floatVector{}", vector_counter),
UniformValue::Vector4(parameters.float_vectors[vector_counter as usize - 1])
);
}
for vector_counter in 1..= shader.description.num_int_vectors {
let _result = shader_program.set_uniform(
&format!("intVector{}", vector_counter),
UniformValue::IVector4(parameters.int_vectors[vector_counter as usize - 1])
);
}
for float_counter in 1..= shader.description.num_floats {
let _result = shader_program.set_uniform(
&format!("float{}", float_counter),
UniformValue::Float(parameters.floats[float_counter as usize - 1])
);
}
for int_counter in 1..= shader.description.num_ints {
let _result = shader_program.set_uniform(
&format!("int{}", int_counter),
UniformValue::Int(parameters.ints[int_counter as usize - 1])
);
}
unsafe {
shader_program.draw(
self.get_quad_vertices(),
self.get_quad_indices(),
0.. self.get_num_quad_indices(),
GeometryMode::Triangles
)
}
}
).expect("Shader shouldn't fail");
}
/// Gets the golem `Context` of this `Renderer`. Use this context to perform drawing operations
/// that are not covered by the other methods of `Renderer`. Note that using this will damage
/// the portability of the application since this will only work when a Golem renderer is used.
pub fn get_context(&self) -> &Context {
&self.context
}
// This will be handled internally.
pub(super) fn apply_viewport_and_scissor(&self) {
self.get_viewport().set_viewport(&self.context);
self.get_scissor().set_scissor(&self.context);
}
/// Gets a reference to a `VertexBuffer` representing the basic `quad` model (simply the
/// positions [(-1.0, -1.0), (1.0, -1.0), (1.0, 1.0), (-1.0, 1.0)] ).
///
/// This model can be surprisingly useful for `Component`s because this simple model can be
/// quite powerful in combination with the right (fragment) shader: by discarding the right
/// pixels, it is easy to construct other shapes like circles. It is also great for drawing
/// basic images.
///
/// As explained above, it can be useful for many `Component`s. It would be a slight waste of
/// resources to let every component create its own quad `VertexBuffer`. To solve this issue,
/// all components in need of the quad model can simply share this one.
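///
/// For instance, a hypothetical fragment shader could turn the quad into a filled circle
/// by discarding every pixel outside of it (a GLSL sketch, not a shader shipped by this
/// crate; `innerPosition` and `color1` follow the naming used by `apply_fragment_shader`):
/// ```ignore
/// void main() {
///     vec2 centered = 2.0 * innerPosition - vec2(1.0, 1.0);
///     if (dot(centered, centered) > 1.0) discard;
///     gl_FragColor = color1;
/// }
/// ```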
pub fn get_quad_vertices(&self) -> &VertexBuffer {
&self.storage.quad_vertices
}
/// Gets a reference to the corresponding `ElementBuffer` of the `VertexBuffer` given by the
/// `get_quad_vertices` method. (These indices are just [(0, 1, 2), (2, 3, 0)].)
pub fn get_quad_indices(&self) -> &ElementBuffer {
&self.storage.quad_indices
}
/// Gets the number of indices in the `ElementBuffer` given by the `get_quad_indices`
/// method (which is just 6).
pub fn get_num_quad_indices(&self) -> usize {
6
}
/// Checks if the shader with the given *id* has been cached by this `Renderer`. If so, `bind`s
/// that shader and calls the given *use_shader* closure.
///
/// If the shader with the given *id* is **not** found in the cache, the given `create_shader`
/// closure will be called to create this. Then, it will be stored in the cache and its `bind`
/// function will be called. And finally, the given *use_shader* closure will be called.
///
/// ## Motivation
/// Caching shaders can make the implementation of the `render` methods of `Component`s easier
/// while also improving performance: `Component`s often need shaders for rendering, and they
/// would otherwise need to create them at the start of every call of their `render` method
/// (which is very bad for performance). Alternatively, they could create each shader lazily
/// during their first `render` call and store it for later (which is annoying to program
/// because it requires adding an extra `Option<ShaderProgram>` field and maintaining it).
/// That would be better for performance, but is still suboptimal because every `Component`
/// would need its **own** instance of the shaders it needs, even if many other `Component`s
/// need that exact same shader.
///
/// When `Component`s use this method, they no longer need to worry about storing the shader
/// (because the `Renderer` will take care of that), and it will automatically be shared by all
/// other `Component`s that use this method with the same shader **id**.
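///
/// ## Example
/// A minimal sketch; the id strings, shader description, and uniform name are hypothetical:
/// ```ignore
/// renderer.use_cached_shader(
///     &ShaderId::from_strs("my-crate", "my-shader"),
///     |golem| ShaderProgram::new(golem, my_shader_description()),
///     |program| {
///         program.set_uniform("color1", UniformValue::Vector4([1.0, 0.0, 0.0, 1.0]))?;
///         // ...issue draw calls with the bound program...
///         Ok(())
///     },
/// )
/// ```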
pub fn use_cached_shader(
&self,
id: &ShaderId,
create_shader: impl FnOnce(&golem::Context) -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
let mut cache = self.storage.shader_cache.borrow_mut();
cache.use_shader(id, || create_shader(&self.context), use_shader)
}
pub fn load_texture(&self, cpu_texture: &crate::Texture) -> Result<golem::Texture, GolemError> {
let mut gpu_texture = golem::Texture::new(&self.context)?;
let pixel_buffer = cpu_texture.create_pixel_buffer();
gpu_texture.set_image(
Some(&pixel_buffer),
cpu_texture.get_width(),
cpu_texture.get_height(),
ColorFormat::RGBA,
);
gpu_texture.set_wrap_h(TextureWrap::ClampToEdge)?;
gpu_texture.set_wrap_v(TextureWrap::ClampToEdge)?;
gpu_texture.set_magnification(TextureFilter::Linear)?;
gpu_texture.set_minification(TextureFilter::Linear)?;
Ok(gpu_texture)
}
}
pub(super) struct GolemRenderStorage {
// Frequently used and cheap buffers
quad_vertices: VertexBuffer,
quad_indices: ElementBuffer,
shader_cache: RefCell<ShaderCache>,
}
impl GolemRenderStorage {
fn new(context: &Context) -> Result<Self, GolemError> {
let mut quad_vertices = VertexBuffer::new(context)?;
#[rustfmt::skip]
quad_vertices.set_data(&[-1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0]);
let mut quad_indices = ElementBuffer::new(context)?;
quad_indices.set_data(&[0, 1, 2, 2, 3, 0]);
// Practice will have to tell whether 200 is good.
let max_cached_shaders = 200;
Ok(Self {
quad_vertices,
quad_indices,
shader_cache: RefCell::new(ShaderCache::new(max_cached_shaders)),
})
}
}
struct ShaderCache {
map: HashMap<ShaderId, CachedShader>,
max_cached_shaders: usize,
current_time: u64,
}
impl ShaderCache {
fn new(max_cached_shaders: usize) -> Self {
assert!(max_cached_shaders > 0);
Self {
map: HashMap::new(),
max_cached_shaders,
current_time: 0,
}
}
fn get_existing(&mut self, id: &ShaderId) -> &mut ShaderProgram {
let cached = self.map.get_mut(id).unwrap();
cached.last_used = self.current_time;
return &mut cached.shader;
}
fn use_shader(
&mut self,
id: &ShaderId,
create_shader: impl FnOnce() -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
self.current_time += 1;
// If we have the value already, update its last_used and return it
// Unfortunately, we do 2 hash lookups. I tried using only 1, but couldn't convince the compiler
let has_already = self.map.contains_key(id);
if has_already {
let shader = self.get_existing(id);
shader.bind();
return use_shader(shader);
}
// If we reach this line, we didn't have the shader yet
let new_length = self.map.len() + 1;
// If we would exceed the maximum number of cached shaders, we remove the least recently used half
if new_length > self.max_cached_shaders {
let mut last_used_times: Vec<u64> = self
.map
.values()
.map(|cached_shader| cached_shader.last_used)
.collect();
last_used_times.sort();
let median = last_used_times[last_used_times.len() / 2];
// Remove at least half of the cached shaders
self.map
.retain(|_id, cached_shader| cached_shader.last_used > median);
}
// Now that we are sure we won't exceed the maximum number of shaders, we can insert the
// new shader, and return a reference to it.
let value = self.map.entry(id.clone()).or_insert(CachedShader {
last_used: self.current_time,
shader: create_shader()?,
});
value.shader.bind();
use_shader(&mut value.shader)
}
}
struct CachedShader {
last_used: u64,
shader: ShaderProgram,
}
/// Represents a unique identifier for a pair of a vertex shader and fragment shader. This struct
/// has a `crate_name` and a `shader_name`. This struct is used for the `use_cached_shader` method
/// of `Renderer` to identify shaders.
///
/// ## Crate name
/// The `crate_name` should be the name of the crate that defines the corresponding shader.
///
/// ## Shader name
/// The `shader_name` should be used to distinguish shaders that are defined by the same crate. All
/// shaders defined by the same crate must have a distinct `shader_name`.
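///
/// For example, this crate itself uses ids like
/// `ShaderId::from_strings("knukki".to_string(), shader_name)` for its cached
/// fragment-only shaders.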
#[derive(Eq, PartialEq, Hash, Clone)]
pub struct ShaderId {
crate_name: String,
shader_name: String,
}
impl ShaderId {
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strings(crate_name: String, shader_name: String) -> Self {
Self {
crate_name,
shader_name,
}
}
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strs(crate_name: &str, shader_name: &str) -> Self {
Self {
crate_name: crate_name.to_string(),
shader_name: shader_name.to_string(),
}
}
}
// repr.rs
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
//! Flattened Representation of an AssumeRole chain
//!
//! Assume Role credentials in profile files can chain together credentials from multiple
//! different providers with subsequent credentials being used to configure subsequent providers.
//!
//! This module can parse and resolve the profile chain into a flattened representation with
//! one credential per row (as opposed to a direct profile file representation which can combine
//! multiple actions into the same profile).
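//!
//! For example, a (hypothetical) profile file such as:
//!
//! ```ini
//! [profile A]
//! role_arn = arn:aws:iam::123456789:role/RoleA
//! source_profile = B
//!
//! [profile B]
//! aws_access_key_id = abc123
//! aws_secret_access_key = def456
//! ```
//!
//! resolves to a flattened chain whose base is `BaseProvider::AccessKey` (the static keys of
//! profile B), followed by a single `RoleArn` row for `RoleA`.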
use crate::profile::credentials::ProfileFileError;
use crate::profile::{Profile, ProfileSet};
use aws_types::Credentials;
/// Chain of Profile Providers
///
/// Within a profile file, a chain of providers is produced. Starting with a base provider,
/// subsequent providers use the credentials from previous providers to perform their task.
///
/// ProfileChain is a direct representation of the Profile. It can contain named providers
/// that don't actually have implementations.
#[derive(Debug)]
pub struct ProfileChain<'a> {
pub(crate) base: BaseProvider<'a>,
pub(crate) chain: Vec<RoleArn<'a>>,
}
impl<'a> ProfileChain<'a> {
pub fn base(&self) -> &BaseProvider<'a> {
&self.base
}
pub fn chain(&self) -> &[RoleArn<'a>] {
self.chain.as_slice()
}
}
/// A base member of the profile chain
///
/// Base providers do not require input credentials to provide their own credentials,
/// e.g. IMDS, ECS, or environment variables.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum BaseProvider<'a> {
/// A profile that specifies a named credential source
/// Eg: `credential_source = Ec2InstanceMetadata`
///
/// The following profile produces two separate `ProfileProvider` rows:
/// 1. `BaseProvider::NamedSource("Ec2InstanceMetadata")`
/// 2. `RoleArn { role_arn: "...", ... }`
/// ```ini
/// [profile assume-role]
/// role_arn = arn:aws:iam::123456789:role/MyRole
/// credential_source = Ec2InstanceMetadata
/// ```
NamedSource(&'a str),
/// A profile with explicitly configured access keys
///
/// Example
/// ```ini
/// [profile C]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
AccessKey(Credentials),
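/// A profile that assumes a role using a web identity token read from a file
///
/// Example (the ARN and path are illustrative):
/// ```ini
/// [profile web-identity]
/// role_arn = arn:aws:iam::123456789:role/MyRole
/// web_identity_token_file = /path/to/token.jwt
/// ```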
WebIdentityTokenRole {
role_arn: &'a str,
web_identity_token_file: &'a str,
session_name: Option<&'a str>,
}, // TODO: add SSO support
/*
/// An SSO Provider
Sso {
sso_account_id: &'a str,
sso_region: &'a str,
sso_role_name: &'a str,
sso_start_url: &'a str,
},
*/
}
/// A profile that specifies a role to assume
///
/// A RoleArn can only be created from either a profile with `source_profile`
/// or one with `credential_source`.
#[derive(Debug)]
pub struct RoleArn<'a> {
/// Role to assume
pub role_arn: &'a str,
/// external_id parameter to pass to the assume role provider
pub external_id: Option<&'a str>,
/// session name parameter to pass to the assume role provider
pub session_name: Option<&'a str>,
}
/// Resolve a ProfileChain from a ProfileSet or return an error
pub fn resolve_chain<'a>(
profile_set: &'a ProfileSet,
profile_override: Option<&str>,
) -> Result<ProfileChain<'a>, ProfileFileError> {
if profile_set.is_empty() {
return Err(ProfileFileError::NoProfilesDefined);
}
let mut source_profile_name =
profile_override.unwrap_or_else(|| profile_set.selected_profile());
let mut visited_profiles = vec![];
let mut chain = vec![];
let base = loop {
let profile = profile_set.get_profile(source_profile_name).ok_or(
ProfileFileError::MissingProfile {
profile: source_profile_name.into(),
message: format!(
"could not find source profile {} referenced from {}",
source_profile_name,
visited_profiles.last().unwrap_or(&"the root profile")
)
.into(),
},
)?;
if visited_profiles.contains(&source_profile_name) {
return Err(ProfileFileError::CredentialLoop {
profiles: visited_profiles
.into_iter()
.map(|s| s.to_string())
.collect(),
next: source_profile_name.to_string(),
});
}
visited_profiles.push(&source_profile_name);
// After the first item in the chain, we will prioritize static credentials if they exist
if visited_profiles.len() > 1 {
let try_static = static_creds_from_profile(&profile);
if let Ok(static_credentials) = try_static {
break BaseProvider::AccessKey(static_credentials);
}
}
let next_profile = match chain_provider(&profile) {
// this provider wasn't a chain provider, reload it as a base provider
None => {
break base_provider(profile).map_err(|err| {
ProfileFileError::InvalidCredentialSource {
profile: profile.name().into(),
message: format!("could not load source profile: {}", err).into(),
}
})?;
}
Some(result) => {
let (chain_profile, next) = result?;
chain.push(chain_profile);
next
}
};
match next_profile {
NextProfile::SelfReference => {
// self referential profile, don't go through the loop because it will error
// on the infinite loop check. Instead, reload this profile as a base profile
// and exit.
break base_provider(profile)?;
}
NextProfile::Named(name) => source_profile_name = name,
}
};
chain.reverse();
Ok(ProfileChain { base, chain })
}
mod role {
pub const ROLE_ARN: &str = "role_arn";
pub const EXTERNAL_ID: &str = "external_id";
pub const SESSION_NAME: &str = "role_session_name";
pub const CREDENTIAL_SOURCE: &str = "credential_source";
pub const SOURCE_PROFILE: &str = "source_profile";
}
mod web_identity_token {
pub const TOKEN_FILE: &str = "web_identity_token_file";
}
mod static_credentials {
pub const AWS_ACCESS_KEY_ID: &str = "aws_access_key_id";
pub const AWS_SECRET_ACCESS_KEY: &str = "aws_secret_access_key";
pub const AWS_SESSION_TOKEN: &str = "aws_session_token";
}
const PROVIDER_NAME: &str = "ProfileFile";
fn base_provider(profile: &Profile) -> Result<BaseProvider, ProfileFileError> {
// the profile must define a `credential_source`, a web identity token role, or a concrete set of access keys
match profile.get(role::CREDENTIAL_SOURCE) {
Some(source) => Ok(BaseProvider::NamedSource(source)),
None => web_identity_token_from_profile(profile)
.unwrap_or_else(|| Ok(BaseProvider::AccessKey(static_creds_from_profile(profile)?))),
}
}
enum NextProfile<'a> {
SelfReference,
Named(&'a str),
}
fn chain_provider(profile: &Profile) -> Option<Result<(RoleArn, NextProfile), ProfileFileError>> {
let role_provider = role_arn_from_profile(&profile)?;
let (source_profile, credential_source) = (
profile.get(role::SOURCE_PROFILE),
profile.get(role::CREDENTIAL_SOURCE),
);
let profile = match (source_profile, credential_source) {
(Some(_), Some(_)) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile contained both source_profile and credential_source. \
Only one or the other can be defined"
.into(),
}),
(None, None) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message:
"profile must contain `source_profile` or `credential_source` but neither were defined"
.into(),
}),
(Some(source_profile), None) if source_profile == profile.name() => {
Ok((role_provider, NextProfile::SelfReference))
}
(Some(source_profile), None) => Ok((role_provider, NextProfile::Named(source_profile))),
// we want to loop back into this profile and pick up the credential source
(None, Some(_credential_source)) => Ok((role_provider, NextProfile::SelfReference)),
};
Some(profile)
}
fn role_arn_from_profile(profile: &Profile) -> Option<RoleArn> {
// Web Identity Tokens are root providers, not chained roles
if profile.get(web_identity_token::TOKEN_FILE).is_some() {
return None;
}
let role_arn = profile.get(role::ROLE_ARN)?;
let session_name = profile.get(role::SESSION_NAME);
let external_id = profile.get(role::EXTERNAL_ID);
Some(RoleArn {
role_arn,
external_id,
session_name,
})
}
fn web_identity_token_from_profile(
profile: &Profile,
) -> Option<Result<BaseProvider, ProfileFileError>> {
let session_name = profile.get(role::SESSION_NAME);
match (
profile.get(role::ROLE_ARN),
profile.get(web_identity_token::TOKEN_FILE),
) {
(Some(role_arn), Some(token_file)) => Some(Ok(BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file: token_file,
session_name,
})),
(None, None) => None,
(Some(_role_arn), None) => None,
(None, Some(_token_file)) => Some(Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "`web_identity_token_file` was specified but `role_arn` was missing".into(),
})),
}
}
/// Load static credentials from a profile
///
/// Example:
/// ```ini
/// [profile B]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
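///
/// For the profile above, this returns `Credentials` with access key `abc123`, secret key
/// `def456`, and no session token.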
fn static_creds_from_profile(profile: &Profile) -> Result<Credentials, ProfileFileError> {
use static_credentials::*;
let access_key = profile.get(AWS_ACCESS_KEY_ID);
let secret_key = profile.get(AWS_SECRET_ACCESS_KEY);
let session_token = profile.get(AWS_SESSION_TOKEN);
if let (None, None, None) = (access_key, secret_key, session_token) {
return Err(ProfileFileError::ProfileDidNotContainCredentials {
profile: profile.name().to_string(),
});
}
let access_key = access_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_access_key_id".into(),
})?;
let secret_key = secret_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_secret_access_key".into(),
})?;
Ok(Credentials::new(
access_key,
secret_key,
session_token.map(|s| s.to_string()),
None,
PROVIDER_NAME,
))
}
#[cfg(test)]
mod tests {
use crate::profile::credentials::repr::{resolve_chain, BaseProvider, ProfileChain};
use crate::profile::ProfileSet;
use serde::Deserialize;
use std::collections::HashMap;
use std::error::Error;
use std::fs;
#[test]
fn run_test_cases() -> Result<(), Box<dyn Error>> {
let test_cases: Vec<TestCase> =
serde_json::from_str(&fs::read_to_string("./test-data/assume-role-tests.json")?)?;
for test_case in test_cases {
print!("checking: {}...", test_case.docs);
check(test_case);
println!("ok")
}
Ok(())
}
fn check(test_case: TestCase) {
let source = ProfileSet::new(test_case.input.profile, test_case.input.selected_profile);
let actual = resolve_chain(&source, None);
let expected = test_case.output;
match (expected, actual) {
(TestOutput::Error(s), Err(e)) => assert!(
format!("{}", e).contains(&s),
"expected {} to contain `{}`",
e,
s
),
(TestOutput::ProfileChain(expected), Ok(actual)) => {
assert_eq!(to_test_output(actual), expected)
}
(expected, actual) => panic!(
"error/success mismatch. Expected:\n {:?}\nActual:\n {:?}",
&expected, actual
),
}
}
#[derive(Deserialize)]
struct TestCase {
docs: String,
input: TestInput,
output: TestOutput,
}
#[derive(Deserialize)]
struct TestInput {
profile: HashMap<String, HashMap<String, String>>,
selected_profile: String,
}
fn to_test_output(profile_chain: ProfileChain) -> Vec<Provider> {
let mut output = vec![];
match profile_chain.base {
BaseProvider::NamedSource(name) => output.push(Provider::NamedSource(name.into())),
BaseProvider::AccessKey(creds) => output.push(Provider::AccessKey {
access_key_id: creds.access_key_id().into(),
secret_access_key: creds.secret_access_key().into(),
session_token: creds.session_token().map(|tok| tok.to_string()),
}),
BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file,
session_name,
} => output.push(Provider::WebIdentityToken {
role_arn: role_arn.into(),
web_identity_token_file: web_identity_token_file.into(),
role_session_name: session_name.map(|sess| sess.to_string()),
}),
};
for role in profile_chain.chain {
output.push(Provider::AssumeRole {
role_arn: role.role_arn.into(),
external_id: role.external_id.map(ToString::to_string),
role_session_name: role.session_name.map(ToString::to_string),
})
}
output
}
#[derive(Deserialize, Debug, PartialEq, Eq)]
enum TestOutput {
ProfileChain(Vec<Provider>),
Error(String),
}
#[derive(Deserialize, Debug, Eq, PartialEq)]
enum Provider {
AssumeRole {
role_arn: String,
external_id: Option<String>,
role_session_name: Option<String>,
},
AccessKey {
access_key_id: String,
secret_access_key: String,
session_token: Option<String>,
},
NamedSource(String),
WebIdentityToken {
role_arn: String,
web_identity_token_file: String,
role_session_name: Option<String>,
},
}
}
repr.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
//! Flattened Representation of an AssumeRole chain
//!
//! Assume Role credentials in profile files can chain together credentials from multiple
//! different providers with subsequent credentials being used to configure subsequent providers.
//!
//! This module can parse and resolve the profile chain into a flattened representation with
//! one credential per row (as opposed to a direct profile file representation which can combine
//! multiple actions into the same profile).
use crate::profile::credentials::ProfileFileError;
use crate::profile::{Profile, ProfileSet};
use aws_types::Credentials;
/// Chain of Profile Providers
///
/// Within a profile file, a chain of providers is produced. Starting with a base provider,
/// subsequent providers use the credentials from previous providers to perform their task.
///
/// ProfileChain is a direct representation of the Profile. It can contain named providers
/// that don't actually have implementations.
#[derive(Debug)]
pub struct ProfileChain<'a> {
pub(crate) base: BaseProvider<'a>,
pub(crate) chain: Vec<RoleArn<'a>>,
}
impl<'a> ProfileChain<'a> {
pub fn base(&self) -> &BaseProvider<'a> {
&self.base | }
pub fn chain(&self) -> &[RoleArn<'a>] {
self.chain.as_slice()
}
}
/// A base member of the profile chain
///
/// Base providers do not require input credentials to provide their own credentials,
/// e.g. IMDS, ECS, or environment variables.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum BaseProvider<'a> {
/// A profile that specifies a named credential source
/// E.g.: `credential_source = Ec2InstanceMetadata`
///
/// The following profile produces two separate `ProfileProvider` rows:
/// 1. `BaseProvider::NamedSource("Ec2InstanceMetadata")`
/// 2. `RoleArn { role_arn: "...", ... }`
/// ```ini
/// [profile assume-role]
/// role_arn = arn:aws:iam::123456789:role/MyRole
/// credential_source = Ec2InstanceMetadata
/// ```
NamedSource(&'a str),
/// A profile with explicitly configured access keys
///
/// Example
/// ```ini
/// [profile C]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
AccessKey(Credentials),
WebIdentityTokenRole {
role_arn: &'a str,
web_identity_token_file: &'a str,
session_name: Option<&'a str>,
}, // TODO: add SSO support
/*
/// An SSO Provider
Sso {
sso_account_id: &'a str,
sso_region: &'a str,
sso_role_name: &'a str,
sso_start_url: &'a str,
},
*/
}
/// A profile that specifies a role to assume
///
/// A RoleArn can only be created from either a profile with `source_profile`
/// or one with `credential_source`.
#[derive(Debug)]
pub struct RoleArn<'a> {
/// Role to assume
pub role_arn: &'a str,
/// external_id parameter to pass to the assume role provider
pub external_id: Option<&'a str>,
/// session name parameter to pass to the assume role provider
pub session_name: Option<&'a str>,
}
/// Resolve a ProfileChain from a ProfileSet or return an error
pub fn resolve_chain<'a>(
profile_set: &'a ProfileSet,
profile_override: Option<&str>,
) -> Result<ProfileChain<'a>, ProfileFileError> {
if profile_set.is_empty() {
return Err(ProfileFileError::NoProfilesDefined);
}
let mut source_profile_name =
profile_override.unwrap_or_else(|| profile_set.selected_profile());
let mut visited_profiles = vec![];
let mut chain = vec![];
let base = loop {
let profile = profile_set.get_profile(source_profile_name).ok_or(
ProfileFileError::MissingProfile {
profile: source_profile_name.into(),
message: format!(
"could not find source profile {} referenced from {}",
source_profile_name,
visited_profiles.last().unwrap_or(&"the root profile")
)
.into(),
},
)?;
if visited_profiles.contains(&source_profile_name) {
return Err(ProfileFileError::CredentialLoop {
profiles: visited_profiles
.into_iter()
.map(|s| s.to_string())
.collect(),
next: source_profile_name.to_string(),
});
}
visited_profiles.push(&source_profile_name);
// After the first item in the chain, we will prioritize static credentials if they exist
if visited_profiles.len() > 1 {
let try_static = static_creds_from_profile(&profile);
if let Ok(static_credentials) = try_static {
break BaseProvider::AccessKey(static_credentials);
}
}
let next_profile = match chain_provider(&profile) {
// this provider wasn't a chain provider, reload it as a base provider
None => {
break base_provider(profile).map_err(|err| {
ProfileFileError::InvalidCredentialSource {
profile: profile.name().into(),
message: format!("could not load source profile: {}", err).into(),
}
})?;
}
Some(result) => {
let (chain_profile, next) = result?;
chain.push(chain_profile);
next
}
};
match next_profile {
NextProfile::SelfReference => {
// self referential profile, don't go through the loop because it will error
// on the infinite loop check. Instead, reload this profile as a base profile
// and exit.
break base_provider(profile)?;
}
NextProfile::Named(name) => source_profile_name = name,
}
};
chain.reverse();
Ok(ProfileChain { base, chain })
}
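// Illustrative sketch (not part of the original source): resolving and walking a
// chain. `profile_set` is assumed to be an already-parsed `ProfileSet`, and the
// profile name is hypothetical.
#[allow(dead_code)]
fn example_walk_chain(profile_set: &ProfileSet) -> Result<(), ProfileFileError> {
let resolved = resolve_chain(profile_set, Some("my-profile"))?;
match resolved.base() {
BaseProvider::NamedSource(name) => println!("base credentials from: {}", name),
other => println!("base provider: {:?}", other),
}
// Roles are applied in order; each is assumed with the previous step's credentials.
for role in resolved.chain() {
println!("then assume: {}", role.role_arn);
}
Ok(())
}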
mod role {
pub const ROLE_ARN: &str = "role_arn";
pub const EXTERNAL_ID: &str = "external_id";
pub const SESSION_NAME: &str = "role_session_name";
pub const CREDENTIAL_SOURCE: &str = "credential_source";
pub const SOURCE_PROFILE: &str = "source_profile";
}
mod web_identity_token {
pub const TOKEN_FILE: &str = "web_identity_token_file";
}
mod static_credentials {
pub const AWS_ACCESS_KEY_ID: &str = "aws_access_key_id";
pub const AWS_SECRET_ACCESS_KEY: &str = "aws_secret_access_key";
pub const AWS_SESSION_TOKEN: &str = "aws_session_token";
}
const PROVIDER_NAME: &str = "ProfileFile";
fn base_provider(profile: &Profile) -> Result<BaseProvider, ProfileFileError> {
// the profile must define either a `CredentialsSource` or a concrete set of access keys
match profile.get(role::CREDENTIAL_SOURCE) {
Some(source) => Ok(BaseProvider::NamedSource(source)),
None => web_identity_token_from_profile(profile)
.unwrap_or_else(|| Ok(BaseProvider::AccessKey(static_creds_from_profile(profile)?))),
}
}
enum NextProfile<'a> {
SelfReference,
Named(&'a str),
}
fn chain_provider(profile: &Profile) -> Option<Result<(RoleArn, NextProfile), ProfileFileError>> {
let role_provider = role_arn_from_profile(&profile)?;
let (source_profile, credential_source) = (
profile.get(role::SOURCE_PROFILE),
profile.get(role::CREDENTIAL_SOURCE),
);
let profile = match (source_profile, credential_source) {
(Some(_), Some(_)) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile contained both source_profile and credential_source. \
Only one or the other can be defined"
.into(),
}),
(None, None) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message:
"profile must contain `source_profile` or `credential_source` but neither were defined"
.into(),
}),
(Some(source_profile), None) if source_profile == profile.name() => {
Ok((role_provider, NextProfile::SelfReference))
}
(Some(source_profile), None) => Ok((role_provider, NextProfile::Named(source_profile))),
// we want to loop back into this profile and pick up the credential source
(None, Some(_credential_source)) => Ok((role_provider, NextProfile::SelfReference)),
};
Some(profile)
}
fn role_arn_from_profile(profile: &Profile) -> Option<RoleArn> {
// Web Identity Tokens are root providers, not chained roles
if profile.get(web_identity_token::TOKEN_FILE).is_some() {
return None;
}
let role_arn = profile.get(role::ROLE_ARN)?;
let session_name = profile.get(role::SESSION_NAME);
let external_id = profile.get(role::EXTERNAL_ID);
Some(RoleArn {
role_arn,
external_id,
session_name,
})
}
fn web_identity_token_from_profile(
profile: &Profile,
) -> Option<Result<BaseProvider, ProfileFileError>> {
let session_name = profile.get(role::SESSION_NAME);
match (
profile.get(role::ROLE_ARN),
profile.get(web_identity_token::TOKEN_FILE),
) {
(Some(role_arn), Some(token_file)) => Some(Ok(BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file: token_file,
session_name,
})),
(None, None) => None,
(Some(_role_arn), None) => None,
(None, Some(_token_file)) => Some(Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "`web_identity_token_file` was specified but `role_arn` was missing".into(),
})),
}
}
/// Load static credentials from a profile
///
/// Example:
/// ```ini
/// [profile B]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
fn static_creds_from_profile(profile: &Profile) -> Result<Credentials, ProfileFileError> {
use static_credentials::*;
let access_key = profile.get(AWS_ACCESS_KEY_ID);
let secret_key = profile.get(AWS_SECRET_ACCESS_KEY);
let session_token = profile.get(AWS_SESSION_TOKEN);
if let (None, None, None) = (access_key, secret_key, session_token) {
return Err(ProfileFileError::ProfileDidNotContainCredentials {
profile: profile.name().to_string(),
});
}
let access_key = access_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_access_key_id".into(),
})?;
let secret_key = secret_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_secret_access_key".into(),
})?;
Ok(Credentials::new(
access_key,
secret_key,
session_token.map(|s| s.to_string()),
None,
PROVIDER_NAME,
))
}
#[cfg(test)]
mod tests {
use crate::profile::credentials::repr::{resolve_chain, BaseProvider, ProfileChain};
use crate::profile::ProfileSet;
use serde::Deserialize;
use std::collections::HashMap;
use std::error::Error;
use std::fs;
#[test]
fn run_test_cases() -> Result<(), Box<dyn Error>> {
let test_cases: Vec<TestCase> =
serde_json::from_str(&fs::read_to_string("./test-data/assume-role-tests.json")?)?;
for test_case in test_cases {
print!("checking: {}...", test_case.docs);
check(test_case);
println!("ok")
}
Ok(())
}
fn check(test_case: TestCase) {
let source = ProfileSet::new(test_case.input.profile, test_case.input.selected_profile);
let actual = resolve_chain(&source, None);
let expected = test_case.output;
match (expected, actual) {
(TestOutput::Error(s), Err(e)) => assert!(
format!("{}", e).contains(&s),
"expected {} to contain `{}`",
e,
s
),
(TestOutput::ProfileChain(expected), Ok(actual)) => {
assert_eq!(to_test_output(actual), expected)
}
(expected, actual) => panic!(
"error/success mismatch. Expected:\n {:?}\nActual:\n {:?}",
&expected, actual
),
}
}
#[derive(Deserialize)]
struct TestCase {
docs: String,
input: TestInput,
output: TestOutput,
}
#[derive(Deserialize)]
struct TestInput {
profile: HashMap<String, HashMap<String, String>>,
selected_profile: String,
}
fn to_test_output(profile_chain: ProfileChain) -> Vec<Provider> {
let mut output = vec![];
match profile_chain.base {
BaseProvider::NamedSource(name) => output.push(Provider::NamedSource(name.into())),
BaseProvider::AccessKey(creds) => output.push(Provider::AccessKey {
access_key_id: creds.access_key_id().into(),
secret_access_key: creds.secret_access_key().into(),
session_token: creds.session_token().map(|tok| tok.to_string()),
}),
BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file,
session_name,
} => output.push(Provider::WebIdentityToken {
role_arn: role_arn.into(),
web_identity_token_file: web_identity_token_file.into(),
role_session_name: session_name.map(|sess| sess.to_string()),
}),
};
for role in profile_chain.chain {
output.push(Provider::AssumeRole {
role_arn: role.role_arn.into(),
external_id: role.external_id.map(ToString::to_string),
role_session_name: role.session_name.map(ToString::to_string),
})
}
output
}
#[derive(Deserialize, Debug, PartialEq, Eq)]
enum TestOutput {
ProfileChain(Vec<Provider>),
Error(String),
}
#[derive(Deserialize, Debug, Eq, PartialEq)]
enum Provider {
AssumeRole {
role_arn: String,
external_id: Option<String>,
role_session_name: Option<String>,
},
AccessKey {
access_key_id: String,
secret_access_key: String,
session_token: Option<String>,
},
NamedSource(String),
WebIdentityToken {
role_arn: String,
web_identity_token_file: String,
role_session_name: Option<String>,
},
}
} | random_line_split |
|
lib.rs | Mime without having to import so many enums.
///
/// # Example
///
/// ```
/// # #[macro_use] extern crate mime;
///
/// # fn main() {
/// let json = mime!(Application/Json);
/// let plain = mime!(Text/Plain; Charset=Utf8);
/// let text = mime!(Text/Html; Charset=("bar"), ("baz")=("quux"));
/// let img = mime!(Image/_);
/// # }
/// ```
#[macro_export]
macro_rules! mime {
($top:tt / $sub:tt) => (
mime!($top / $sub;)
);
($top:tt / $sub:tt ; $($attr:tt = $val:tt),*) => (
$crate::Mime(
__mime__ident_or_ext!(TopLevel::$top),
__mime__ident_or_ext!(SubLevel::$sub),
vec![ $((__mime__ident_or_ext!(Attr::$attr), __mime__ident_or_ext!(Value::$val))),* ]
)
);
}
#[doc(hidden)]
#[macro_export]
macro_rules! __mime__ident_or_ext {
($enoom:ident::_) => (
$crate::$enoom::Star
);
($enoom:ident::($inner:expr)) => (
$crate::$enoom::Ext($inner.to_string())
);
($enoom:ident::$var:ident) => (
$crate::$enoom::$var
)
}
macro_rules! enoom {
(pub enum $en:ident; $ext:ident; $($ty:ident, $text:expr;)*) => (
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)]
pub enum $en {
$($ty),*,
$ext(String)
}
impl $en {
pub fn as_str(&self) -> &str {
match *self {
$($en::$ty => $text),*,
$en::$ext(ref s) => &s
}
}
}
impl ::std::ops::Deref for $en {
type Target = str;
fn deref(&self) -> &str {
self.as_str()
}
}
impl PartialEq for $en {
#[inline]
fn eq(&self, other: &$en) -> bool {
match (self, other) {
$( (&$en::$ty, &$en::$ty) => true ),*,
(&$en::$ext(ref a), &$en::$ext(ref b)) => a == b,
(_, _) => self.as_str() == other.as_str(),
}
}
}
impl PartialEq<String> for $en {
fn eq(&self, other: &String) -> bool {
self.as_str() == other
}
}
impl PartialEq<str> for $en {
fn eq(&self, other: &str) -> bool {
self.as_str() == other
}
}
impl<'a> PartialEq<&'a str> for $en {
fn eq(&self, other: &&'a str) -> bool {
self.as_str() == *other
}
}
impl PartialEq<$en> for String {
fn eq(&self, other: &$en) -> bool {
self == other.as_str()
}
}
impl PartialEq<$en> for str {
fn eq(&self, other: &$en) -> bool {
self == other.as_str()
}
}
impl<'a> PartialEq<$en> for &'a str {
fn eq(&self, other: &$en) -> bool {
*self == other.as_str()
}
}
impl fmt::Display for $en {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(match *self {
$($en::$ty => $text),*,
$en::$ext(ref s) => s
})
}
}
impl FromStr for $en {
type Err = ();
fn from_str(s: &str) -> Result<$en, ()> {
Ok(match s {
$(_s if _s == $text => $en::$ty),*,
s => $en::$ext(s.to_string())
})
}
}
#[cfg(feature = "heapsize")]
impl heapsize::HeapSizeOf for $en {
fn heap_size_of_children(&self) -> usize {
match *self {
$en::$ext(ref ext) => ext.heap_size_of_children(),
_ => 0,
}
}
}
)
}
enoom! {
pub enum TopLevel;
Ext;
Star, "*";
Text, "text";
Image, "image";
Audio, "audio";
Video, "video";
Application, "application";
Multipart, "multipart";
Message, "message";
Model, "model";
}
enoom! {
pub enum SubLevel;
Ext;
Star, "*";
// common text/*
Plain, "plain";
Html, "html";
Xml, "xml";
Javascript, "javascript";
Css, "css";
EventStream, "event-stream";
// common application/*
Json, "json";
WwwFormUrlEncoded, "x-www-form-urlencoded";
Msgpack, "msgpack";
OctetStream, "octet-stream";
// multipart/*
FormData, "form-data";
// common image/*
Png, "png";
Gif, "gif";
Bmp, "bmp";
Jpeg, "jpeg";
// audio/*
Mpeg, "mpeg";
Mp4, "mp4";
Ogg, "ogg";
}
enoom! {
pub enum Attr;
Ext;
Charset, "charset";
Boundary, "boundary";
Q, "q";
}
enoom! {
pub enum Value;
Ext;
Utf8, "utf-8";
}
pub type Param = (Attr, Value);
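// Illustrative sketch (not from the original crate): `Mime` is a plain tuple
// struct, so values can be built directly; the `mime!` macro is sugar for this.
#[allow(dead_code)]
fn example_construct() -> Mime {
Mime(TopLevel::Text, SubLevel::Plain, vec![(Attr::Charset, Value::Utf8)])
}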
impl<T: AsRef<[Param]>> fmt::Display for Mime<T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// It's much faster to write a single string, as opposed to pushing
// several parts through f.write_str(). So, check for the most common
// mime types, and fast track them.
if let TopLevel::Text = self.0 {
if let SubLevel::Plain = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("text/plain");
} else if &[(Attr::Charset, Value::Utf8)] == attrs {
return f.write_str("text/plain; charset=utf-8");
}
}
} else if let TopLevel::Application = self.0 {
if let SubLevel::Json = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("application/json");
}
}
} else if let TopLevel::Star = self.0 {
if let SubLevel::Star = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("*/*");
}
}
}
// slower general purpose fmt
try!(fmt::Display::fmt(&self.0, f));
try!(f.write_str("/"));
try!(fmt::Display::fmt(&self.1, f));
for param in self.2.as_ref() {
try!(f.write_str("; "));
try!(fmt::Display::fmt(¶m.0, f));
try!(f.write_str("="));
try!(fmt::Display::fmt(¶m.1, f));
}
Ok(())
}
}
impl<P: AsRef<[Param]>> Mime<P> {
pub fn get_param<A: PartialEq<Attr>>(&self, attr: A) -> Option<&Value> {
self.2.as_ref().iter().find(|&&(ref name, _)| attr == *name).map(|&(_, ref value)| value)
}
}
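// Illustrative usage sketch for `get_param` (not from the original crate): any
// type comparable to `Attr` works as the lookup key, including `Attr` itself.
#[allow(dead_code)]
fn example_get_param(mime: &Mime) -> bool {
mime.get_param(Attr::Charset) == Some(&Value::Utf8)
}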
impl FromStr for Mime {
type Err = ();
fn from_str(raw: &str) -> Result<Mime, ()> {
if raw == "*/*" {
return Ok(mime!(Star/Star));
}
let ascii = raw.to_ascii_lowercase(); // lifetimes :(
let len = ascii.len();
let mut iter = ascii.chars().enumerate();
let mut params = vec![];
// toplevel
let mut start;
let top;
loop {
match iter.next() {
Some((0, c)) if is_restricted_name_first_char(c) => (),
Some((i, c)) if i > 0 && is_restricted_name_char(c) => (),
Some((i, '/')) if i > 0 => match FromStr::from_str(&ascii[..i]) {
Ok(t) => {
top = t;
start = i + 1;
break;
}
Err(_) => return Err(())
},
_ => return Err(()) // EOF and no toplevel is no Mime
};
}
// sublevel
let sub;
let mut sub_star = false;
loop {
match iter.next() {
Some((i, '*')) if i == start => {
sub_star = true;
},
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, c)) if !sub_star && i > start && is_restricted_name_char(c) => (),
Some((i, ';')) if i > start => match FromStr::from_str(&ascii[start..i]) {
Ok(s) => {
sub = s;
start = i + 1;
break;
}
Err(_) => return Err(())
},
None => match FromStr::from_str(&ascii[start..]) {
Ok(s) => return Ok(Mime(top, s, params)),
Err(_) => return Err(())
},
_ => return Err(())
};
}
// params
debug!("starting params, len={}", len);
loop {
match param_from_str(raw, &ascii, &mut iter, start) {
Some((p, end)) => {
params.push(p);
start = end;
if start >= len {
break;
}
}
None => break
}
}
Ok(Mime(top, sub, params))
}
}
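// Illustrative parsing sketch (not from the original crate): the type and subtype
// are lowercased before matching, so parsing is case-insensitive; malformed input
// yields `Err(())`.
#[allow(dead_code)]
fn example_parse() {
let ok: Result<Mime, ()> = "TEXT/plain; charset=utf-8".parse();
assert!(ok.is_ok());
let bad: Result<Mime, ()> = "not a mime".parse();
assert!(bad.is_err());
}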
#[cfg(feature = "serde")]
impl serde::ser::Serialize for Mime {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: serde::ser::Serializer
{
serializer.serialize_str(&*format!("{}",self))
}
}
#[cfg(feature = "serde")]
impl serde::de::Deserialize for Mime {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: serde::de::Deserializer
{
let string: String = try!(serde::Deserialize::deserialize(deserializer));
let mime: Mime = match FromStr::from_str(&*string) {
Ok(mime) => mime,
Err(_) => return Err(serde::de::Error::custom("Invalid serialized mime")),
};
Ok(mime)
}
}
fn param_from_str(raw: &str, ascii: &str, iter: &mut Enumerate<Chars>, mut start: usize) -> Option<(Param, usize)> {
let attr;
debug!("param_from_str, start={}", start);
loop {
match iter.next() {
Some((i, ' ')) if i == start => start = i + 1,
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, c)) if i > start && is_restricted_name_char(c) => (),
Some((i, '=')) if i > start => match FromStr::from_str(&ascii[start..i]) {
Ok(a) => {
attr = a;
start = i + 1;
break;
},
Err(_) => return None
},
_ => return None
}
}
let value;
// values must be restricted-name-char or "anything goes"
let mut is_quoted = false;
{
let substr = |a,b| { if attr==Attr::Charset { &ascii[a..b] } else { &raw[a..b] } };
let endstr = |a| { if attr==Attr::Charset { &ascii[a..] } else { &raw[a..] } };
loop {
match iter.next() {
Some((i, '"')) if i == start => {
debug!("quoted");
is_quoted = true;
start = i + 1;
},
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, '"')) if i > start && is_quoted => match FromStr::from_str(substr(start,i)) {
Ok(v) => {
value = v;
start = i + 1;
break;
},
Err(_) => return None
},
Some((i, c)) if i > start && is_quoted || is_restricted_name_char(c) => (),
Some((i, ';')) if i > start => match FromStr::from_str(substr(start,i)) {
Ok(v) => {
value = v;
start = i + 1;
break;
},
Err(_) => return None
},
None => match FromStr::from_str(endstr(start)) {
Ok(v) => {
value = v;
start = raw.len();
break;
},
Err(_) => return None
},
_ => return None
}
}
}
Some(((attr, value), start))
}
// From [RFC6838](http://tools.ietf.org/html/rfc6838#section-4.2):
//
// > All registered media types MUST be assigned top-level type and
// > subtype names. The combination of these names serves to uniquely
// > identify the media type, and the subtype name facet (or the absence
// > of one) identifies the registration tree. Both top-level type and
// > subtype names are case-insensitive.
// >
// > Type and subtype names MUST conform to the following ABNF:
// >
// > type-name = restricted-name
// > subtype-name = restricted-name
// >
// > restricted-name = restricted-name-first *126restricted-name-chars
// > restricted-name-first = ALPHA / DIGIT
// > restricted-name-chars = ALPHA / DIGIT / "!" / "#" /
// > "$" / "&" / "-" / "^" / "_"
// > restricted-name-chars =/ "." ; Characters before first dot always
// > ; specify a facet name
// > restricted-name-chars =/ "+" ; Characters after last plus always
// > ; specify a structured syntax suffix
//
fn is_restricted_name_first_char(c: char) -> bool {
match c {
'a'...'z' |
'0'...'9' => true,
_ => false
}
}
fn is_restricted_name_char(c: char) -> bool {
if is_restricted_name_first_char(c) {
true
} else {
match c {
'!' |
'#' |
'$' |
'&' |
'-' |
'^' |
'.' |
'+' |
'_' => true,
_ => false
}
}
}
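// Illustrative check (not from the original crate): digits and lowercase letters
// may start a name, while characters such as '-' and '+' are only legal after the
// first character.
#[allow(dead_code)]
fn example_restricted_names() {
assert!(is_restricted_name_first_char('x'));
assert!(!is_restricted_name_first_char('-'));
assert!(is_restricted_name_char('-'));
}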
#[cfg(test)]
mod tests {
use std::str::FromStr;
#[cfg(feature = "nightly")]
use test::Bencher;
use super::{Mime, Value, Attr};
#[test]
fn test_mime_show() {
let mime = mime!(Text/Plain);
assert_eq!(mime.to_string(), "text/plain".to_string());
let mime = mime!(Text/Plain; Charset=Utf8);
assert_eq!(mime.to_string(), "text/plain; charset=utf-8".to_string());
}
#[test]
fn test_mime_from_str() {
assert_eq!(Mime::from_str("text/plain").unwrap(), mime!(Text/Plain));
assert_eq!(Mime::from_str("TEXT/PLAIN").unwrap(), mime!(Text/Plain));
assert_eq!(Mime::from_str("text/plain; charset=utf-8").unwrap(), mime!(Text/Plain; Charset=Utf8));
assert_eq!(Mime::from_str("text/plain;charset=\"utf-8\"").unwrap(), mime!(Text/Plain; Charset=Utf8));
assert_eq!(Mime::from_str("text/plain; charset=utf-8; foo=bar").unwrap(),
mime!(Text/Plain; Charset=Utf8, ("foo")=("bar")));
assert_eq!("*/*".parse::<Mime>().unwrap(), mime!(Star/Star));
assert_eq!("image/*".parse::<Mime>().unwrap(), mime!(Image/Star));
assert_eq!("text/*; charset=utf-8".parse::<Mime>().unwrap(), mime!(Text/Star; Charset=Utf8));
assert!("*/png".parse::<Mime>().is_err());
assert!("*image/png".parse::<Mime>().is_err());
assert!("text/*plain".parse::<Mime>().is_err());
}
| #[test]
fn test_case_sensitive_values() {
assert_eq!(Mime::from_str("multipart/form-data; boundary=ABCDEFG").unwrap(), | random_line_split |
|
lib.rs | !("matched json!"),
/// _ => ()
/// }
/// ```
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)]
pub struct Mime<T: AsRef<[Param]> = Vec<Param>>(pub TopLevel, pub SubLevel, pub T);
#[cfg(feature = "heapsize")]
impl<T: AsRef<[Param]> + heapsize::HeapSizeOf> heapsize::HeapSizeOf for Mime<T> {
fn heap_size_of_children(&self) -> usize {
self.0.heap_size_of_children() +
self.1.heap_size_of_children() +
self.2.heap_size_of_children()
}
}
impl<LHS: AsRef<[Param]>, RHS: AsRef<[Param]>> PartialEq<Mime<RHS>> for Mime<LHS> {
#[inline]
fn eq(&self, other: &Mime<RHS>) -> bool {
self.0 == other.0 && self.1 == other.1 && self.2.as_ref() == other.2.as_ref()
}
}
/// Easily create a Mime without having to import so many enums.
///
/// # Example
///
/// ```
/// # #[macro_use] extern crate mime;
///
/// # fn main() {
/// let json = mime!(Application/Json);
/// let plain = mime!(Text/Plain; Charset=Utf8);
/// let text = mime!(Text/Html; Charset=("bar"), ("baz")=("quux"));
/// let img = mime!(Image/_);
/// # }
/// ```
#[macro_export]
macro_rules! mime {
($top:tt / $sub:tt) => (
mime!($top / $sub;)
);
($top:tt / $sub:tt ; $($attr:tt = $val:tt),*) => (
$crate::Mime(
__mime__ident_or_ext!(TopLevel::$top),
__mime__ident_or_ext!(SubLevel::$sub),
vec![ $((__mime__ident_or_ext!(Attr::$attr), __mime__ident_or_ext!(Value::$val))),* ]
)
);
}
#[doc(hidden)]
#[macro_export]
macro_rules! __mime__ident_or_ext {
($enoom:ident::_) => (
$crate::$enoom::Star
);
($enoom:ident::($inner:expr)) => (
$crate::$enoom::Ext($inner.to_string())
);
($enoom:ident::$var:ident) => (
$crate::$enoom::$var
)
}
macro_rules! enoom {
(pub enum $en:ident; $ext:ident; $($ty:ident, $text:expr;)*) => (
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)]
pub enum $en {
$($ty),*,
$ext(String)
}
impl $en {
pub fn as_str(&self) -> &str {
match *self {
$($en::$ty => $text),*,
$en::$ext(ref s) => &s
}
}
}
impl ::std::ops::Deref for $en {
type Target = str;
fn deref(&self) -> &str {
self.as_str()
}
}
impl PartialEq for $en {
#[inline]
fn eq(&self, other: &$en) -> bool {
match (self, other) {
$( (&$en::$ty, &$en::$ty) => true ),*,
(&$en::$ext(ref a), &$en::$ext(ref b)) => a == b,
(_, _) => self.as_str() == other.as_str(),
}
}
}
impl PartialEq<String> for $en {
fn eq(&self, other: &String) -> bool {
self.as_str() == other
}
}
impl PartialEq<str> for $en {
fn eq(&self, other: &str) -> bool {
self.as_str() == other
}
}
impl<'a> PartialEq<&'a str> for $en {
fn eq(&self, other: &&'a str) -> bool {
self.as_str() == *other
}
}
impl PartialEq<$en> for String {
fn eq(&self, other: &$en) -> bool {
self == other.as_str()
}
}
impl PartialEq<$en> for str {
fn eq(&self, other: &$en) -> bool {
self == other.as_str()
}
}
impl<'a> PartialEq<$en> for &'a str {
fn eq(&self, other: &$en) -> bool {
*self == other.as_str()
}
}
impl fmt::Display for $en {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(match *self {
$($en::$ty => $text),*,
$en::$ext(ref s) => s
})
}
}
impl FromStr for $en {
type Err = ();
fn from_str(s: &str) -> Result<$en, ()> {
Ok(match s {
$(_s if _s == $text => $en::$ty),*,
s => $en::$ext(s.to_string())
})
}
}
#[cfg(feature = "heapsize")]
impl heapsize::HeapSizeOf for $en {
fn heap_size_of_children(&self) -> usize {
match *self {
$en::$ext(ref ext) => ext.heap_size_of_children(),
_ => 0,
}
}
}
)
}
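// Illustrative note (not from the original crate): each `enoom!` invocation below
// generates an enum with a trailing `Ext(String)` variant plus the string-comparison
// impls above, so known and extension values compare uniformly against strings.
#[allow(dead_code)]
fn example_enoom_usage() {
assert_eq!(TopLevel::Text, "text");
assert_eq!(SubLevel::Ext("vcard".to_string()), "vcard");
}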
enoom! {
pub enum TopLevel;
Ext;
Star, "*";
Text, "text";
Image, "image";
Audio, "audio";
Video, "video";
Application, "application";
Multipart, "multipart";
Message, "message";
Model, "model";
}
enoom! {
pub enum SubLevel;
Ext;
Star, "*";
// common text/*
Plain, "plain";
Html, "html";
Xml, "xml";
Javascript, "javascript";
Css, "css";
EventStream, "event-stream";
// common application/*
Json, "json";
WwwFormUrlEncoded, "x-www-form-urlencoded";
Msgpack, "msgpack";
OctetStream, "octet-stream";
// multipart/*
FormData, "form-data";
// common image/*
Png, "png";
Gif, "gif";
Bmp, "bmp";
Jpeg, "jpeg";
// audio/*
Mpeg, "mpeg";
Mp4, "mp4";
Ogg, "ogg";
}
enoom! {
pub enum Attr;
Ext;
Charset, "charset";
Boundary, "boundary";
Q, "q";
}
enoom! {
pub enum Value;
Ext;
Utf8, "utf-8";
}
pub type Param = (Attr, Value);
impl<T: AsRef<[Param]>> fmt::Display for Mime<T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// It's much faster to write a single string, as opposed to pushing
// several parts through f.write_str(). So, check for the most common
// mime types, and fast track them.
if let TopLevel::Text = self.0 {
if let SubLevel::Plain = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("text/plain");
} else if &[(Attr::Charset, Value::Utf8)] == attrs {
return f.write_str("text/plain; charset=utf-8");
}
}
} else if let TopLevel::Application = self.0 | else if let TopLevel::Star = self.0 {
if let SubLevel::Star = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("*/*");
}
}
}
// slower general purpose fmt
try!(fmt::Display::fmt(&self.0, f));
try!(f.write_str("/"));
try!(fmt::Display::fmt(&self.1, f));
for param in self.2.as_ref() {
try!(f.write_str("; "));
try!(fmt::Display::fmt(¶m.0, f));
try!(f.write_str("="));
try!(fmt::Display::fmt(¶m.1, f));
}
Ok(())
}
}
impl<P: AsRef<[Param]>> Mime<P> {
pub fn get_param<A: PartialEq<Attr>>(&self, attr: A) -> Option<&Value> {
self.2.as_ref().iter().find(|&&(ref name, _)| attr == *name).map(|&(_, ref value)| value)
}
}
impl FromStr for Mime {
type Err = ();
fn from_str(raw: &str) -> Result<Mime, ()> {
if raw == "*/*" {
return Ok(mime!(Star/Star));
}
let ascii = raw.to_ascii_lowercase(); // lifetimes :(
let len = ascii.len();
let mut iter = ascii.chars().enumerate();
let mut params = vec![];
// toplevel
let mut start;
let top;
loop {
match iter.next() {
Some((0, c)) if is_restricted_name_first_char(c) => (),
Some((i, c)) if i > 0 && is_restricted_name_char(c) => (),
Some((i, '/')) if i > 0 => match FromStr::from_str(&ascii[..i]) {
Ok(t) => {
top = t;
start = i + 1;
break;
}
Err(_) => return Err(())
},
_ => return Err(()) // EOF and no toplevel is no Mime
};
}
// sublevel
let sub;
let mut sub_star = false;
loop {
match iter.next() {
Some((i, '*')) if i == start => {
sub_star = true;
},
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, c)) if !sub_star && i > start && is_restricted_name_char(c) => (),
Some((i, ';')) if i > start => match FromStr::from_str(&ascii[start..i]) {
Ok(s) => {
sub = s;
start = i + 1;
break;
}
Err(_) => return Err(())
},
None => match FromStr::from_str(&ascii[start..]) {
Ok(s) => return Ok(Mime(top, s, params)),
Err(_) => return Err(())
},
_ => return Err(())
};
}
// params
debug!("starting params, len={}", len);
loop {
match param_from_str(raw, &ascii, &mut iter, start) {
Some((p, end)) => {
params.push(p);
start = end;
if start >= len {
break;
}
}
None => break
}
}
Ok(Mime(top, sub, params))
}
}
#[cfg(feature = "serde")]
impl serde::ser::Serialize for Mime {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: serde::ser::Serializer
{
serializer.serialize_str(&*format!("{}",self))
}
}
#[cfg(feature = "serde")]
impl serde::de::Deserialize for Mime {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: serde::de::Deserializer
{
let string: String = try!(serde::Deserialize::deserialize(deserializer));
let mime: Mime = match FromStr::from_str(&*string) {
Ok(mime) => mime,
Err(_) => return Err(serde::de::Error::custom("Invalid serialized mime")),
};
Ok(mime)
}
}
fn param_from_str(raw: &str, ascii: &str, iter: &mut Enumerate<Chars>, mut start: usize) -> Option<(Param, usize)> {
let attr;
debug!("param_from_str, start={}", start);
loop {
match iter.next() {
Some((i, ' ')) if i == start => start = i + 1,
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, c)) if i > start && is_restricted_name_char(c) => (),
Some((i, '=')) if i > start => match FromStr::from_str(&ascii[start..i]) {
Ok(a) => {
attr = a;
start = i + 1;
break;
},
Err(_) => return None
},
_ => return None
}
}
let value;
// values must be restrict-name-char or "anything goes"
let mut is_quoted = false;
{
let substr = |a,b| { if attr==Attr::Charset { &ascii[a..b] } else { &raw[a..b] } };
let endstr = |a| { if attr==Attr::Charset { &ascii[a..] } else { &raw[a..] } };
loop {
match iter.next() {
Some((i, '"')) if i == start => {
debug!("quoted");
is_quoted = true;
start = i + 1;
},
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, '"')) if i > start && is_quoted => match FromStr::from_str(substr(start,i)) {
Ok(v) => {
value = v;
start = i + 1;
break;
},
Err(_) => return None
},
Some((i, c)) if i > start && is_quoted || is_restricted_name_char(c) => (),
Some((i, ';')) if i > start => match FromStr::from_str(substr(start,i)) {
Ok(v) => {
value = v;
start = i + 1;
break;
},
Err(_) => return None
},
None => match FromStr::from_str(endstr(start)) {
Ok(v) => {
value = v;
start = raw.len();
break;
},
Err(_) => return None
},
_ => return None
}
}
}
Some(((attr, value), start))
}
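// Illustrative sketch (not from the original crate): quoted values are accepted,
// and `charset` values are matched against the lowercased copy of the input, so
// these two strings parse to the same parameter list.
#[allow(dead_code)]
fn example_quoted_param() {
let a: Mime = "text/plain; charset=\"UTF-8\"".parse().unwrap();
let b: Mime = "text/plain; charset=utf-8".parse().unwrap();
assert_eq!(a, b);
}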
// From [RFC6838](http://tools.ietf.org/html/rfc6838#section-4.2):
//
// > All registered media types MUST be assigned top-level type and
// > subtype names. The combination of these names serves to uniquely
// > identify the media type, and the subtype name facet (or the absence
// > of one) identifies the registration tree. Both top-level type and
// > subtype names are case-insensitive.
// >
// > Type and subtype names MUST conform to the following ABNF:
// >
// > type-name = restricted-name
// > subtype-name = restricted-name
// >
// > restricted-name = restricted-name-first *126restricted-name-chars
// > restricted-name-first = ALPHA / DIGIT
// > restricted-name-chars = ALPHA / DIGIT / "!" / "#" /
// > "$" / "&" / "-" / "^" / "_"
// > restricted-name-chars =/ "." ; Characters before first dot always
// > ; specify a facet name
// > restricted-name-chars =/ "+" ; Characters after last plus always
// > ; specify a structured syntax suffix
//
fn is_restricted_name_first_char(c: char) -> bool {
match c {
'a'...'z' |
'0'...'9' => true,
_ => false
}
}
fn is_restricted_name_char(c: char) -> bool {
if is_restricted_name_first_char(c) {
true
} else {
match c {
'!' |
'#' |
'$' |
'&' |
'-' |
'^' |
'.' |
'+' |
'_' => true,
_ => false
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
#[cfg(feature = "nightly")]
use test::Bencher;
use super::{Mime, Value, Attr};
#[test]
fn test_mime_show() {
let mime = mime!(Text/Plain);
assert_eq!(mime.to_string(), "text/plain".to_string());
let mime = mime!(Text/Plain; Charset=Utf8);
assert_eq!(mime.to_string(), "text/plain; charset=utf-8".to_string());
}
#[test]
fn test_mime_from_str() {
assert_eq!(Mime::from_str("text/plain").unwrap(), mime!(Text/Plain));
assert_eq!(Mime::from_str("TEXT/PLAIN").unwrap(), mime!(Text/Plain));
assert_eq!(Mime::from_str("text/plain; charset=utf-8").unwrap(), mime!(Text/Plain; Charset=Utf8));
assert_eq!(Mime::from_str("text/plain;charset=\"utf | {
if let SubLevel::Json = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("application/json");
}
}
} | conditional_block |
lib.rs | !("matched json!"),
/// _ => ()
/// }
/// ```
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)]
pub struct Mime<T: AsRef<[Param]> = Vec<Param>>(pub TopLevel, pub SubLevel, pub T);
#[cfg(feature = "heapsize")]
impl<T: AsRef<[Param]> + heapsize::HeapSizeOf> heapsize::HeapSizeOf for Mime<T> {
fn heap_size_of_children(&self) -> usize {
self.0.heap_size_of_children() +
self.1.heap_size_of_children() +
self.2.heap_size_of_children()
}
}
impl<LHS: AsRef<[Param]>, RHS: AsRef<[Param]>> PartialEq<Mime<RHS>> for Mime<LHS> {
#[inline]
fn eq(&self, other: &Mime<RHS>) -> bool {
self.0 == other.0 && self.1 == other.1 && self.2.as_ref() == other.2.as_ref()
}
}
/// Easily create a Mime without having to import so many enums.
///
/// # Example
///
/// ```
/// # #[macro_use] extern crate mime;
///
/// # fn main() {
/// let json = mime!(Application/Json);
/// let plain = mime!(Text/Plain; Charset=Utf8);
/// let text = mime!(Text/Html; Charset=("bar"), ("baz")=("quux"));
/// let img = mime!(Image/_);
/// # }
/// ```
#[macro_export]
macro_rules! mime {
($top:tt / $sub:tt) => (
mime!($top / $sub;)
);
($top:tt / $sub:tt ; $($attr:tt = $val:tt),*) => (
$crate::Mime(
__mime__ident_or_ext!(TopLevel::$top),
__mime__ident_or_ext!(SubLevel::$sub),
vec![ $((__mime__ident_or_ext!(Attr::$attr), __mime__ident_or_ext!(Value::$val))),* ]
)
);
}
#[doc(hidden)]
#[macro_export]
macro_rules! __mime__ident_or_ext {
($enoom:ident::_) => (
$crate::$enoom::Star
);
($enoom:ident::($inner:expr)) => (
$crate::$enoom::Ext($inner.to_string())
);
($enoom:ident::$var:ident) => (
$crate::$enoom::$var
)
}
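// Illustrative sketch (not from the original crate): the `($inner)` arm above is
// what lets `mime!` accept arbitrary extension tokens, while `_` maps to `Star`.
#[allow(dead_code)]
fn example_ext_tokens() {
let vcard = mime!(Text/("vcard")); // SubLevel::Ext("vcard".to_string())
let any_image = mime!(Image/_); // SubLevel::Star
assert_eq!(vcard.1, "vcard");
assert_eq!(any_image.1, SubLevel::Star);
}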
macro_rules! enoom {
(pub enum $en:ident; $ext:ident; $($ty:ident, $text:expr;)*) => (
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)]
pub enum $en {
$($ty),*,
$ext(String)
}
impl $en {
pub fn as_str(&self) -> &str {
match *self {
$($en::$ty => $text),*,
$en::$ext(ref s) => &s
}
}
}
impl ::std::ops::Deref for $en {
type Target = str;
fn deref(&self) -> &str {
self.as_str()
}
}
impl PartialEq for $en {
#[inline]
fn eq(&self, other: &$en) -> bool {
match (self, other) {
$( (&$en::$ty, &$en::$ty) => true ),*,
(&$en::$ext(ref a), &$en::$ext(ref b)) => a == b,
(_, _) => self.as_str() == other.as_str(),
}
}
}
impl PartialEq<String> for $en {
fn eq(&self, other: &String) -> bool {
self.as_str() == other
}
}
impl PartialEq<str> for $en {
fn eq(&self, other: &str) -> bool {
self.as_str() == other
}
}
impl<'a> PartialEq<&'a str> for $en {
fn eq(&self, other: &&'a str) -> bool {
self.as_str() == *other
}
}
impl PartialEq<$en> for String {
fn eq(&self, other: &$en) -> bool {
self == other.as_str()
}
}
impl PartialEq<$en> for str {
fn eq(&self, other: &$en) -> bool {
self == other.as_str()
}
}
impl<'a> PartialEq<$en> for &'a str {
fn eq(&self, other: &$en) -> bool {
*self == other.as_str()
}
}
impl fmt::Display for $en {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(match *self {
$($en::$ty => $text),*,
$en::$ext(ref s) => s
})
}
}
impl FromStr for $en {
type Err = ();
fn from_str(s: &str) -> Result<$en, ()> {
Ok(match s {
$(_s if _s == $text => $en::$ty),*,
s => $en::$ext(s.to_string())
})
}
}
#[cfg(feature = "heapsize")]
impl heapsize::HeapSizeOf for $en {
fn heap_size_of_children(&self) -> usize {
match *self {
$en::$ext(ref ext) => ext.heap_size_of_children(),
_ => 0,
}
}
}
)
}
enoom! {
pub enum TopLevel;
Ext;
Star, "*";
Text, "text";
Image, "image";
Audio, "audio";
Video, "video";
Application, "application";
Multipart, "multipart";
Message, "message";
Model, "model";
}
enoom! {
pub enum SubLevel;
Ext;
Star, "*";
// common text/*
Plain, "plain";
Html, "html";
Xml, "xml";
Javascript, "javascript";
Css, "css";
EventStream, "event-stream";
// common application/*
Json, "json";
WwwFormUrlEncoded, "x-www-form-urlencoded";
Msgpack, "msgpack";
OctetStream, "octet-stream";
// multipart/*
FormData, "form-data";
// common image/*
Png, "png";
Gif, "gif";
Bmp, "bmp";
Jpeg, "jpeg";
// audio/*
Mpeg, "mpeg";
Mp4, "mp4";
Ogg, "ogg";
}
enoom! {
pub enum Attr;
Ext;
Charset, "charset";
Boundary, "boundary";
Q, "q";
}
enoom! {
pub enum Value;
Ext;
Utf8, "utf-8";
}
pub type Param = (Attr, Value);
impl<T: AsRef<[Param]>> fmt::Display for Mime<T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// It's much faster to write a single string, as opposed to pushing
// several parts through f.write_str(). So, check for the most common
// mime types, and fast track them.
if let TopLevel::Text = self.0 {
if let SubLevel::Plain = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("text/plain");
} else if &[(Attr::Charset, Value::Utf8)] == attrs {
return f.write_str("text/plain; charset=utf-8");
}
}
} else if let TopLevel::Application = self.0 {
if let SubLevel::Json = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("application/json");
}
}
} else if let TopLevel::Star = self.0 {
if let SubLevel::Star = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("*/*");
}
}
}
// slower general purpose fmt
try!(fmt::Display::fmt(&self.0, f));
try!(f.write_str("/"));
try!(fmt::Display::fmt(&self.1, f));
for param in self.2.as_ref() {
try!(f.write_str("; "));
try!(fmt::Display::fmt(¶m.0, f));
try!(f.write_str("="));
try!(fmt::Display::fmt(¶m.1, f));
}
Ok(())
}
}
impl<P: AsRef<[Param]>> Mime<P> {
pub fn get_param<A: PartialEq<Attr>>(&self, attr: A) -> Option<&Value> {
self.2.as_ref().iter().find(|&&(ref name, _)| attr == *name).map(|&(_, ref value)| value)
}
}
impl FromStr for Mime {
type Err = ();
fn from_str(raw: &str) -> Result<Mime, ()> {
if raw == "*/*" {
return Ok(mime!(Star/Star));
}
let ascii = raw.to_ascii_lowercase(); // lifetimes :(
let len = ascii.len();
let mut iter = ascii.chars().enumerate();
let mut params = vec![];
// toplevel
let mut start;
let top;
loop {
match iter.next() {
Some((0, c)) if is_restricted_name_first_char(c) => (),
Some((i, c)) if i > 0 && is_restricted_name_char(c) => (),
Some((i, '/')) if i > 0 => match FromStr::from_str(&ascii[..i]) {
Ok(t) => {
top = t;
start = i + 1;
break;
}
Err(_) => return Err(())
},
_ => return Err(()) // EOF and no toplevel is no Mime
};
}
// sublevel
let sub;
let mut sub_star = false;
loop {
match iter.next() {
Some((i, '*')) if i == start => {
sub_star = true;
},
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, c)) if !sub_star && i > start && is_restricted_name_char(c) => (),
Some((i, ';')) if i > start => match FromStr::from_str(&ascii[start..i]) {
Ok(s) => {
sub = s;
start = i + 1;
break;
}
Err(_) => return Err(())
},
None => match FromStr::from_str(&ascii[start..]) {
Ok(s) => return Ok(Mime(top, s, params)),
Err(_) => return Err(())
},
_ => return Err(())
};
}
// params
debug!("starting params, len={}", len);
loop {
match param_from_str(raw, &ascii, &mut iter, start) {
Some((p, end)) => {
params.push(p);
start = end;
if start >= len {
break;
}
}
None => break
}
}
Ok(Mime(top, sub, params))
}
}
#[cfg(feature = "serde")]
impl serde::ser::Serialize for Mime {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: serde::ser::Serializer
{
serializer.serialize_str(&*format!("{}",self))
}
}
#[cfg(feature = "serde")]
impl serde::de::Deserialize for Mime {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: serde::de::Deserializer
{
let string: String = try!(serde::Deserialize::deserialize(deserializer));
let mime: Mime = match FromStr::from_str(&*string) {
Ok(mime) => mime,
Err(_) => return Err(serde::de::Error::custom("Invalid serialized mime")),
};
Ok(mime)
}
}
fn param_from_str(raw: &str, ascii: &str, iter: &mut Enumerate<Chars>, mut start: usize) -> Option<(Param, usize)> {
let attr;
debug!("param_from_str, start={}", start);
loop {
match iter.next() {
Some((i, ' ')) if i == start => start = i + 1,
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, c)) if i > start && is_restricted_name_char(c) => (),
Some((i, '=')) if i > start => match FromStr::from_str(&ascii[start..i]) {
Ok(a) => {
attr = a;
start = i + 1;
break;
},
Err(_) => return None
},
_ => return None
}
}
let value;
// values must be restrict-name-char or "anything goes"
let mut is_quoted = false;
{
let substr = |a,b| { if attr==Attr::Charset { &ascii[a..b] } else { &raw[a..b] } };
let endstr = |a| { if attr==Attr::Charset { &ascii[a..] } else { &raw[a..] } };
loop {
match iter.next() {
Some((i, '"')) if i == start => {
debug!("quoted");
is_quoted = true;
start = i + 1;
},
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, '"')) if i > start && is_quoted => match FromStr::from_str(substr(start,i)) {
Ok(v) => {
value = v;
start = i + 1;
break;
},
Err(_) => return None
},
Some((i, c)) if i > start && is_quoted || is_restricted_name_char(c) => (),
Some((i, ';')) if i > start => match FromStr::from_str(substr(start,i)) {
Ok(v) => {
value = v;
start = i + 1;
break;
},
Err(_) => return None
},
None => match FromStr::from_str(endstr(start)) {
Ok(v) => {
value = v;
start = raw.len();
break;
},
Err(_) => return None
},
_ => return None
}
}
}
Some(((attr, value), start))
}
// From [RFC6838](http://tools.ietf.org/html/rfc6838#section-4.2):
//
// > All registered media types MUST be assigned top-level type and
// > subtype names. The combination of these names serves to uniquely
// > identify the media type, and the subtype name facet (or the absence
// > of one) identifies the registration tree. Both top-level type and
// > subtype names are case-insensitive.
// >
// > Type and subtype names MUST conform to the following ABNF:
// >
// > type-name = restricted-name
// > subtype-name = restricted-name
// >
// > restricted-name = restricted-name-first *126restricted-name-chars
// > restricted-name-first = ALPHA / DIGIT
// > restricted-name-chars = ALPHA / DIGIT / "!" / "#" /
// > "$" / "&" / "-" / "^" / "_"
// > restricted-name-chars =/ "." ; Characters before first dot always
// > ; specify a facet name
// > restricted-name-chars =/ "+" ; Characters after last plus always
// > ; specify a structured syntax suffix
//
fn is_restricted_name_first_char(c: char) -> bool {
match c {
'a'...'z' |
'0'...'9' => true,
_ => false
}
}
fn | (c: char) -> bool {
if is_restricted_name_first_char(c) {
true
} else {
match c {
'!' |
'#' |
'$' |
'&' |
'-' |
'^' |
'.' |
'+' |
'_' => true,
_ => false
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
#[cfg(feature = "nightly")]
use test::Bencher;
use super::{Mime, Value, Attr};
#[test]
fn test_mime_show() {
let mime = mime!(Text/Plain);
assert_eq!(mime.to_string(), "text/plain".to_string());
let mime = mime!(Text/Plain; Charset=Utf8);
assert_eq!(mime.to_string(), "text/plain; charset=utf-8".to_string());
}
#[test]
fn test_mime_from_str() {
assert_eq!(Mime::from_str("text/plain").unwrap(), mime!(Text/Plain));
assert_eq!(Mime::from_str("TEXT/PLAIN").unwrap(), mime!(Text/Plain));
assert_eq!(Mime::from_str("text/plain; charset=utf-8").unwrap(), mime!(Text/Plain; Charset=Utf8));
assert_eq!(Mime::from_str("text/plain;charset=\"utf | is_restricted_name_char | identifier_name |
lib.rs | !("matched json!"),
/// _ => ()
/// }
/// ```
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)]
pub struct Mime<T: AsRef<[Param]> = Vec<Param>>(pub TopLevel, pub SubLevel, pub T);
#[cfg(feature = "heapsize")]
impl<T: AsRef<[Param]> + heapsize::HeapSizeOf> heapsize::HeapSizeOf for Mime<T> {
fn heap_size_of_children(&self) -> usize {
self.0.heap_size_of_children() +
self.1.heap_size_of_children() +
self.2.heap_size_of_children()
}
}
impl<LHS: AsRef<[Param]>, RHS: AsRef<[Param]>> PartialEq<Mime<RHS>> for Mime<LHS> {
#[inline]
fn eq(&self, other: &Mime<RHS>) -> bool {
self.0 == other.0 && self.1 == other.1 && self.2.as_ref() == other.2.as_ref()
}
}
/// Easily create a Mime without having to import so many enums.
///
/// # Example
///
/// ```
/// # #[macro_use] extern crate mime;
///
/// # fn main() {
/// let json = mime!(Application/Json);
/// let plain = mime!(Text/Plain; Charset=Utf8);
/// let text = mime!(Text/Html; Charset=("bar"), ("baz")=("quux"));
/// let img = mime!(Image/_);
/// # }
/// ```
#[macro_export]
macro_rules! mime {
($top:tt / $sub:tt) => (
mime!($top / $sub;)
);
($top:tt / $sub:tt ; $($attr:tt = $val:tt),*) => (
$crate::Mime(
__mime__ident_or_ext!(TopLevel::$top),
__mime__ident_or_ext!(SubLevel::$sub),
vec![ $((__mime__ident_or_ext!(Attr::$attr), __mime__ident_or_ext!(Value::$val))),* ]
)
);
}
#[doc(hidden)]
#[macro_export]
macro_rules! __mime__ident_or_ext {
($enoom:ident::_) => (
$crate::$enoom::Star
);
($enoom:ident::($inner:expr)) => (
$crate::$enoom::Ext($inner.to_string())
);
($enoom:ident::$var:ident) => (
$crate::$enoom::$var
)
}
macro_rules! enoom {
(pub enum $en:ident; $ext:ident; $($ty:ident, $text:expr;)*) => (
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)]
pub enum $en {
$($ty),*,
$ext(String)
}
impl $en {
pub fn as_str(&self) -> &str {
match *self {
$($en::$ty => $text),*,
$en::$ext(ref s) => &s
}
}
}
impl ::std::ops::Deref for $en {
type Target = str;
fn deref(&self) -> &str {
self.as_str()
}
}
impl PartialEq for $en {
#[inline]
fn eq(&self, other: &$en) -> bool {
match (self, other) {
$( (&$en::$ty, &$en::$ty) => true ),*,
(&$en::$ext(ref a), &$en::$ext(ref b)) => a == b,
(_, _) => self.as_str() == other.as_str(),
}
}
}
impl PartialEq<String> for $en {
fn eq(&self, other: &String) -> bool {
self.as_str() == other
}
}
impl PartialEq<str> for $en {
fn eq(&self, other: &str) -> bool {
self.as_str() == other
}
}
impl<'a> PartialEq<&'a str> for $en {
fn eq(&self, other: &&'a str) -> bool {
self.as_str() == *other
}
}
impl PartialEq<$en> for String {
fn eq(&self, other: &$en) -> bool {
self == other.as_str()
}
}
impl PartialEq<$en> for str {
fn eq(&self, other: &$en) -> bool {
self == other.as_str()
}
}
impl<'a> PartialEq<$en> for &'a str {
fn eq(&self, other: &$en) -> bool {
*self == other.as_str()
}
}
impl fmt::Display for $en {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(match *self {
$($en::$ty => $text),*,
$en::$ext(ref s) => s
})
}
}
impl FromStr for $en {
type Err = ();
fn from_str(s: &str) -> Result<$en, ()> {
Ok(match s {
$(_s if _s == $text => $en::$ty),*,
s => $en::$ext(s.to_string())
})
}
}
#[cfg(feature = "heapsize")]
impl heapsize::HeapSizeOf for $en {
fn heap_size_of_children(&self) -> usize {
match *self {
$en::$ext(ref ext) => ext.heap_size_of_children(),
_ => 0,
}
}
}
)
}
enoom! {
pub enum TopLevel;
Ext;
Star, "*";
Text, "text";
Image, "image";
Audio, "audio";
Video, "video";
Application, "application";
Multipart, "multipart";
Message, "message";
Model, "model";
}
enoom! {
pub enum SubLevel;
Ext;
Star, "*";
// common text/*
Plain, "plain";
Html, "html";
Xml, "xml";
Javascript, "javascript";
Css, "css";
EventStream, "event-stream";
// common application/*
Json, "json";
WwwFormUrlEncoded, "x-www-form-urlencoded";
Msgpack, "msgpack";
OctetStream, "octet-stream";
// multipart/*
FormData, "form-data";
// common image/*
Png, "png";
Gif, "gif";
Bmp, "bmp";
Jpeg, "jpeg";
// audio/*
Mpeg, "mpeg";
Mp4, "mp4";
Ogg, "ogg";
}
enoom! {
pub enum Attr;
Ext;
Charset, "charset";
Boundary, "boundary";
Q, "q";
}
enoom! {
pub enum Value;
Ext;
Utf8, "utf-8";
}
pub type Param = (Attr, Value);
impl<T: AsRef<[Param]>> fmt::Display for Mime<T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result | } else if let TopLevel::Star = self.0 {
if let SubLevel::Star = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("*/*");
}
}
}
// slower general purpose fmt
try!(fmt::Display::fmt(&self.0, f));
try!(f.write_str("/"));
try!(fmt::Display::fmt(&self.1, f));
for param in self.2.as_ref() {
try!(f.write_str("; "));
try!(fmt::Display::fmt(¶m.0, f));
try!(f.write_str("="));
try!(fmt::Display::fmt(¶m.1, f));
}
Ok(())
}
}
impl<P: AsRef<[Param]>> Mime<P> {
pub fn get_param<A: PartialEq<Attr>>(&self, attr: A) -> Option<&Value> {
self.2.as_ref().iter().find(|&&(ref name, _)| attr == *name).map(|&(_, ref value)| value)
}
}
impl FromStr for Mime {
type Err = ();
fn from_str(raw: &str) -> Result<Mime, ()> {
if raw == "*/*" {
return Ok(mime!(Star/Star));
}
let ascii = raw.to_ascii_lowercase(); // lifetimes :(
let len = ascii.len();
let mut iter = ascii.chars().enumerate();
let mut params = vec![];
// toplevel
let mut start;
let top;
loop {
match iter.next() {
Some((0, c)) if is_restricted_name_first_char(c) => (),
Some((i, c)) if i > 0 && is_restricted_name_char(c) => (),
Some((i, '/')) if i > 0 => match FromStr::from_str(&ascii[..i]) {
Ok(t) => {
top = t;
start = i + 1;
break;
}
Err(_) => return Err(())
},
_ => return Err(()) // EOF and no toplevel is no Mime
};
}
// sublevel
let sub;
let mut sub_star = false;
loop {
match iter.next() {
Some((i, '*')) if i == start => {
sub_star = true;
},
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, c)) if!sub_star && i > start && is_restricted_name_char(c) => (),
Some((i, ';')) if i > start => match FromStr::from_str(&ascii[start..i]) {
Ok(s) => {
sub = s;
start = i + 1;
break;
}
Err(_) => return Err(())
},
None => match FromStr::from_str(&ascii[start..]) {
Ok(s) => return Ok(Mime(top, s, params)),
Err(_) => return Err(())
},
_ => return Err(())
};
}
// params
debug!("starting params, len={}", len);
loop {
match param_from_str(raw, &ascii, &mut iter, start) {
Some((p, end)) => {
params.push(p);
start = end;
if start >= len {
break;
}
}
None => break
}
}
Ok(Mime(top, sub, params))
}
}
#[cfg(feature = "serde")]
impl serde::ser::Serialize for Mime {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: serde::ser::Serializer
{
serializer.serialize_str(&*format!("{}",self))
}
}
#[cfg(feature = "serde")]
impl serde::de::Deserialize for Mime {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: serde::de::Deserializer
{
let string: String = try!(serde::Deserialize::deserialize(deserializer));
let mime: Mime = match FromStr::from_str(&*string) {
Ok(mime) => mime,
Err(_) => return Err(serde::de::Error::custom("Invalid serialized mime")),
};
Ok(mime)
}
}
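// Illustrative note (not from the original crate), relevant only with the `serde`
// feature: a `Mime` serializes as its display string and deserializes back through
// `FromStr`, so a JSON round-trip preserves the value. Hypothetical usage with
// serde_json (which is not a dependency of this crate):
//
// let s = serde_json::to_string(&mime!(Application/Json)).unwrap();
// assert_eq!(s, "\"application/json\"");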
fn param_from_str(raw: &str, ascii: &str, iter: &mut Enumerate<Chars>, mut start: usize) -> Option<(Param, usize)> {
let attr;
debug!("param_from_str, start={}", start);
loop {
match iter.next() {
Some((i, ' ')) if i == start => start = i + 1,
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, c)) if i > start && is_restricted_name_char(c) => (),
Some((i, '=')) if i > start => match FromStr::from_str(&ascii[start..i]) {
Ok(a) => {
attr = a;
start = i + 1;
break;
},
Err(_) => return None
},
_ => return None
}
}
let value;
// values must be restrict-name-char or "anything goes"
let mut is_quoted = false;
{
let substr = |a,b| { if attr==Attr::Charset { &ascii[a..b] } else { &raw[a..b] } };
let endstr = |a| { if attr==Attr::Charset { &ascii[a..] } else { &raw[a..] } };
loop {
match iter.next() {
Some((i, '"')) if i == start => {
debug!("quoted");
is_quoted = true;
start = i + 1;
},
Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
Some((i, '"')) if i > start && is_quoted => match FromStr::from_str(substr(start,i)) {
Ok(v) => {
value = v;
start = i + 1;
break;
},
Err(_) => return None
},
Some((i, c)) if i > start && is_quoted || is_restricted_name_char(c) => (),
Some((i, ';')) if i > start => match FromStr::from_str(substr(start,i)) {
Ok(v) => {
value = v;
start = i + 1;
break;
},
Err(_) => return None
},
None => match FromStr::from_str(endstr(start)) {
Ok(v) => {
value = v;
start = raw.len();
break;
},
Err(_) => return None
},
_ => return None
}
}
}
Some(((attr, value), start))
}
// From [RFC6838](http://tools.ietf.org/html/rfc6838#section-4.2):
//
// > All registered media types MUST be assigned top-level type and
// > subtype names. The combination of these names serves to uniquely
// > identify the media type, and the subtype name facet (or the absence
// > of one) identifies the registration tree. Both top-level type and
// > subtype names are case-insensitive.
// >
// > Type and subtype names MUST conform to the following ABNF:
// >
// > type-name = restricted-name
// > subtype-name = restricted-name
// >
// > restricted-name = restricted-name-first *126restricted-name-chars
// > restricted-name-first = ALPHA / DIGIT
// > restricted-name-chars = ALPHA / DIGIT / "!" / "#" /
// > "$" / "&" / "-" / "^" / "_"
// > restricted-name-chars =/ "." ; Characters before first dot always
// > ; specify a facet name
// > restricted-name-chars =/ "+" ; Characters after last plus always
// > ; specify a structured syntax suffix
//
fn is_restricted_name_first_char(c: char) -> bool {
match c {
'a'...'z' |
'0'...'9' => true,
_ => false
}
}
fn is_restricted_name_char(c: char) -> bool {
if is_restricted_name_first_char(c) {
true
} else {
match c {
'!' |
'#' |
'$' |
'&' |
'-' |
'^' |
'.' |
'+' |
'_' => true,
_ => false
}
}
}
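// Editor's sketch (not from the original crate): a quick check that the two
// predicates above track the RFC 6838 ABNF quoted in the comment. The parser
// lowercases its input into `ascii` before matching, which is why uppercase
// ALPHA is intentionally rejected by `is_restricted_name_first_char`. The
// module name and test values are this editor's own.
#[cfg(test)]
mod restricted_name_sketch {
    use super::{is_restricted_name_char, is_restricted_name_first_char};
    #[test]
    fn predicates_follow_the_abnf() {
        assert!(is_restricted_name_first_char('a'));
        assert!(is_restricted_name_first_char('9'));
        assert!(!is_restricted_name_first_char('A')); // input is lowercased before parsing
        assert!(!is_restricted_name_first_char('-'));
        for &c in ['!', '#', '$', '&', '-', '^', '.', '+', '_'].iter() {
            assert!(is_restricted_name_char(c));
        }
        assert!(!is_restricted_name_char(';'));
    }
}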
#[cfg(test)]
mod tests {
use std::str::FromStr;
#[cfg(feature = "nightly")]
use test::Bencher;
use super::{Mime, Value, Attr};
#[test]
fn test_mime_show() {
let mime = mime!(Text/Plain);
assert_eq!(mime.to_string(), "text/plain".to_string());
let mime = mime!(Text/Plain; Charset=Utf8);
assert_eq!(mime.to_string(), "text/plain; charset=utf-8".to_string());
}
#[test]
fn test_mime_from_str() {
assert_eq!(Mime::from_str("text/plain").unwrap(), mime!(Text/Plain));
assert_eq!(Mime::from_str("TEXT/PLAIN").unwrap(), mime!(Text/Plain));
assert_eq!(Mime::from_str("text/plain; charset=utf-8").unwrap(), mime!(Text/Plain; Charset=Utf8));
assert_eq!(Mime::from_str("text/plain;charset=\"utf | {
// It's much faster to write a single string, as opposed to pushing
// several parts through f.write_str(). So, check for the most common
// mime types, and fast track them.
if let TopLevel::Text = self.0 {
if let SubLevel::Plain = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("text/plain");
} else if &[(Attr::Charset, Value::Utf8)] == attrs {
return f.write_str("text/plain; charset=utf-8");
}
}
} else if let TopLevel::Application = self.0 {
if let SubLevel::Json = self.1 {
let attrs = self.2.as_ref();
if attrs.len() == 0 {
return f.write_str("application/json");
}
} | identifier_body |
shuf.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) cmdline evec seps rvec fdata
use clap::{crate_version, Arg, ArgAction, Command};
use memchr::memchr_iter;
use rand::prelude::SliceRandom;
use rand::RngCore;
use std::fs::File;
use std::io::{stdin, stdout, BufReader, BufWriter, Read, Write};
use uucore::display::Quotable;
use uucore::error::{FromIo, UResult, USimpleError};
use uucore::{format_usage, help_about, help_usage};
mod rand_read_adapter;
enum Mode {
Default(String),
Echo(Vec<String>),
InputRange((usize, usize)),
}
static USAGE: &str = help_usage!("shuf.md");
static ABOUT: &str = help_about!("shuf.md");
struct Options {
head_count: usize,
output: Option<String>,
random_source: Option<String>,
repeat: bool,
sep: u8,
}
mod options {
pub static ECHO: &str = "echo";
pub static INPUT_RANGE: &str = "input-range";
pub static HEAD_COUNT: &str = "head-count";
pub static OUTPUT: &str = "output";
pub static RANDOM_SOURCE: &str = "random-source";
pub static REPEAT: &str = "repeat";
pub static ZERO_TERMINATED: &str = "zero-terminated";
pub static FILE: &str = "file";
}
#[uucore::main]
pub fn uumain(args: impl uucore::Args) -> UResult<()> | .to_string(),
)
};
let options = Options {
head_count: {
let headcounts = matches
.get_many::<String>(options::HEAD_COUNT)
.unwrap_or_default()
.map(|s| s.to_owned())
.collect();
match parse_head_count(headcounts) {
Ok(val) => val,
Err(msg) => return Err(USimpleError::new(1, msg)),
}
},
output: matches.get_one::<String>(options::OUTPUT).map(String::from),
random_source: matches
.get_one::<String>(options::RANDOM_SOURCE)
.map(String::from),
repeat: matches.get_flag(options::REPEAT),
sep: if matches.get_flag(options::ZERO_TERMINATED) {
0x00_u8
} else {
0x0a_u8
},
};
match mode {
Mode::Echo(args) => {
let mut evec = args.iter().map(String::as_bytes).collect::<Vec<_>>();
find_seps(&mut evec, options.sep);
shuf_bytes(&mut evec, options)?;
}
Mode::InputRange((b, e)) => {
let rvec = (b..e).map(|x| format!("{x}")).collect::<Vec<String>>();
let mut rvec = rvec.iter().map(String::as_bytes).collect::<Vec<&[u8]>>();
shuf_bytes(&mut rvec, options)?;
}
Mode::Default(filename) => {
let fdata = read_input_file(&filename)?;
let mut fdata = vec![&fdata[..]];
find_seps(&mut fdata, options.sep);
shuf_bytes(&mut fdata, options)?;
}
}
Ok(())
}
pub fn uu_app() -> Command {
Command::new(uucore::util_name())
.about(ABOUT)
.version(crate_version!())
.override_usage(format_usage(USAGE))
.infer_long_args(true)
.args_override_self(true)
.arg(
Arg::new(options::ECHO)
.short('e')
.long(options::ECHO)
.value_name("ARG")
.help("treat each ARG as an input line")
.use_value_delimiter(false)
.num_args(0..)
.conflicts_with(options::INPUT_RANGE),
)
.arg(
Arg::new(options::INPUT_RANGE)
.short('i')
.long(options::INPUT_RANGE)
.value_name("LO-HI")
.help("treat each number LO through HI as an input line")
.conflicts_with(options::FILE),
)
.arg(
Arg::new(options::HEAD_COUNT)
.short('n')
.long(options::HEAD_COUNT)
.value_name("COUNT")
.help("output at most COUNT lines"),
)
.arg(
Arg::new(options::OUTPUT)
.short('o')
.long(options::OUTPUT)
.value_name("FILE")
.help("write result to FILE instead of standard output")
.value_hint(clap::ValueHint::FilePath),
)
.arg(
Arg::new(options::RANDOM_SOURCE)
.long(options::RANDOM_SOURCE)
.value_name("FILE")
.help("get random bytes from FILE")
.value_hint(clap::ValueHint::FilePath),
)
.arg(
Arg::new(options::REPEAT)
.short('r')
.long(options::REPEAT)
.help("output lines can be repeated")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new(options::ZERO_TERMINATED)
.short('z')
.long(options::ZERO_TERMINATED)
.help("line delimiter is NUL, not newline")
.action(ArgAction::SetTrue),
)
.arg(Arg::new(options::FILE).value_hint(clap::ValueHint::FilePath))
}
fn read_input_file(filename: &str) -> UResult<Vec<u8>> {
let mut file = BufReader::new(if filename == "-" {
Box::new(stdin()) as Box<dyn Read>
} else {
let file = File::open(filename)
.map_err_context(|| format!("failed to open {}", filename.quote()))?;
Box::new(file) as Box<dyn Read>
});
let mut data = Vec::new();
file.read_to_end(&mut data)
.map_err_context(|| format!("failed reading {}", filename.quote()))?;
Ok(data)
}
fn find_seps(data: &mut Vec<&[u8]>, sep: u8) {
// need to use for loop so we don't borrow the vector as we modify it in place
// basic idea:
// * We don't care about the order of the result. This lets us slice the slices
// without making a new vector.
// * Starting from the end of the vector, we examine each element.
// * If that element contains the separator, we remove it from the vector,
// and then sub-slice it into slices that do not contain the separator.
// * We maintain the invariant throughout that each element in the vector past
// the ith element does not have any separators remaining.
for i in (0..data.len()).rev() {
if data[i].contains(&sep) {
let this = data.swap_remove(i);
let mut p = 0;
for i in memchr_iter(sep, this) {
data.push(&this[p..i]);
p = i + 1;
}
if p < this.len() {
data.push(&this[p..]);
}
}
}
}
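// Editor's sketch (not in upstream shuf): exercising the `find_seps` contract
// described in the comment above — every separator byte is consumed, and
// element order is NOT preserved (a consequence of `swap_remove`), so the
// result is sorted before comparing. Module name and data are assumptions.
#[cfg(test)]
mod find_seps_sketch {
    use super::find_seps;
    #[test]
    fn splits_on_separator_in_arbitrary_order() {
        let data = b"a\nbb\n";
        let mut parts: Vec<&[u8]> = vec![&data[..]];
        find_seps(&mut parts, b'\n');
        parts.sort();
        assert_eq!(parts, vec![&b"a"[..], &b"bb"[..]]);
    }
}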
fn shuf_bytes(input: &mut Vec<&[u8]>, opts: Options) -> UResult<()> {
let mut output = BufWriter::new(match opts.output {
None => Box::new(stdout()) as Box<dyn Write>,
Some(s) => {
let file = File::create(&s[..])
.map_err_context(|| format!("failed to open {} for writing", s.quote()))?;
Box::new(file) as Box<dyn Write>
}
});
let mut rng = match opts.random_source {
Some(r) => {
let file = File::open(&r[..])
.map_err_context(|| format!("failed to open random source {}", r.quote()))?;
WrappedRng::RngFile(rand_read_adapter::ReadRng::new(file))
}
None => WrappedRng::RngDefault(rand::thread_rng()),
};
if input.is_empty() {
return Ok(());
}
if opts.repeat {
for _ in 0..opts.head_count {
// Returns None if the slice is empty. We checked this before, so
// this is safe.
let r = input.choose(&mut rng).unwrap();
output
.write_all(r)
.map_err_context(|| "write failed".to_string())?;
output
.write_all(&[opts.sep])
.map_err_context(|| "write failed".to_string())?;
}
} else {
let (shuffled, _) = input.partial_shuffle(&mut rng, opts.head_count);
for r in shuffled {
output
.write_all(r)
.map_err_context(|| "write failed".to_string())?;
output
.write_all(&[opts.sep])
.map_err_context(|| "write failed".to_string())?;
}
}
Ok(())
}
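// Editor's sketch (not in upstream shuf): the non-repeating branch above
// relies on `SliceRandom::partial_shuffle`, which shuffles only as many
// elements as requested — this is what lets `-n COUNT` avoid shuffling the
// whole input first. The module name and sample data are this editor's own.
#[cfg(test)]
mod partial_shuffle_sketch {
    use rand::prelude::SliceRandom;
    #[test]
    fn yields_at_most_the_requested_amount() {
        let mut items = [1, 2, 3, 4, 5];
        let (picked, rest) = items.partial_shuffle(&mut rand::thread_rng(), 2);
        assert_eq!(picked.len(), 2);
        assert_eq!(rest.len(), 3);
    }
}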
fn parse_range(input_range: &str) -> Result<(usize, usize), String> {
if let Some((from, to)) = input_range.split_once('-') {
let begin = from
.parse::<usize>()
.map_err(|_| format!("invalid input range: {}", from.quote()))?;
let end = to
.parse::<usize>()
.map_err(|_| format!("invalid input range: {}", to.quote()))?;
Ok((begin, end + 1))
} else {
Err(format!("invalid input range: {}", input_range.quote()))
}
}
fn parse_head_count(headcounts: Vec<String>) -> Result<usize, String> {
let mut result = std::usize::MAX;
for count in headcounts {
match count.parse::<usize>() {
Ok(pv) => result = std::cmp::min(result, pv),
Err(_) => return Err(format!("invalid line count: {}", count.quote())),
}
}
Ok(result)
}
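// Editor's sketch (not in upstream shuf): when `-n` is given several times,
// the smallest COUNT wins — presumably to mirror GNU shuf's handling of
// repeated head counts. Module name and values are assumptions.
#[cfg(test)]
mod parse_head_count_sketch {
    use super::parse_head_count;
    #[test]
    fn repeated_counts_take_the_minimum() {
        assert_eq!(parse_head_count(vec!["5".into(), "2".into()]), Ok(2));
    }
}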
enum WrappedRng {
RngFile(rand_read_adapter::ReadRng<File>),
RngDefault(rand::rngs::ThreadRng),
}
impl RngCore for WrappedRng {
fn next_u32(&mut self) -> u32 {
match self {
Self::RngFile(r) => r.next_u32(),
Self::RngDefault(r) => r.next_u32(),
}
}
fn next_u64(&mut self) -> u64 {
match self {
Self::RngFile(r) => r.next_u64(),
Self::RngDefault(r) => r.next_u64(),
}
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
match self {
Self::RngFile(r) => r.fill_bytes(dest),
Self::RngDefault(r) => r.fill_bytes(dest),
}
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> {
match self {
Self::RngFile(r) => r.try_fill_bytes(dest),
Self::RngDefault(r) => r.try_fill_bytes(dest),
}
}
}
| {
let args = args.collect_lossy();
let matches = uu_app().try_get_matches_from(args)?;
let mode = if let Some(args) = matches.get_many::<String>(options::ECHO) {
Mode::Echo(args.map(String::from).collect())
} else if let Some(range) = matches.get_one::<String>(options::INPUT_RANGE) {
match parse_range(range) {
Ok(m) => Mode::InputRange(m),
Err(msg) => {
return Err(USimpleError::new(1, msg));
}
}
} else {
Mode::Default(
matches
.get_one::<String>(options::FILE)
.map(|s| s.as_str())
.unwrap_or("-") | identifier_body |
shuf.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) cmdline evec seps rvec fdata
use clap::{crate_version, Arg, ArgAction, Command};
use memchr::memchr_iter;
use rand::prelude::SliceRandom;
use rand::RngCore;
use std::fs::File;
use std::io::{stdin, stdout, BufReader, BufWriter, Read, Write};
use uucore::display::Quotable;
use uucore::error::{FromIo, UResult, USimpleError};
use uucore::{format_usage, help_about, help_usage};
mod rand_read_adapter;
enum Mode {
Default(String),
Echo(Vec<String>),
InputRange((usize, usize)),
}
static USAGE: &str = help_usage!("shuf.md");
static ABOUT: &str = help_about!("shuf.md");
struct Options {
head_count: usize,
output: Option<String>,
random_source: Option<String>,
repeat: bool,
sep: u8,
}
mod options {
pub static ECHO: &str = "echo";
pub static INPUT_RANGE: &str = "input-range";
pub static HEAD_COUNT: &str = "head-count";
pub static OUTPUT: &str = "output";
pub static RANDOM_SOURCE: &str = "random-source";
pub static REPEAT: &str = "repeat";
pub static ZERO_TERMINATED: &str = "zero-terminated";
pub static FILE: &str = "file";
}
#[uucore::main]
pub fn | (args: impl uucore::Args) -> UResult<()> {
let args = args.collect_lossy();
let matches = uu_app().try_get_matches_from(args)?;
let mode = if let Some(args) = matches.get_many::<String>(options::ECHO) {
Mode::Echo(args.map(String::from).collect())
} else if let Some(range) = matches.get_one::<String>(options::INPUT_RANGE) {
match parse_range(range) {
Ok(m) => Mode::InputRange(m),
Err(msg) => {
return Err(USimpleError::new(1, msg));
}
}
} else {
Mode::Default(
matches
.get_one::<String>(options::FILE)
.map(|s| s.as_str())
.unwrap_or("-")
.to_string(),
)
};
let options = Options {
head_count: {
let headcounts = matches
.get_many::<String>(options::HEAD_COUNT)
.unwrap_or_default()
.map(|s| s.to_owned())
.collect();
match parse_head_count(headcounts) {
Ok(val) => val,
Err(msg) => return Err(USimpleError::new(1, msg)),
}
},
output: matches.get_one::<String>(options::OUTPUT).map(String::from),
random_source: matches
.get_one::<String>(options::RANDOM_SOURCE)
.map(String::from),
repeat: matches.get_flag(options::REPEAT),
sep: if matches.get_flag(options::ZERO_TERMINATED) {
0x00_u8
} else {
0x0a_u8
},
};
match mode {
Mode::Echo(args) => {
let mut evec = args.iter().map(String::as_bytes).collect::<Vec<_>>();
find_seps(&mut evec, options.sep);
shuf_bytes(&mut evec, options)?;
}
Mode::InputRange((b, e)) => {
let rvec = (b..e).map(|x| format!("{x}")).collect::<Vec<String>>();
let mut rvec = rvec.iter().map(String::as_bytes).collect::<Vec<&[u8]>>();
shuf_bytes(&mut rvec, options)?;
}
Mode::Default(filename) => {
let fdata = read_input_file(&filename)?;
let mut fdata = vec![&fdata[..]];
find_seps(&mut fdata, options.sep);
shuf_bytes(&mut fdata, options)?;
}
}
Ok(())
}
pub fn uu_app() -> Command {
Command::new(uucore::util_name())
.about(ABOUT)
.version(crate_version!())
.override_usage(format_usage(USAGE))
.infer_long_args(true)
.args_override_self(true)
.arg(
Arg::new(options::ECHO)
.short('e')
.long(options::ECHO)
.value_name("ARG")
.help("treat each ARG as an input line")
.use_value_delimiter(false)
.num_args(0..)
.conflicts_with(options::INPUT_RANGE),
)
.arg(
Arg::new(options::INPUT_RANGE)
.short('i')
.long(options::INPUT_RANGE)
.value_name("LO-HI")
.help("treat each number LO through HI as an input line")
.conflicts_with(options::FILE),
)
.arg(
Arg::new(options::HEAD_COUNT)
.short('n')
.long(options::HEAD_COUNT)
.value_name("COUNT")
.help("output at most COUNT lines"),
)
.arg(
Arg::new(options::OUTPUT)
.short('o')
.long(options::OUTPUT)
.value_name("FILE")
.help("write result to FILE instead of standard output")
.value_hint(clap::ValueHint::FilePath),
)
.arg(
Arg::new(options::RANDOM_SOURCE)
.long(options::RANDOM_SOURCE)
.value_name("FILE")
.help("get random bytes from FILE")
.value_hint(clap::ValueHint::FilePath),
)
.arg(
Arg::new(options::REPEAT)
.short('r')
.long(options::REPEAT)
.help("output lines can be repeated")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new(options::ZERO_TERMINATED)
.short('z')
.long(options::ZERO_TERMINATED)
.help("line delimiter is NUL, not newline")
.action(ArgAction::SetTrue),
)
.arg(Arg::new(options::FILE).value_hint(clap::ValueHint::FilePath))
}
fn read_input_file(filename: &str) -> UResult<Vec<u8>> {
let mut file = BufReader::new(if filename == "-" {
Box::new(stdin()) as Box<dyn Read>
} else {
let file = File::open(filename)
.map_err_context(|| format!("failed to open {}", filename.quote()))?;
Box::new(file) as Box<dyn Read>
});
let mut data = Vec::new();
file.read_to_end(&mut data)
.map_err_context(|| format!("failed reading {}", filename.quote()))?;
Ok(data)
}
fn find_seps(data: &mut Vec<&[u8]>, sep: u8) {
// need to use for loop so we don't borrow the vector as we modify it in place
// basic idea:
// * We don't care about the order of the result. This lets us slice the slices
// without making a new vector.
// * Starting from the end of the vector, we examine each element.
// * If that element contains the separator, we remove it from the vector,
// and then sub-slice it into slices that do not contain the separator.
// * We maintain the invariant throughout that each element in the vector past
// the ith element does not have any separators remaining.
for i in (0..data.len()).rev() {
if data[i].contains(&sep) {
let this = data.swap_remove(i);
let mut p = 0;
for i in memchr_iter(sep, this) {
data.push(&this[p..i]);
p = i + 1;
}
if p < this.len() {
data.push(&this[p..]);
}
}
}
}
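// Editor's sketch (not in upstream shuf): a corollary of the invariant spelled
// out in the comment above — two adjacent separators leave an empty slice
// between them, and that empty slice is kept. Module name and data are this
// editor's own; results are sorted because order is not preserved.
#[cfg(test)]
mod find_seps_adjacent_sketch {
    use super::find_seps;
    #[test]
    fn keeps_empty_slice_between_adjacent_separators() {
        let data = b"a\n\nb";
        let mut parts: Vec<&[u8]> = vec![&data[..]];
        find_seps(&mut parts, b'\n');
        parts.sort();
        assert_eq!(parts, vec![&b""[..], &b"a"[..], &b"b"[..]]);
    }
}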
fn shuf_bytes(input: &mut Vec<&[u8]>, opts: Options) -> UResult<()> {
let mut output = BufWriter::new(match opts.output {
None => Box::new(stdout()) as Box<dyn Write>,
Some(s) => {
let file = File::create(&s[..])
.map_err_context(|| format!("failed to open {} for writing", s.quote()))?;
Box::new(file) as Box<dyn Write>
}
});
let mut rng = match opts.random_source {
Some(r) => {
let file = File::open(&r[..])
.map_err_context(|| format!("failed to open random source {}", r.quote()))?;
WrappedRng::RngFile(rand_read_adapter::ReadRng::new(file))
}
None => WrappedRng::RngDefault(rand::thread_rng()),
};
if input.is_empty() {
return Ok(());
}
if opts.repeat {
for _ in 0..opts.head_count {
// Returns None if the slice is empty. We checked this before, so
// this is safe.
let r = input.choose(&mut rng).unwrap();
output
.write_all(r)
.map_err_context(|| "write failed".to_string())?;
output
.write_all(&[opts.sep])
.map_err_context(|| "write failed".to_string())?;
}
} else {
let (shuffled, _) = input.partial_shuffle(&mut rng, opts.head_count);
for r in shuffled {
output
.write_all(r)
.map_err_context(|| "write failed".to_string())?;
output
.write_all(&[opts.sep])
.map_err_context(|| "write failed".to_string())?;
}
}
Ok(())
}
fn parse_range(input_range: &str) -> Result<(usize, usize), String> {
if let Some((from, to)) = input_range.split_once('-') {
let begin = from
.parse::<usize>()
.map_err(|_| format!("invalid input range: {}", from.quote()))?;
let end = to
.parse::<usize>()
.map_err(|_| format!("invalid input range: {}", to.quote()))?;
Ok((begin, end + 1))
} else {
Err(format!("invalid input range: {}", input_range.quote()))
}
}
fn parse_head_count(headcounts: Vec<String>) -> Result<usize, String> {
let mut result = std::usize::MAX;
for count in headcounts {
match count.parse::<usize>() {
Ok(pv) => result = std::cmp::min(result, pv),
Err(_) => return Err(format!("invalid line count: {}", count.quote())),
}
}
Ok(result)
}
enum WrappedRng {
RngFile(rand_read_adapter::ReadRng<File>),
RngDefault(rand::rngs::ThreadRng),
}
impl RngCore for WrappedRng {
fn next_u32(&mut self) -> u32 {
match self {
Self::RngFile(r) => r.next_u32(),
Self::RngDefault(r) => r.next_u32(),
}
}
fn next_u64(&mut self) -> u64 {
match self {
Self::RngFile(r) => r.next_u64(),
Self::RngDefault(r) => r.next_u64(),
}
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
match self {
Self::RngFile(r) => r.fill_bytes(dest),
Self::RngDefault(r) => r.fill_bytes(dest),
}
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> {
match self {
Self::RngFile(r) => r.try_fill_bytes(dest),
Self::RngDefault(r) => r.try_fill_bytes(dest),
}
}
}
| uumain | identifier_name |
shuf.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) cmdline evec seps rvec fdata
use clap::{crate_version, Arg, ArgAction, Command};
use memchr::memchr_iter;
use rand::prelude::SliceRandom;
use rand::RngCore;
use std::fs::File;
use std::io::{stdin, stdout, BufReader, BufWriter, Read, Write};
use uucore::display::Quotable;
use uucore::error::{FromIo, UResult, USimpleError};
use uucore::{format_usage, help_about, help_usage};
mod rand_read_adapter;
enum Mode {
Default(String),
Echo(Vec<String>),
InputRange((usize, usize)),
}
static USAGE: &str = help_usage!("shuf.md");
static ABOUT: &str = help_about!("shuf.md");
struct Options {
head_count: usize,
output: Option<String>,
random_source: Option<String>,
repeat: bool,
sep: u8,
}
mod options {
pub static ECHO: &str = "echo";
pub static INPUT_RANGE: &str = "input-range";
pub static HEAD_COUNT: &str = "head-count";
pub static OUTPUT: &str = "output";
pub static RANDOM_SOURCE: &str = "random-source";
pub static REPEAT: &str = "repeat";
pub static ZERO_TERMINATED: &str = "zero-terminated";
pub static FILE: &str = "file";
}
#[uucore::main]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let args = args.collect_lossy();
let matches = uu_app().try_get_matches_from(args)?;
let mode = if let Some(args) = matches.get_many::<String>(options::ECHO) {
Mode::Echo(args.map(String::from).collect())
} else if let Some(range) = matches.get_one::<String>(options::INPUT_RANGE) {
match parse_range(range) {
Ok(m) => Mode::InputRange(m),
Err(msg) => {
return Err(USimpleError::new(1, msg));
}
}
} else {
Mode::Default(
matches
.get_one::<String>(options::FILE)
.map(|s| s.as_str())
.unwrap_or("-")
.to_string(),
)
};
let options = Options {
head_count: {
let headcounts = matches
.get_many::<String>(options::HEAD_COUNT)
.unwrap_or_default()
.map(|s| s.to_owned())
.collect();
match parse_head_count(headcounts) {
Ok(val) => val,
Err(msg) => return Err(USimpleError::new(1, msg)),
}
},
output: matches.get_one::<String>(options::OUTPUT).map(String::from),
random_source: matches
.get_one::<String>(options::RANDOM_SOURCE)
.map(String::from),
repeat: matches.get_flag(options::REPEAT),
sep: if matches.get_flag(options::ZERO_TERMINATED) {
0x00_u8
} else {
0x0a_u8
},
};
match mode {
Mode::Echo(args) => {
let mut evec = args.iter().map(String::as_bytes).collect::<Vec<_>>();
find_seps(&mut evec, options.sep);
shuf_bytes(&mut evec, options)?;
}
Mode::InputRange((b, e)) => {
let rvec = (b..e).map(|x| format!("{x}")).collect::<Vec<String>>();
let mut rvec = rvec.iter().map(String::as_bytes).collect::<Vec<&[u8]>>();
shuf_bytes(&mut rvec, options)?;
}
Mode::Default(filename) => {
let fdata = read_input_file(&filename)?;
let mut fdata = vec![&fdata[..]];
find_seps(&mut fdata, options.sep);
shuf_bytes(&mut fdata, options)?;
}
}
Ok(())
}
pub fn uu_app() -> Command {
Command::new(uucore::util_name())
.about(ABOUT) | .override_usage(format_usage(USAGE))
.infer_long_args(true)
.args_override_self(true)
.arg(
Arg::new(options::ECHO)
.short('e')
.long(options::ECHO)
.value_name("ARG")
.help("treat each ARG as an input line")
.use_value_delimiter(false)
.num_args(0..)
.conflicts_with(options::INPUT_RANGE),
)
.arg(
Arg::new(options::INPUT_RANGE)
.short('i')
.long(options::INPUT_RANGE)
.value_name("LO-HI")
.help("treat each number LO through HI as an input line")
.conflicts_with(options::FILE),
)
.arg(
Arg::new(options::HEAD_COUNT)
.short('n')
.long(options::HEAD_COUNT)
.value_name("COUNT")
.help("output at most COUNT lines"),
)
.arg(
Arg::new(options::OUTPUT)
.short('o')
.long(options::OUTPUT)
.value_name("FILE")
.help("write result to FILE instead of standard output")
.value_hint(clap::ValueHint::FilePath),
)
.arg(
Arg::new(options::RANDOM_SOURCE)
.long(options::RANDOM_SOURCE)
.value_name("FILE")
.help("get random bytes from FILE")
.value_hint(clap::ValueHint::FilePath),
)
.arg(
Arg::new(options::REPEAT)
.short('r')
.long(options::REPEAT)
.help("output lines can be repeated")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new(options::ZERO_TERMINATED)
.short('z')
.long(options::ZERO_TERMINATED)
.help("line delimiter is NUL, not newline")
.action(ArgAction::SetTrue),
)
.arg(Arg::new(options::FILE).value_hint(clap::ValueHint::FilePath))
}
fn read_input_file(filename: &str) -> UResult<Vec<u8>> {
let mut file = BufReader::new(if filename == "-" {
Box::new(stdin()) as Box<dyn Read>
} else {
let file = File::open(filename)
.map_err_context(|| format!("failed to open {}", filename.quote()))?;
Box::new(file) as Box<dyn Read>
});
let mut data = Vec::new();
file.read_to_end(&mut data)
.map_err_context(|| format!("failed reading {}", filename.quote()))?;
Ok(data)
}
fn find_seps(data: &mut Vec<&[u8]>, sep: u8) {
// need to use for loop so we don't borrow the vector as we modify it in place
// basic idea:
// * We don't care about the order of the result. This lets us slice the slices
// without making a new vector.
// * Starting from the end of the vector, we examine each element.
// * If that element contains the separator, we remove it from the vector,
// and then sub-slice it into slices that do not contain the separator.
// * We maintain the invariant throughout that each element in the vector past
// the ith element does not have any separators remaining.
for i in (0..data.len()).rev() {
if data[i].contains(&sep) {
let this = data.swap_remove(i);
let mut p = 0;
for i in memchr_iter(sep, this) {
data.push(&this[p..i]);
p = i + 1;
}
if p < this.len() {
data.push(&this[p..]);
}
}
}
}
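// Editor's sketch (not in upstream shuf): unlike an interior separator, a
// trailing one produces no empty tail slice — the `p < this.len()` guard in
// the function above skips it. Module name and data are assumptions.
#[cfg(test)]
mod find_seps_trailing_sketch {
    use super::find_seps;
    #[test]
    fn trailing_separator_emits_no_empty_tail() {
        let data = b"x\n";
        let mut parts: Vec<&[u8]> = vec![&data[..]];
        find_seps(&mut parts, b'\n');
        assert_eq!(parts, vec![&b"x"[..]]);
    }
}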
fn shuf_bytes(input: &mut Vec<&[u8]>, opts: Options) -> UResult<()> {
let mut output = BufWriter::new(match opts.output {
None => Box::new(stdout()) as Box<dyn Write>,
Some(s) => {
let file = File::create(&s[..])
.map_err_context(|| format!("failed to open {} for writing", s.quote()))?;
Box::new(file) as Box<dyn Write>
}
});
let mut rng = match opts.random_source {
Some(r) => {
let file = File::open(&r[..])
.map_err_context(|| format!("failed to open random source {}", r.quote()))?;
WrappedRng::RngFile(rand_read_adapter::ReadRng::new(file))
}
None => WrappedRng::RngDefault(rand::thread_rng()),
};
if input.is_empty() {
return Ok(());
}
if opts.repeat {
for _ in 0..opts.head_count {
// Returns None if the slice is empty. We checked this before, so
// this is safe.
let r = input.choose(&mut rng).unwrap();
output
.write_all(r)
.map_err_context(|| "write failed".to_string())?;
output
.write_all(&[opts.sep])
.map_err_context(|| "write failed".to_string())?;
}
} else {
let (shuffled, _) = input.partial_shuffle(&mut rng, opts.head_count);
for r in shuffled {
output
.write_all(r)
.map_err_context(|| "write failed".to_string())?;
output
.write_all(&[opts.sep])
.map_err_context(|| "write failed".to_string())?;
}
}
Ok(())
}
fn parse_range(input_range: &str) -> Result<(usize, usize), String> {
if let Some((from, to)) = input_range.split_once('-') {
let begin = from
.parse::<usize>()
.map_err(|_| format!("invalid input range: {}", from.quote()))?;
let end = to
.parse::<usize>()
.map_err(|_| format!("invalid input range: {}", to.quote()))?;
Ok((begin, end + 1))
} else {
Err(format!("invalid input range: {}", input_range.quote()))
}
}
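// Editor's sketch (not in upstream shuf): `parse_range` treats "LO-HI" as
// inclusive on both ends and returns a half-open pair — hence the `end + 1`
// above. Module name and values are this editor's own.
#[cfg(test)]
mod parse_range_sketch {
    use super::parse_range;
    #[test]
    fn range_is_inclusive_so_the_end_is_bumped() {
        assert_eq!(parse_range("0-9"), Ok((0, 10)));
        assert!(parse_range("10").is_err()); // a bare number is rejected
    }
}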
fn parse_head_count(headcounts: Vec<String>) -> Result<usize, String> {
let mut result = std::usize::MAX;
for count in headcounts {
match count.parse::<usize>() {
Ok(pv) => result = std::cmp::min(result, pv),
Err(_) => return Err(format!("invalid line count: {}", count.quote())),
}
}
Ok(result)
}
enum WrappedRng {
RngFile(rand_read_adapter::ReadRng<File>),
RngDefault(rand::rngs::ThreadRng),
}
impl RngCore for WrappedRng {
fn next_u32(&mut self) -> u32 {
match self {
Self::RngFile(r) => r.next_u32(),
Self::RngDefault(r) => r.next_u32(),
}
}
fn next_u64(&mut self) -> u64 {
match self {
Self::RngFile(r) => r.next_u64(),
Self::RngDefault(r) => r.next_u64(),
}
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
match self {
Self::RngFile(r) => r.fill_bytes(dest),
Self::RngDefault(r) => r.fill_bytes(dest),
}
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> {
match self {
Self::RngFile(r) => r.try_fill_bytes(dest),
Self::RngDefault(r) => r.try_fill_bytes(dest),
}
}
} | .version(crate_version!()) | random_line_split |
lib.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// TODO we're going to allocate a whole bunch of temp Strings, is it worth
// keeping some scratch mem for this and running our own StrPool?
// TODO for lint violations of names, emit a refactor script
#[macro_use]
extern crate log;
extern crate syntex_syntax as syntax;
extern crate rustc_serialize;
extern crate strings;
extern crate unicode_segmentation;
extern crate regex;
extern crate diff;
extern crate term;
use syntax::ast;
use syntax::codemap::{mk_sp, CodeMap, Span};
use syntax::errors::Handler;
use syntax::errors::emitter::{ColorConfig, EmitterWriter};
use syntax::parse::{self, ParseSess};
use std::io::stdout;
use std::ops::{Add, Sub};
use std::path::Path;
use std::rc::Rc;
use std::collections::HashMap;
use std::fmt;
use issues::{BadIssueSeeker, Issue};
use filemap::FileMap;
use visitor::FmtVisitor;
use config::Config;
#[macro_use]
mod utils;
pub mod config;
pub mod filemap;
mod visitor;
mod checkstyle;
mod items;
mod missed_spans;
mod lists;
mod types;
mod expr;
mod imports;
mod issues;
mod rewrite;
mod string;
mod comment;
mod modules;
pub mod rustfmt_diff;
mod chains;
mod macros;
mod patterns;
const MIN_STRING: usize = 10;
// When we get scoped annotations, we should have rustfmt::skip.
const SKIP_ANNOTATION: &'static str = "rustfmt_skip";
pub trait Spanned {
fn span(&self) -> Span;
}
impl Spanned for ast::Expr {
fn span(&self) -> Span {
self.span
}
}
impl Spanned for ast::Pat {
fn span(&self) -> Span {
self.span
}
}
impl Spanned for ast::Ty {
fn span(&self) -> Span {
self.span
}
}
impl Spanned for ast::Arg {
fn span(&self) -> Span {
if items::is_named_arg(self) {
mk_sp(self.pat.span.lo, self.ty.span.hi)
} else {
self.ty.span
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct Indent {
// Width of the block indent, in characters. Must be a multiple of
// Config::tab_spaces.
pub block_indent: usize,
// Alignment in characters.
pub alignment: usize,
}
impl Indent {
pub fn new(block_indent: usize, alignment: usize) -> Indent {
Indent {
block_indent: block_indent,
alignment: alignment,
}
}
pub fn empty() -> Indent {
Indent::new(0, 0)
}
pub fn block_indent(mut self, config: &Config) -> Indent {
self.block_indent += config.tab_spaces;
self
}
pub fn block_unindent(mut self, config: &Config) -> Indent {
self.block_indent -= config.tab_spaces;
self
}
pub fn width(&self) -> usize {
self.block_indent + self.alignment
}
pub fn to_string(&self, config: &Config) -> String {
let (num_tabs, num_spaces) = if config.hard_tabs {
(self.block_indent / config.tab_spaces, self.alignment)
} else {
(0, self.block_indent + self.alignment)
};
let num_chars = num_tabs + num_spaces;
let mut indent = String::with_capacity(num_chars);
for _ in 0..num_tabs {
indent.push('\t')
}
for _ in 0..num_spaces {
indent.push(' ')
}
indent
}
}
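// Editor's sketch (not part of rustfmt): the split `to_string` performs above,
// restated with plain integers — with hard tabs only the block indent becomes
// tabs, while the alignment is always emitted as spaces so continuation lines
// stay aligned no matter how wide a tab renders. Module name and the sample
// values are this editor's own.
#[cfg(test)]
mod indent_math_sketch {
    #[test]
    fn block_indent_becomes_tabs_alignment_stays_spaces() {
        let (tab_spaces, block_indent, alignment) = (4usize, 8usize, 3usize);
        let (num_tabs, num_spaces) = (block_indent / tab_spaces, alignment);
        assert_eq!((num_tabs, num_spaces), (2, 3));
    }
}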
impl Add for Indent {
type Output = Indent;
fn add(self, rhs: Indent) -> Indent {
Indent {
block_indent: self.block_indent + rhs.block_indent, | }
impl Sub for Indent {
type Output = Indent;
fn sub(self, rhs: Indent) -> Indent {
Indent::new(self.block_indent - rhs.block_indent,
self.alignment - rhs.alignment)
}
}
impl Add<usize> for Indent {
type Output = Indent;
fn add(self, rhs: usize) -> Indent {
Indent::new(self.block_indent, self.alignment + rhs)
}
}
impl Sub<usize> for Indent {
type Output = Indent;
fn sub(self, rhs: usize) -> Indent {
Indent::new(self.block_indent, self.alignment - rhs)
}
}
pub enum ErrorKind {
// Line has exceeded character limit
LineOverflow,
// Line ends in whitespace
TrailingWhitespace,
// TO-DO or FIX-ME item without an issue number
BadIssue(Issue),
}
impl fmt::Display for ErrorKind {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
ErrorKind::LineOverflow => write!(fmt, "line exceeded maximum length"),
ErrorKind::TrailingWhitespace => write!(fmt, "left behind trailing whitespace"),
ErrorKind::BadIssue(issue) => write!(fmt, "found {}", issue),
}
}
}
// Formatting errors that are identified *after* rustfmt has run.
pub struct FormattingError {
line: u32,
kind: ErrorKind,
}
impl FormattingError {
fn msg_prefix(&self) -> &str {
match self.kind {
ErrorKind::LineOverflow |
ErrorKind::TrailingWhitespace => "Rustfmt failed at",
ErrorKind::BadIssue(_) => "WARNING:",
}
}
fn msg_suffix(&self) -> &str {
match self.kind {
ErrorKind::LineOverflow |
ErrorKind::TrailingWhitespace => "(sorry)",
ErrorKind::BadIssue(_) => "",
}
}
}
pub struct FormatReport {
// Maps stringified file paths to their associated formatting errors.
file_error_map: HashMap<String, Vec<FormattingError>>,
}
impl FormatReport {
pub fn warning_count(&self) -> usize {
self.file_error_map.iter().map(|(_, ref errors)| errors.len()).fold(0, |acc, x| acc + x)
}
}
impl fmt::Display for FormatReport {
// Prints all the formatting errors.
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
for (file, errors) in &self.file_error_map {
for error in errors {
try!(write!(fmt,
"{} {}:{}: {} {}\n",
error.msg_prefix(),
file,
error.line,
error.kind,
error.msg_suffix()));
}
}
Ok(())
}
}
// Formatting which depends on the AST.
fn fmt_ast(krate: &ast::Crate,
parse_session: &ParseSess,
main_file: &Path,
config: &Config)
-> FileMap {
let mut file_map = FileMap::new();
for (path, module) in modules::list_files(krate, parse_session.codemap()) {
if config.skip_children && path.as_path() != main_file {
continue;
}
let path = path.to_str().unwrap();
if config.verbose {
println!("Formatting {}", path);
}
let mut visitor = FmtVisitor::from_codemap(parse_session, config);
visitor.format_separate_mod(module);
file_map.insert(path.to_owned(), visitor.buffer);
}
file_map
}
// Formatting done on a char by char or line by line basis.
// TODO(#209) warn on bad license
// TODO(#20) other stuff for parity with make tidy
pub fn fmt_lines(file_map: &mut FileMap, config: &Config) -> FormatReport {
let mut truncate_todo = Vec::new();
let mut report = FormatReport { file_error_map: HashMap::new() };
// Iterate over the chars in the file map.
for (f, text) in file_map.iter() {
let mut trims = vec![];
let mut last_wspace: Option<usize> = None;
let mut line_len = 0;
let mut cur_line = 1;
let mut newline_count = 0;
let mut errors = vec![];
let mut issue_seeker = BadIssueSeeker::new(config.report_todo, config.report_fixme);
for (c, b) in text.chars() {
if c == '\r' {
line_len += c.len_utf8();
continue;
}
// Add warnings for bad todos/ fixmes
if let Some(issue) = issue_seeker.inspect(c) {
errors.push(FormattingError {
line: cur_line,
kind: ErrorKind::BadIssue(issue),
});
}
if c == '\n' {
// Check for (and record) trailing whitespace.
if let Some(lw) = last_wspace {
trims.push((cur_line, lw, b));
line_len -= b - lw;
}
// Check for any line width errors we couldn't correct.
if line_len > config.max_width {
errors.push(FormattingError {
line: cur_line,
kind: ErrorKind::LineOverflow,
});
}
line_len = 0;
cur_line += 1;
newline_count += 1;
last_wspace = None;
} else {
newline_count = 0;
line_len += c.len_utf8();
if c.is_whitespace() {
if last_wspace.is_none() {
last_wspace = Some(b);
}
} else {
last_wspace = None;
}
}
}
if newline_count > 1 {
debug!("track truncate: {} {} {}", f, text.len, newline_count);
truncate_todo.push((f.to_owned(), text.len - newline_count + 1))
}
for &(l, _, _) in &trims {
errors.push(FormattingError {
line: l,
kind: ErrorKind::TrailingWhitespace,
});
}
report.file_error_map.insert(f.to_owned(), errors);
}
for (f, l) in truncate_todo {
file_map.get_mut(&f).unwrap().truncate(l);
}
report
}
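// Editor's sketch (not part of rustfmt): the trailing-whitespace bookkeeping
// inside `fmt_lines` above, restated over a single plain line — `last_wspace`
// remembers the byte offset where the final whitespace run begins, and is
// reset whenever a non-whitespace character appears. Module name and the
// sample line are this editor's own.
#[cfg(test)]
mod trailing_ws_sketch {
    #[test]
    fn finds_where_the_trailing_whitespace_run_starts() {
        let line = "let x = 1;  ";
        let mut last_wspace: Option<usize> = None;
        for (b, c) in line.char_indices() {
            if c.is_whitespace() {
                if last_wspace.is_none() {
                    last_wspace = Some(b);
                }
            } else {
                last_wspace = None;
            }
        }
        assert_eq!(last_wspace, Some(10)); // the two spaces after the ';'
    }
}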
pub fn format_string(input: String, config: &Config) -> FileMap {
let path = "stdin";
let codemap = Rc::new(CodeMap::new());
let tty_handler = Handler::with_tty_emitter(ColorConfig::Auto,
None,
true,
false,
codemap.clone());
let mut parse_session = ParseSess::with_span_handler(tty_handler, codemap.clone());
let krate = parse::parse_crate_from_source_str(path.to_owned(),
input,
Vec::new(),
&parse_session);
// Suppress error output after parsing.
let silent_emitter = Box::new(EmitterWriter::new(Box::new(Vec::new()), None, codemap.clone()));
parse_session.span_diagnostic = Handler::with_emitter(true, false, silent_emitter);
// FIXME: we still use a FileMap even though we only have
// one file, because fmt_lines requires a FileMap
let mut file_map = FileMap::new();
// do the actual formatting
let mut visitor = FmtVisitor::from_codemap(&parse_session, config);
visitor.format_separate_mod(&krate.module);
// append final newline
visitor.buffer.push_str("\n");
file_map.insert(path.to_owned(), visitor.buffer);
file_map
}
pub fn format(file: &Path, config: &Config) -> FileMap {
let codemap = Rc::new(CodeMap::new());
let tty_handler = Handler::with_tty_emitter(ColorConfig::Auto,
None,
true,
false,
codemap.clone());
let mut parse_session = ParseSess::with_span_handler(tty_handler, codemap.clone());
let krate = parse::parse_crate_from_file(file, Vec::new(), &parse_session);
// Suppress error output after parsing.
let silent_emitter = Box::new(EmitterWriter::new(Box::new(Vec::new()), None, codemap.clone()));
parse_session.span_diagnostic = Handler::with_emitter(true, false, silent_emitter);
let mut file_map = fmt_ast(&krate, &parse_session, file, config);
// For some reason, the codemap does not include terminating
// newlines so we must add one on for each file. This is sad.
filemap::append_newlines(&mut file_map);
file_map
}
pub fn run(file: &Path, config: &Config) {
let mut result = format(file, config);
print!("{}", fmt_lines(&mut result, config));
let out = stdout();
let write_result = filemap::write_all_files(&result, out, config);
if let Err(msg) = write_result {
println!("Error writing files: {}", msg);
}
}
// Similar to run, but takes an input String instead of a file to format
pub fn run_from_stdin(input: String, config: &Config) {
let mut result = format_string(input, config);
fmt_lines(&mut result, config);
let mut out = stdout();
let write_result = filemap::write_file(&result["stdin"], "stdin", &mut out, config);
if let Err(msg) = write_result {
panic!("Error writing to stdout: {}", msg);
}
} | alignment: self.alignment + rhs.alignment,
}
} | random_line_split |
lib.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// TODO we're going to allocate a whole bunch of temp Strings, is it worth
// keeping some scratch mem for this and running our own StrPool?
// TODO for lint violations of names, emit a refactor script
#[macro_use]
extern crate log;
extern crate syntex_syntax as syntax;
extern crate rustc_serialize;
extern crate strings;
extern crate unicode_segmentation;
extern crate regex;
extern crate diff;
extern crate term;
use syntax::ast;
use syntax::codemap::{mk_sp, CodeMap, Span};
use syntax::errors::Handler;
use syntax::errors::emitter::{ColorConfig, EmitterWriter};
use syntax::parse::{self, ParseSess};
use std::io::stdout;
use std::ops::{Add, Sub};
use std::path::Path;
use std::rc::Rc;
use std::collections::HashMap;
use std::fmt;
use issues::{BadIssueSeeker, Issue};
use filemap::FileMap;
use visitor::FmtVisitor;
use config::Config;
#[macro_use]
mod utils;
pub mod config;
pub mod filemap;
mod visitor;
mod checkstyle;
mod items;
mod missed_spans;
mod lists;
mod types;
mod expr;
mod imports;
mod issues;
mod rewrite;
mod string;
mod comment;
mod modules;
pub mod rustfmt_diff;
mod chains;
mod macros;
mod patterns;
const MIN_STRING: usize = 10;
// When we get scoped annotations, we should have rustfmt::skip.
const SKIP_ANNOTATION: &'static str = "rustfmt_skip";
pub trait Spanned {
fn span(&self) -> Span;
}
impl Spanned for ast::Expr {
fn | (&self) -> Span {
self.span
}
}
impl Spanned for ast::Pat {
fn span(&self) -> Span {
self.span
}
}
impl Spanned for ast::Ty {
fn span(&self) -> Span {
self.span
}
}
impl Spanned for ast::Arg {
fn span(&self) -> Span {
if items::is_named_arg(self) {
mk_sp(self.pat.span.lo, self.ty.span.hi)
} else {
self.ty.span
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct Indent {
// Width of the block indent, in characters. Must be a multiple of
// Config::tab_spaces.
pub block_indent: usize,
// Alignment in characters.
pub alignment: usize,
}
impl Indent {
pub fn new(block_indent: usize, alignment: usize) -> Indent {
Indent {
block_indent: block_indent,
alignment: alignment,
}
}
pub fn empty() -> Indent {
Indent::new(0, 0)
}
pub fn block_indent(mut self, config: &Config) -> Indent {
self.block_indent += config.tab_spaces;
self
}
pub fn block_unindent(mut self, config: &Config) -> Indent {
self.block_indent -= config.tab_spaces;
self
}
pub fn width(&self) -> usize {
self.block_indent + self.alignment
}
pub fn to_string(&self, config: &Config) -> String {
let (num_tabs, num_spaces) = if config.hard_tabs {
(self.block_indent / config.tab_spaces, self.alignment)
} else {
(0, self.block_indent + self.alignment)
};
let num_chars = num_tabs + num_spaces;
let mut indent = String::with_capacity(num_chars);
for _ in 0..num_tabs {
indent.push('\t')
}
for _ in 0..num_spaces {
indent.push(' ')
}
indent
}
}
impl Add for Indent {
type Output = Indent;
fn add(self, rhs: Indent) -> Indent {
Indent {
block_indent: self.block_indent + rhs.block_indent,
alignment: self.alignment + rhs.alignment,
}
}
}
impl Sub for Indent {
type Output = Indent;
fn sub(self, rhs: Indent) -> Indent {
Indent::new(self.block_indent - rhs.block_indent,
self.alignment - rhs.alignment)
}
}
impl Add<usize> for Indent {
type Output = Indent;
fn add(self, rhs: usize) -> Indent {
Indent::new(self.block_indent, self.alignment + rhs)
}
}
impl Sub<usize> for Indent {
type Output = Indent;
fn sub(self, rhs: usize) -> Indent {
Indent::new(self.block_indent, self.alignment - rhs)
}
}
pub enum ErrorKind {
// Line has exceeded character limit
LineOverflow,
// Line ends in whitespace
TrailingWhitespace,
// TO-DO or FIX-ME item without an issue number
BadIssue(Issue),
}
impl fmt::Display for ErrorKind {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
ErrorKind::LineOverflow => write!(fmt, "line exceeded maximum length"),
ErrorKind::TrailingWhitespace => write!(fmt, "left behind trailing whitespace"),
ErrorKind::BadIssue(issue) => write!(fmt, "found {}", issue),
}
}
}
// Formatting errors that are identified *after* rustfmt has run.
pub struct FormattingError {
line: u32,
kind: ErrorKind,
}
impl FormattingError {
fn msg_prefix(&self) -> &str {
match self.kind {
ErrorKind::LineOverflow |
ErrorKind::TrailingWhitespace => "Rustfmt failed at",
ErrorKind::BadIssue(_) => "WARNING:",
}
}
fn msg_suffix(&self) -> &str {
match self.kind {
ErrorKind::LineOverflow |
ErrorKind::TrailingWhitespace => "(sorry)",
ErrorKind::BadIssue(_) => "",
}
}
}
pub struct FormatReport {
// Maps stringified file paths to their associated formatting errors.
file_error_map: HashMap<String, Vec<FormattingError>>,
}
impl FormatReport {
pub fn warning_count(&self) -> usize {
self.file_error_map.iter().map(|(_, ref errors)| errors.len()).fold(0, |acc, x| acc + x)
}
}
impl fmt::Display for FormatReport {
// Prints all the formatting errors.
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
for (file, errors) in &self.file_error_map {
for error in errors {
try!(write!(fmt,
"{} {}:{}: {} {}\n",
error.msg_prefix(),
file,
error.line,
error.kind,
error.msg_suffix()));
}
}
Ok(())
}
}
// Formatting which depends on the AST.
fn fmt_ast(krate: &ast::Crate,
parse_session: &ParseSess,
main_file: &Path,
config: &Config)
-> FileMap {
let mut file_map = FileMap::new();
for (path, module) in modules::list_files(krate, parse_session.codemap()) {
if config.skip_children && path.as_path() != main_file {
continue;
}
let path = path.to_str().unwrap();
if config.verbose {
println!("Formatting {}", path);
}
let mut visitor = FmtVisitor::from_codemap(parse_session, config);
visitor.format_separate_mod(module);
file_map.insert(path.to_owned(), visitor.buffer);
}
file_map
}
// Formatting done on a char by char or line by line basis.
// TODO(#209) warn on bad license
// TODO(#20) other stuff for parity with make tidy
pub fn fmt_lines(file_map: &mut FileMap, config: &Config) -> FormatReport {
let mut truncate_todo = Vec::new();
let mut report = FormatReport { file_error_map: HashMap::new() };
// Iterate over the chars in the file map.
for (f, text) in file_map.iter() {
let mut trims = vec![];
let mut last_wspace: Option<usize> = None;
let mut line_len = 0;
let mut cur_line = 1;
let mut newline_count = 0;
let mut errors = vec![];
let mut issue_seeker = BadIssueSeeker::new(config.report_todo, config.report_fixme);
for (c, b) in text.chars() {
if c == '\r' {
line_len += c.len_utf8();
continue;
}
// Add warnings for bad todos/ fixmes
if let Some(issue) = issue_seeker.inspect(c) {
errors.push(FormattingError {
line: cur_line,
kind: ErrorKind::BadIssue(issue),
});
}
if c == '\n' {
// Check for (and record) trailing whitespace.
if let Some(lw) = last_wspace {
trims.push((cur_line, lw, b));
line_len -= b - lw;
}
// Check for any line width errors we couldn't correct.
if line_len > config.max_width {
errors.push(FormattingError {
line: cur_line,
kind: ErrorKind::LineOverflow,
});
}
line_len = 0;
cur_line += 1;
newline_count += 1;
last_wspace = None;
} else {
newline_count = 0;
line_len += c.len_utf8();
if c.is_whitespace() {
if last_wspace.is_none() {
last_wspace = Some(b);
}
} else {
last_wspace = None;
}
}
}
if newline_count > 1 {
debug!("track truncate: {} {} {}", f, text.len, newline_count);
truncate_todo.push((f.to_owned(), text.len - newline_count + 1))
}
for &(l, _, _) in &trims {
errors.push(FormattingError {
line: l,
kind: ErrorKind::TrailingWhitespace,
});
}
report.file_error_map.insert(f.to_owned(), errors);
}
for (f, l) in truncate_todo {
file_map.get_mut(&f).unwrap().truncate(l);
}
report
}
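// Editor's sketch (not part of rustfmt): `fmt_lines` accumulates
// `c.len_utf8()`, so the `max_width` comparison above measures *bytes*, not
// characters — a multi-byte character spends more of the line budget. Module
// name and the sample string are this editor's own.
#[cfg(test)]
mod line_len_sketch {
    #[test]
    fn line_length_is_counted_in_utf8_bytes() {
        let len: usize = "éλ中".chars().map(char::len_utf8).sum();
        assert_eq!(len, 7); // 2 + 2 + 3 bytes, though only 3 chars
    }
}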
pub fn format_string(input: String, config: &Config) -> FileMap {
let path = "stdin";
let codemap = Rc::new(CodeMap::new());
let tty_handler = Handler::with_tty_emitter(ColorConfig::Auto,
None,
true,
false,
codemap.clone());
let mut parse_session = ParseSess::with_span_handler(tty_handler, codemap.clone());
let krate = parse::parse_crate_from_source_str(path.to_owned(),
input,
Vec::new(),
&parse_session);
// Suppress error output after parsing.
let silent_emitter = Box::new(EmitterWriter::new(Box::new(Vec::new()), None, codemap.clone()));
parse_session.span_diagnostic = Handler::with_emitter(true, false, silent_emitter);
// FIXME: we still use a FileMap even though we only have
// one file, because fmt_lines requires a FileMap
let mut file_map = FileMap::new();
// do the actual formatting
let mut visitor = FmtVisitor::from_codemap(&parse_session, config);
visitor.format_separate_mod(&krate.module);
// append final newline
visitor.buffer.push_str("\n");
file_map.insert(path.to_owned(), visitor.buffer);
file_map
}
pub fn format(file: &Path, config: &Config) -> FileMap {
let codemap = Rc::new(CodeMap::new());
let tty_handler = Handler::with_tty_emitter(ColorConfig::Auto,
None,
true,
false,
codemap.clone());
let mut parse_session = ParseSess::with_span_handler(tty_handler, codemap.clone());
let krate = parse::parse_crate_from_file(file, Vec::new(), &parse_session);
// Suppress error output after parsing.
let silent_emitter = Box::new(EmitterWriter::new(Box::new(Vec::new()), None, codemap.clone()));
parse_session.span_diagnostic = Handler::with_emitter(true, false, silent_emitter);
let mut file_map = fmt_ast(&krate, &parse_session, file, config);
// For some reason, the codemap does not include terminating
// newlines so we must add one on for each file. This is sad.
filemap::append_newlines(&mut file_map);
file_map
}
pub fn run(file: &Path, config: &Config) {
let mut result = format(file, config);
print!("{}", fmt_lines(&mut result, config));
let out = stdout();
let write_result = filemap::write_all_files(&result, out, config);
if let Err(msg) = write_result {
println!("Error writing files: {}", msg);
}
}
// Similar to run, but takes an input String instead of a file to format
pub fn run_from_stdin(input: String, config: &Config) {
let mut result = format_string(input, config);
fmt_lines(&mut result, config);
let mut out = stdout();
let write_result = filemap::write_file(&result["stdin"], "stdin", &mut out, config);
if let Err(msg) = write_result {
panic!("Error writing to stdout: {}", msg);
}
}
| span | identifier_name |
aa_changes.rs | use crate::alphabet::aa::Aa;
use crate::alphabet::letter::Letter;
use crate::alphabet::letter::{serde_deserialize_seq, serde_serialize_seq};
use crate::alphabet::nuc::Nuc;
use crate::analyze::aa_del::AaDel;
use crate::analyze::aa_sub::AaSub;
use crate::analyze::nuc_del::NucDelRange;
use crate::analyze::nuc_sub::NucSub;
use crate::coord::coord_map_cds_to_global::cds_codon_pos_to_ref_range;
use crate::coord::position::{AaRefPosition, NucRefGlobalPosition, PositionLike};
use crate::coord::range::{have_intersection, AaRefRange, NucRefGlobalRange};
use crate::gene::cds::Cds;
use crate::gene::gene::GeneStrand;
use crate::gene::gene_map::GeneMap;
use crate::translate::complement::reverse_complement_in_place;
use crate::translate::translate_genes::{CdsTranslation, Translation};
use crate::utils::collections::extend_map_of_vecs;
use either::Either;
use eyre::Report;
use itertools::{Itertools, MinMaxResult};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AaChangeWithContext {
pub cds_name: String,
pub pos: AaRefPosition,
pub ref_aa: Aa,
pub qry_aa: Aa,
pub nuc_pos: NucRefGlobalPosition,
#[schemars(with = "String")]
#[serde(serialize_with = "serde_serialize_seq")]
#[serde(deserialize_with = "serde_deserialize_seq")]
pub ref_triplet: Vec<Nuc>,
#[schemars(with = "String")]
#[serde(serialize_with = "serde_serialize_seq")]
#[serde(deserialize_with = "serde_deserialize_seq")]
pub qry_triplet: Vec<Nuc>,
pub nuc_ranges: Vec<NucRefGlobalRange>,
}
impl AaChangeWithContext {
pub fn new(
cds: &Cds,
pos: AaRefPosition,
qry_seq: &[Nuc],
ref_seq: &[Nuc],
ref_tr: &CdsTranslation,
qry_tr: &CdsTranslation,
) -> Self {
let ref_aa = ref_tr.seq[pos.as_usize()];
let qry_aa = qry_tr.seq[pos.as_usize()];
let nuc_ranges = cds_codon_pos_to_ref_range(cds, pos);
let ref_triplet = nuc_ranges
.iter()
.flat_map(|(range, strand)| {
let mut nucs = ref_seq[range.to_std()].to_vec();
if strand == &GeneStrand::Reverse {
reverse_complement_in_place(&mut nucs);
}
nucs
})
.collect_vec();
let qry_triplet = nuc_ranges
.iter()
.flat_map(|(range, strand)| {
let mut nucs = qry_seq[range.clamp_range(0, qry_seq.len()).to_std()].to_vec();
if strand == &GeneStrand::Reverse {
reverse_complement_in_place(&mut nucs);
}
nucs
})
.collect_vec();
let nuc_ranges = nuc_ranges.into_iter().map(|(range, _)| range).collect_vec();
Self {
cds_name: cds.name.clone(),
pos,
ref_aa,
qry_aa,
nuc_pos: nuc_ranges[0].begin,
nuc_ranges,
ref_triplet,
qry_triplet,
}
}
#[inline]
pub fn is_mutated_or_deleted(&self) -> bool {
is_aa_mutated_or_deleted(self.ref_aa, self.qry_aa)
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AaChangesGroup {
name: String,
range: AaRefRange,
changes: Vec<AaChangeWithContext>,
nuc_subs: Vec<NucSub>,
nuc_dels: Vec<NucDelRange>,
}
impl AaChangesGroup {
pub fn new(name: impl AsRef<str>) -> Self {
Self::with_changes(name, vec![])
}
pub fn with_changes(name: impl AsRef<str>, changes: Vec<AaChangeWithContext>) -> Self {
Self {
name: name.as_ref().to_owned(),
range: Self::find_codon_range(&changes),
changes,
nuc_subs: vec![],
nuc_dels: vec![],
}
}
pub fn push(&mut self, change: AaChangeWithContext) {
self.changes.push(change);
self.range = Self::find_codon_range(&self.changes);
}
pub fn last(&self) -> Option<&AaChangeWithContext> {
self.changes.last()
}
fn find_codon_range(changes: &[AaChangeWithContext]) -> AaRefRange {
match changes.iter().minmax_by_key(|change| change.pos) {
MinMaxResult::NoElements => AaRefRange::from_isize(0, 0),
MinMaxResult::OneElement(one) => AaRefRange::new(one.pos, one.pos + 1),
MinMaxResult::MinMax(first, last) => AaRefRange::new(first.pos, last.pos + 1),
}
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct FindAaChangesOutput {
pub aa_changes_groups: Vec<AaChangesGroup>,
pub aa_substitutions: Vec<AaSub>,
pub aa_deletions: Vec<AaDel>,
pub nuc_to_aa_muts: BTreeMap<String, Vec<AaSub>>,
}
/// Finds aminoacid substitutions and deletions in query peptides relative to reference peptides, in all genes
///
/// ## Precondition
/// Nucleotide sequences and peptides are required to be stripped of insertions
pub fn find_aa_changes(
ref_seq: &[Nuc],
qry_seq: &[Nuc],
ref_translation: &Translation,
qry_translation: &Translation,
gene_map: &GeneMap,
nuc_subs: &[NucSub],
nuc_dels: &[NucDelRange],
) -> Result<FindAaChangesOutput, Report> |
changes.aa_substitutions.sort();
changes.aa_deletions.sort();
changes.nuc_to_aa_muts.iter_mut().for_each(|(_, vals)| {
vals.sort();
vals.dedup();
});
Ok(changes)
}
/// Finds aminoacid substitutions and deletions in query peptides relative to reference peptides, in one gene
///
/// ## Precondition
/// Nucleotide sequences and peptides are required to be stripped of insertions
///
///
/// ## Implementation details
/// We compare reference and query peptides (extracted by the preceding call to Nextalign),
/// one aminoacid at a time, and deduce changes. We then report the change and relevant nucleotide context surrounding
/// this change.
/// Previously we reported a one-to-one mapping of aminoacid changes to corresponding nucleotide changes. However, it
/// was not always accurate, because if there are multiple nucleotide changes in a codon, the direct correspondence
/// might not always be established without knowing the order in which nucleotide changes have occurred. And in the
/// context of Nextclade we don't have this information.
fn find_aa_changes_for_cds(
cds: &Cds,
qry_seq: &[Nuc],
ref_seq: &[Nuc],
ref_tr: &CdsTranslation,
qry_tr: &CdsTranslation,
nuc_subs: &[NucSub],
nuc_dels: &[NucDelRange],
) -> FindAaChangesOutput {
assert_eq!(ref_tr.seq.len(), qry_tr.seq.len());
assert_eq!(qry_seq.len(), ref_seq.len());
let aa_alignment_ranges = &qry_tr.alignment_ranges;
let mut aa_changes_groups = vec![AaChangesGroup::new(&cds.name)];
let mut curr_group = aa_changes_groups.last_mut().unwrap();
for codon in AaRefRange::from_usize(0, qry_tr.seq.len()).iter() {
if !is_codon_sequenced(aa_alignment_ranges, codon) {
continue;
}
let ref_aa = ref_tr.seq[codon.as_usize()];
let qry_aa = qry_tr.seq[codon.as_usize()];
if is_aa_mutated_or_deleted(ref_aa, qry_aa) {
match curr_group.last() {
// If current group is empty, then we are about to insert the first codon into the first group.
None => {
if codon > 0 && is_codon_sequenced(aa_alignment_ranges, codon - 1) {
// Also prepend one codon to the left, for additional context, to start the group
curr_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// The current codon itself
curr_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
}
// Current group is not empty
Some(prev) => {
// If previous codon in the group is adjacent or almost adjacent (there is 1 item in between),
// then append to the group.
if codon <= prev.pos + 2 {
// If previous codon in the group is not exactly adjacent, there is 1 item in between,
// then cover the hole by inserting previous codon.
if codon == prev.pos + 2 && is_codon_sequenced(aa_alignment_ranges, codon - 1) {
curr_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// And insert the current codon
curr_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
}
// If previous codon in the group is not adjacent, then terminate the current group and start a new group.
else {
// Add one codon to the right, for additional context, to finalize the current group
if is_codon_sequenced(aa_alignment_ranges, prev.pos + 1) {
curr_group.push(AaChangeWithContext::new(
cds,
prev.pos + 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
let mut new_group = AaChangesGroup::new(&cds.name);
// Start a new group and push the current codon into it.
if is_codon_sequenced(aa_alignment_ranges, codon - 1) {
// Also prepend one codon to the left, for additional context, to start the new group.
new_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// Push the current codon to the new group
new_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
aa_changes_groups.push(new_group);
curr_group = aa_changes_groups.last_mut().unwrap();
}
}
}
}
}
// Add one codon to the right, for additional context, to finalize the last group
if let Some(last) = curr_group.last() {
if is_codon_sequenced(aa_alignment_ranges, last.pos + 1) {
curr_group.push(AaChangeWithContext::new(
cds,
last.pos + 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
}
// Keep only non-empty groups
aa_changes_groups.retain(|group| !group.range.is_empty() && !group.changes.is_empty());
aa_changes_groups.iter_mut().for_each(|group| {
let ranges = group
.range
.iter()
.flat_map(|codon| {
cds_codon_pos_to_ref_range(cds, codon)
.into_iter()
.map(|(range, _)| range)
})
.collect_vec();
group.nuc_subs = nuc_subs
.iter()
.filter(|nuc_sub| ranges.iter().any(|range| range.contains(nuc_sub.pos)))
.cloned()
.collect_vec();
group.nuc_dels = nuc_dels
.iter()
.filter(|nuc_del| ranges.iter().any(|range| have_intersection(range, nuc_del.range())))
.cloned()
.collect_vec();
});
let (aa_substitutions, aa_deletions): (Vec<AaSub>, Vec<AaDel>) = aa_changes_groups
.iter()
.flat_map(|aa_changes_group| &aa_changes_group.changes)
.filter(|change| is_aa_mutated_or_deleted(change.ref_aa, change.qry_aa))
.partition_map(|change| {
if change.qry_aa.is_gap() {
Either::Right(AaDel {
cds_name: cds.name.clone(),
ref_aa: change.ref_aa,
pos: change.pos,
})
} else {
Either::Left(AaSub {
cds_name: cds.name.clone(),
ref_aa: change.ref_aa,
pos: change.pos,
qry_aa: change.qry_aa,
})
}
});
// Associate nuc positions with aa mutations.
let nuc_to_aa_muts: BTreeMap<String, Vec<AaSub>> = aa_changes_groups
.iter()
.flat_map(|group| {
group
.changes
.iter()
.filter(|change| AaChangeWithContext::is_mutated_or_deleted(change))
.flat_map(|change| {
change.nuc_ranges.iter().flat_map(move |range| {
range.iter()
// TODO: We convert position to string here, because when communicating with WASM we will pass through
// JSON schema, and JSON object keys must be strings. Maybe there is a way to keep the keys as numbers?
.map(move |pos| (pos.to_string(), AaSub::from(change)))
})
})
})
.into_group_map()
.into_iter()
.map(|(pos, mut aa_muts)| {
aa_muts.sort();
aa_muts.dedup();
(pos, aa_muts)
})
.collect();
FindAaChangesOutput {
aa_changes_groups,
aa_substitutions,
aa_deletions,
nuc_to_aa_muts,
}
}
/// Check whether a given pair of reference and query aminoacids constitutes a mutation or deletion
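///
/// For example: `(R, K)` counts as a mutation and `(R, -)` (a gap) as a deletion, while
/// `(R, R)` is no change and `(R, X)` is deliberately ignored.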
#[inline]
fn is_aa_mutated_or_deleted(ref_aa: Aa, qry_aa: Aa) -> bool {
// NOTE: We chose to ignore mutations to `X`.
qry_aa != ref_aa && qry_aa != Aa::X
}
/// Check whether a given codon position corresponds to a sequenced aminoacid
fn is_codon_sequenced(aa_alignment_ranges: &[AaRefRange], codon: AaRefPosition) -> bool {
aa_alignment_ranges
.iter()
.any(|aa_alignment_range| aa_alignment_range.contains(codon))
}
| {
let mut changes = qry_translation
.iter_cdses()
.map(|(qry_name, qry_cds_tr)| {
let ref_cds_tr = ref_translation.get_cds(qry_name)?;
let cds = gene_map.get_cds(&qry_cds_tr.name)?;
Ok(find_aa_changes_for_cds(
cds, qry_seq, ref_seq, ref_cds_tr, qry_cds_tr, nuc_subs, nuc_dels,
))
})
.collect::<Result<Vec<FindAaChangesOutput>, Report>>()?
.into_iter()
// Merge changes from all CDSes into one struct
.fold(FindAaChangesOutput::default(), |mut output, changes| {
output.aa_changes_groups.extend(changes.aa_changes_groups);
output.aa_substitutions.extend(changes.aa_substitutions);
output.aa_deletions.extend(changes.aa_deletions);
extend_map_of_vecs(&mut output.nuc_to_aa_muts, changes.nuc_to_aa_muts);
output
}); | identifier_body |
aa_changes.rs | use crate::alphabet::aa::Aa;
use crate::alphabet::letter::Letter;
use crate::alphabet::letter::{serde_deserialize_seq, serde_serialize_seq};
use crate::alphabet::nuc::Nuc;
use crate::analyze::aa_del::AaDel;
use crate::analyze::aa_sub::AaSub;
use crate::analyze::nuc_del::NucDelRange;
use crate::analyze::nuc_sub::NucSub;
use crate::coord::coord_map_cds_to_global::cds_codon_pos_to_ref_range;
use crate::coord::position::{AaRefPosition, NucRefGlobalPosition, PositionLike};
use crate::coord::range::{have_intersection, AaRefRange, NucRefGlobalRange};
use crate::gene::cds::Cds;
use crate::gene::gene::GeneStrand;
use crate::gene::gene_map::GeneMap;
use crate::translate::complement::reverse_complement_in_place;
use crate::translate::translate_genes::{CdsTranslation, Translation};
use crate::utils::collections::extend_map_of_vecs;
use either::Either;
use eyre::Report;
use itertools::{Itertools, MinMaxResult};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AaChangeWithContext {
pub cds_name: String,
pub pos: AaRefPosition,
pub ref_aa: Aa,
pub qry_aa: Aa,
pub nuc_pos: NucRefGlobalPosition,
#[schemars(with = "String")]
#[serde(serialize_with = "serde_serialize_seq")]
#[serde(deserialize_with = "serde_deserialize_seq")]
pub ref_triplet: Vec<Nuc>,
#[schemars(with = "String")]
#[serde(serialize_with = "serde_serialize_seq")]
#[serde(deserialize_with = "serde_deserialize_seq")]
pub qry_triplet: Vec<Nuc>,
pub nuc_ranges: Vec<NucRefGlobalRange>,
}
impl AaChangeWithContext {
pub fn new(
cds: &Cds,
pos: AaRefPosition,
qry_seq: &[Nuc],
ref_seq: &[Nuc],
ref_tr: &CdsTranslation,
qry_tr: &CdsTranslation,
) -> Self {
let ref_aa = ref_tr.seq[pos.as_usize()];
let qry_aa = qry_tr.seq[pos.as_usize()];
let nuc_ranges = cds_codon_pos_to_ref_range(cds, pos);
let ref_triplet = nuc_ranges
.iter()
.flat_map(|(range, strand)| {
let mut nucs = ref_seq[range.to_std()].to_vec();
if strand == &GeneStrand::Reverse {
reverse_complement_in_place(&mut nucs);
}
nucs
})
.collect_vec();
let qry_triplet = nuc_ranges
.iter()
.flat_map(|(range, strand)| {
let mut nucs = qry_seq[range.clamp_range(0, qry_seq.len()).to_std()].to_vec();
if strand == &GeneStrand::Reverse {
reverse_complement_in_place(&mut nucs);
}
nucs
})
.collect_vec();
let nuc_ranges = nuc_ranges.into_iter().map(|(range, _)| range).collect_vec();
Self {
cds_name: cds.name.clone(),
pos,
ref_aa,
qry_aa,
nuc_pos: nuc_ranges[0].begin,
nuc_ranges,
ref_triplet,
qry_triplet,
}
}
#[inline]
pub fn is_mutated_or_deleted(&self) -> bool {
is_aa_mutated_or_deleted(self.ref_aa, self.qry_aa)
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AaChangesGroup {
name: String,
range: AaRefRange,
changes: Vec<AaChangeWithContext>,
nuc_subs: Vec<NucSub>,
nuc_dels: Vec<NucDelRange>,
}
impl AaChangesGroup {
pub fn new(name: impl AsRef<str>) -> Self {
Self::with_changes(name, vec![])
}
pub fn with_changes(name: impl AsRef<str>, changes: Vec<AaChangeWithContext>) -> Self {
Self {
name: name.as_ref().to_owned(),
range: Self::find_codon_range(&changes),
changes,
nuc_subs: vec![],
nuc_dels: vec![],
}
}
pub fn push(&mut self, change: AaChangeWithContext) {
self.changes.push(change);
self.range = Self::find_codon_range(&self.changes);
}
pub fn last(&self) -> Option<&AaChangeWithContext> {
self.changes.last()
}
fn find_codon_range(changes: &[AaChangeWithContext]) -> AaRefRange {
match changes.iter().minmax_by_key(|change| change.pos) {
MinMaxResult::NoElements => AaRefRange::from_isize(0, 0),
MinMaxResult::OneElement(one) => AaRefRange::new(one.pos, one.pos + 1),
MinMaxResult::MinMax(first, last) => AaRefRange::new(first.pos, last.pos + 1),
}
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct FindAaChangesOutput {
pub aa_changes_groups: Vec<AaChangesGroup>,
pub aa_substitutions: Vec<AaSub>,
pub aa_deletions: Vec<AaDel>,
pub nuc_to_aa_muts: BTreeMap<String, Vec<AaSub>>,
}
/// Finds aminoacid substitutions and deletions in query peptides relative to reference peptides, in all genes
///
/// ## Precondition
/// Nucleotide sequences and peptides are required to be stripped of insertions
pub fn find_aa_changes(
ref_seq: &[Nuc],
qry_seq: &[Nuc],
ref_translation: &Translation,
qry_translation: &Translation,
gene_map: &GeneMap,
nuc_subs: &[NucSub],
nuc_dels: &[NucDelRange],
) -> Result<FindAaChangesOutput, Report> {
let mut changes = qry_translation
.iter_cdses()
.map(|(qry_name, qry_cds_tr)| {
let ref_cds_tr = ref_translation.get_cds(qry_name)?;
let cds = gene_map.get_cds(&qry_cds_tr.name)?;
Ok(find_aa_changes_for_cds(
cds, qry_seq, ref_seq, ref_cds_tr, qry_cds_tr, nuc_subs, nuc_dels,
))
})
.collect::<Result<Vec<FindAaChangesOutput>, Report>>()?
.into_iter()
// Merge changes from all CDSes into one struct
.fold(FindAaChangesOutput::default(), |mut output, changes| {
output.aa_changes_groups.extend(changes.aa_changes_groups);
output.aa_substitutions.extend(changes.aa_substitutions);
output.aa_deletions.extend(changes.aa_deletions);
extend_map_of_vecs(&mut output.nuc_to_aa_muts, changes.nuc_to_aa_muts);
output
});
changes.aa_substitutions.sort();
changes.aa_deletions.sort();
changes.nuc_to_aa_muts.iter_mut().for_each(|(_, vals)| {
vals.sort();
vals.dedup();
});
Ok(changes)
}
/// Finds aminoacid substitutions and deletions in query peptides relative to reference peptides, in one gene
///
/// ## Precondition
/// Nucleotide sequences and peptides are required to be stripped of insertions
///
///
/// ## Implementation details
/// We compare reference and query peptides (extracted by the preceding call to Nextalign),
/// one aminoacid at a time, and deduce changes. We then report the change and relevant nucleotide context surrounding
/// this change.
/// Previously we reported one-to-one mapping of aminoacid changes to corresponding nucleotide changes. However, it
/// was not always accurate, because if there are multiple nucleotide changes in a codon, the direct correspondence
/// might not always be established without knowing the order in which nucleotide changes have occurred. And in the
/// context of Nextclade we don't have this information.
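///
/// A hypothetical illustration (codons invented for this sketch, not taken from real data):
///
/// ```text
/// ref codon: ATG (Met)
/// qry codon: GTA (Val)
/// ```
///
/// Two nucleotides changed within this one codon. Depending on the unknown order of events,
/// the intermediate codon was either GTG (Val) or ATA (Ile), so neither nucleotide change can
/// be paired with the aminoacid change on its own; reporting the whole codon with its
/// nucleotide context sidesteps that ambiguity.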
fn find_aa_changes_for_cds(
cds: &Cds,
qry_seq: &[Nuc],
ref_seq: &[Nuc],
ref_tr: &CdsTranslation,
qry_tr: &CdsTranslation,
nuc_subs: &[NucSub],
nuc_dels: &[NucDelRange],
) -> FindAaChangesOutput {
assert_eq!(ref_tr.seq.len(), qry_tr.seq.len());
assert_eq!(qry_seq.len(), ref_seq.len());
let aa_alignment_ranges = &qry_tr.alignment_ranges;
let mut aa_changes_groups = vec![AaChangesGroup::new(&cds.name)];
let mut curr_group = aa_changes_groups.last_mut().unwrap();
for codon in AaRefRange::from_usize(0, qry_tr.seq.len()).iter() {
if !is_codon_sequenced(aa_alignment_ranges, codon) {
continue;
}
let ref_aa = ref_tr.seq[codon.as_usize()];
let qry_aa = qry_tr.seq[codon.as_usize()];
if is_aa_mutated_or_deleted(ref_aa, qry_aa) {
match curr_group.last() {
// If current group is empty, then we are about to insert the first codon into the first group.
None => {
if codon > 0 && is_codon_sequenced(aa_alignment_ranges, codon - 1) {
// Also prepend one codon to the left, for additional context, to start the group
curr_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// The current codon itself
curr_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
}
// Current group is not empty
Some(prev) => {
// If previous codon in the group is adjacent or almost adjacent (there is 1 item in between), | // then append to the group.
if codon <= prev.pos + 2 {
// If the previous codon in the group is not exactly adjacent (there is 1 item in between),
// then cover the hole by inserting the previous codon.
if codon == prev.pos + 2 && is_codon_sequenced(aa_alignment_ranges, codon - 1) {
curr_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// And insert the current codon
curr_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
}
// If previous codon in the group is not adjacent, then terminate the current group and start a new group.
else {
// Add one codon to the right, for additional context, to finalize the current group
if is_codon_sequenced(aa_alignment_ranges, prev.pos + 1) {
curr_group.push(AaChangeWithContext::new(
cds,
prev.pos + 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
let mut new_group = AaChangesGroup::new(&cds.name);
// Start a new group and push the current codon into it.
if is_codon_sequenced(aa_alignment_ranges, codon - 1) {
// Also prepend one codon to the left, for additional context, to start the new group.
new_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// Push the current codon to the new group
new_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
aa_changes_groups.push(new_group);
curr_group = aa_changes_groups.last_mut().unwrap();
}
}
}
}
}
// Add one codon to the right, for additional context, to finalize the last group
if let Some(last) = curr_group.last() {
if is_codon_sequenced(aa_alignment_ranges, last.pos + 1) {
curr_group.push(AaChangeWithContext::new(
cds,
last.pos + 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
}
// Keep only non-empty groups
aa_changes_groups.retain(|group| !group.range.is_empty() && !group.changes.is_empty());
aa_changes_groups.iter_mut().for_each(|group| {
let ranges = group
.range
.iter()
.flat_map(|codon| {
cds_codon_pos_to_ref_range(cds, codon)
.into_iter()
.map(|(range, _)| range)
})
.collect_vec();
group.nuc_subs = nuc_subs
.iter()
.filter(|nuc_sub| ranges.iter().any(|range| range.contains(nuc_sub.pos)))
.cloned()
.collect_vec();
group.nuc_dels = nuc_dels
.iter()
.filter(|nuc_del| ranges.iter().any(|range| have_intersection(range, nuc_del.range())))
.cloned()
.collect_vec();
});
let (aa_substitutions, aa_deletions): (Vec<AaSub>, Vec<AaDel>) = aa_changes_groups
.iter()
.flat_map(|aa_changes_group| &aa_changes_group.changes)
.filter(|change| is_aa_mutated_or_deleted(change.ref_aa, change.qry_aa))
.partition_map(|change| {
if change.qry_aa.is_gap() {
Either::Right(AaDel {
cds_name: cds.name.clone(),
ref_aa: change.ref_aa,
pos: change.pos,
})
} else {
Either::Left(AaSub {
cds_name: cds.name.clone(),
ref_aa: change.ref_aa,
pos: change.pos,
qry_aa: change.qry_aa,
})
}
});
// Associate nuc positions with aa mutations.
let nuc_to_aa_muts: BTreeMap<String, Vec<AaSub>> = aa_changes_groups
.iter()
.flat_map(|group| {
group
.changes
.iter()
.filter(|change| AaChangeWithContext::is_mutated_or_deleted(change))
.flat_map(|change| {
change.nuc_ranges.iter().flat_map(move |range| {
range.iter()
// TODO: We convert position to string here, because when communicating with WASM we will pass through
// JSON schema, and JSON object keys must be strings. Maybe there is a way to keep the keys as numbers?
.map(move |pos| (pos.to_string(), AaSub::from(change)))
})
})
})
.into_group_map()
.into_iter()
.map(|(pos, mut aa_muts)| {
aa_muts.sort();
aa_muts.dedup();
(pos, aa_muts)
})
.collect();
FindAaChangesOutput {
aa_changes_groups,
aa_substitutions,
aa_deletions,
nuc_to_aa_muts,
}
}
/// Check whether a given pair of reference and query aminoacids constitutes a mutation or deletion
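///
/// For example: `(R, K)` counts as a mutation and `(R, -)` (a gap) as a deletion, while
/// `(R, R)` is no change and `(R, X)` is deliberately ignored.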
#[inline]
fn is_aa_mutated_or_deleted(ref_aa: Aa, qry_aa: Aa) -> bool {
// NOTE: We chose to ignore mutations to `X`.
qry_aa != ref_aa && qry_aa != Aa::X
}
/// Check whether a given codon position corresponds to a sequenced aminoacid
fn is_codon_sequenced(aa_alignment_ranges: &[AaRefRange], codon: AaRefPosition) -> bool {
aa_alignment_ranges
.iter()
.any(|aa_alignment_range| aa_alignment_range.contains(codon))
} | random_line_split |
|
aa_changes.rs | use crate::alphabet::aa::Aa;
use crate::alphabet::letter::Letter;
use crate::alphabet::letter::{serde_deserialize_seq, serde_serialize_seq};
use crate::alphabet::nuc::Nuc;
use crate::analyze::aa_del::AaDel;
use crate::analyze::aa_sub::AaSub;
use crate::analyze::nuc_del::NucDelRange;
use crate::analyze::nuc_sub::NucSub;
use crate::coord::coord_map_cds_to_global::cds_codon_pos_to_ref_range;
use crate::coord::position::{AaRefPosition, NucRefGlobalPosition, PositionLike};
use crate::coord::range::{have_intersection, AaRefRange, NucRefGlobalRange};
use crate::gene::cds::Cds;
use crate::gene::gene::GeneStrand;
use crate::gene::gene_map::GeneMap;
use crate::translate::complement::reverse_complement_in_place;
use crate::translate::translate_genes::{CdsTranslation, Translation};
use crate::utils::collections::extend_map_of_vecs;
use either::Either;
use eyre::Report;
use itertools::{Itertools, MinMaxResult};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AaChangeWithContext {
pub cds_name: String,
pub pos: AaRefPosition,
pub ref_aa: Aa,
pub qry_aa: Aa,
pub nuc_pos: NucRefGlobalPosition,
#[schemars(with = "String")]
#[serde(serialize_with = "serde_serialize_seq")]
#[serde(deserialize_with = "serde_deserialize_seq")]
pub ref_triplet: Vec<Nuc>,
#[schemars(with = "String")]
#[serde(serialize_with = "serde_serialize_seq")]
#[serde(deserialize_with = "serde_deserialize_seq")]
pub qry_triplet: Vec<Nuc>,
pub nuc_ranges: Vec<NucRefGlobalRange>,
}
impl AaChangeWithContext {
pub fn new(
cds: &Cds,
pos: AaRefPosition,
qry_seq: &[Nuc],
ref_seq: &[Nuc],
ref_tr: &CdsTranslation,
qry_tr: &CdsTranslation,
) -> Self {
let ref_aa = ref_tr.seq[pos.as_usize()];
let qry_aa = qry_tr.seq[pos.as_usize()];
let nuc_ranges = cds_codon_pos_to_ref_range(cds, pos);
let ref_triplet = nuc_ranges
.iter()
.flat_map(|(range, strand)| {
let mut nucs = ref_seq[range.to_std()].to_vec();
if strand == &GeneStrand::Reverse {
reverse_complement_in_place(&mut nucs);
}
nucs
})
.collect_vec();
let qry_triplet = nuc_ranges
.iter()
.flat_map(|(range, strand)| {
let mut nucs = qry_seq[range.clamp_range(0, qry_seq.len()).to_std()].to_vec();
if strand == &GeneStrand::Reverse {
reverse_complement_in_place(&mut nucs);
}
nucs
})
.collect_vec();
let nuc_ranges = nuc_ranges.into_iter().map(|(range, _)| range).collect_vec();
Self {
cds_name: cds.name.clone(),
pos,
ref_aa,
qry_aa,
nuc_pos: nuc_ranges[0].begin,
nuc_ranges,
ref_triplet,
qry_triplet,
}
}
#[inline]
pub fn is_mutated_or_deleted(&self) -> bool {
is_aa_mutated_or_deleted(self.ref_aa, self.qry_aa)
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AaChangesGroup {
name: String,
range: AaRefRange,
changes: Vec<AaChangeWithContext>,
nuc_subs: Vec<NucSub>,
nuc_dels: Vec<NucDelRange>,
}
impl AaChangesGroup {
pub fn new(name: impl AsRef<str>) -> Self {
Self::with_changes(name, vec![])
}
pub fn with_changes(name: impl AsRef<str>, changes: Vec<AaChangeWithContext>) -> Self {
Self {
name: name.as_ref().to_owned(),
range: Self::find_codon_range(&changes),
changes,
nuc_subs: vec![],
nuc_dels: vec![],
}
}
pub fn push(&mut self, change: AaChangeWithContext) {
self.changes.push(change);
self.range = Self::find_codon_range(&self.changes);
}
pub fn last(&self) -> Option<&AaChangeWithContext> {
self.changes.last()
}
fn find_codon_range(changes: &[AaChangeWithContext]) -> AaRefRange {
match changes.iter().minmax_by_key(|change| change.pos) {
MinMaxResult::NoElements => AaRefRange::from_isize(0, 0),
MinMaxResult::OneElement(one) => AaRefRange::new(one.pos, one.pos + 1),
MinMaxResult::MinMax(first, last) => AaRefRange::new(first.pos, last.pos + 1),
}
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct FindAaChangesOutput {
pub aa_changes_groups: Vec<AaChangesGroup>,
pub aa_substitutions: Vec<AaSub>,
pub aa_deletions: Vec<AaDel>,
pub nuc_to_aa_muts: BTreeMap<String, Vec<AaSub>>,
}
/// Finds aminoacid substitutions and deletions in query peptides relative to reference peptides, in all genes
///
/// ## Precondition
/// Nucleotide sequences and peptides are required to be stripped of insertions
pub fn find_aa_changes(
ref_seq: &[Nuc],
qry_seq: &[Nuc],
ref_translation: &Translation,
qry_translation: &Translation,
gene_map: &GeneMap,
nuc_subs: &[NucSub],
nuc_dels: &[NucDelRange],
) -> Result<FindAaChangesOutput, Report> {
let mut changes = qry_translation
.iter_cdses()
.map(|(qry_name, qry_cds_tr)| {
let ref_cds_tr = ref_translation.get_cds(qry_name)?;
let cds = gene_map.get_cds(&qry_cds_tr.name)?;
Ok(find_aa_changes_for_cds(
cds, qry_seq, ref_seq, ref_cds_tr, qry_cds_tr, nuc_subs, nuc_dels,
))
})
.collect::<Result<Vec<FindAaChangesOutput>, Report>>()?
.into_iter()
// Merge changes from all CDSes into one struct
.fold(FindAaChangesOutput::default(), |mut output, changes| {
output.aa_changes_groups.extend(changes.aa_changes_groups);
output.aa_substitutions.extend(changes.aa_substitutions);
output.aa_deletions.extend(changes.aa_deletions);
extend_map_of_vecs(&mut output.nuc_to_aa_muts, changes.nuc_to_aa_muts);
output
});
changes.aa_substitutions.sort();
changes.aa_deletions.sort();
changes.nuc_to_aa_muts.iter_mut().for_each(|(_, vals)| {
vals.sort();
vals.dedup();
});
Ok(changes)
}
/// Finds aminoacid substitutions and deletions in query peptides relative to reference peptides, in one gene
///
/// ## Precondition
/// Nucleotide sequences and peptides are required to be stripped of insertions
///
///
/// ## Implementation details
/// We compare reference and query peptides (extracted by the preceding call to Nextalign),
/// one aminoacid at a time, and deduce changes. We then report the change and relevant nucleotide context surrounding
/// this change.
/// Previously we reported one-to-one mapping of aminoacid changes to corresponding nucleotide changes. However, it
/// was not always accurate, because if there are multiple nucleotide changes in a codon, the direct correspondence
/// might not always be established without knowing the order in which nucleotide changes have occurred. And in the
/// context of Nextclade we don't have this information.
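///
/// A hypothetical illustration (codons invented for this sketch, not taken from real data):
///
/// ```text
/// ref codon: ATG (Met)
/// qry codon: GTA (Val)
/// ```
///
/// Two nucleotides changed within this one codon. Depending on the unknown order of events,
/// the intermediate codon was either GTG (Val) or ATA (Ile), so neither nucleotide change can
/// be paired with the aminoacid change on its own; reporting the whole codon with its
/// nucleotide context sidesteps that ambiguity.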
fn find_aa_changes_for_cds(
cds: &Cds,
qry_seq: &[Nuc],
ref_seq: &[Nuc],
ref_tr: &CdsTranslation,
qry_tr: &CdsTranslation,
nuc_subs: &[NucSub],
nuc_dels: &[NucDelRange],
) -> FindAaChangesOutput {
assert_eq!(ref_tr.seq.len(), qry_tr.seq.len());
assert_eq!(qry_seq.len(), ref_seq.len());
let aa_alignment_ranges = &qry_tr.alignment_ranges;
let mut aa_changes_groups = vec![AaChangesGroup::new(&cds.name)];
let mut curr_group = aa_changes_groups.last_mut().unwrap();
for codon in AaRefRange::from_usize(0, qry_tr.seq.len()).iter() {
if !is_codon_sequenced(aa_alignment_ranges, codon) {
continue;
}
let ref_aa = ref_tr.seq[codon.as_usize()];
let qry_aa = qry_tr.seq[codon.as_usize()];
if is_aa_mutated_or_deleted(ref_aa, qry_aa) {
match curr_group.last() {
// If current group is empty, then we are about to insert the first codon into the first group.
None => |
// Current group is not empty
Some(prev) => {
// If previous codon in the group is adjacent or almost adjacent (there is 1 item in between),
// then append to the group.
if codon <= prev.pos + 2 {
// If the previous codon in the group is not exactly adjacent (there is 1 item in between),
// then cover the hole by inserting the previous codon.
if codon == prev.pos + 2 && is_codon_sequenced(aa_alignment_ranges, codon - 1) {
curr_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// And insert the current codon
curr_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
}
// If previous codon in the group is not adjacent, then terminate the current group and start a new group.
else {
// Add one codon to the right, for additional context, to finalize the current group
if is_codon_sequenced(aa_alignment_ranges, prev.pos + 1) {
curr_group.push(AaChangeWithContext::new(
cds,
prev.pos + 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
let mut new_group = AaChangesGroup::new(&cds.name);
// Start a new group and push the current codon into it.
if is_codon_sequenced(aa_alignment_ranges, codon - 1) {
// Also prepend one codon to the left, for additional context, to start the new group.
new_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// Push the current codon to the new group
new_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
aa_changes_groups.push(new_group);
curr_group = aa_changes_groups.last_mut().unwrap();
}
}
}
}
}
// Add one codon to the right, for additional context, to finalize the last group
if let Some(last) = curr_group.last() {
if is_codon_sequenced(aa_alignment_ranges, last.pos + 1) {
curr_group.push(AaChangeWithContext::new(
cds,
last.pos + 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
}
// Keep only non-empty groups
aa_changes_groups.retain(|group| !group.range.is_empty() && !group.changes.is_empty());
aa_changes_groups.iter_mut().for_each(|group| {
let ranges = group
.range
.iter()
.flat_map(|codon| {
cds_codon_pos_to_ref_range(cds, codon)
.into_iter()
.map(|(range, _)| range)
})
.collect_vec();
group.nuc_subs = nuc_subs
.iter()
.filter(|nuc_sub| ranges.iter().any(|range| range.contains(nuc_sub.pos)))
.cloned()
.collect_vec();
group.nuc_dels = nuc_dels
.iter()
.filter(|nuc_del| ranges.iter().any(|range| have_intersection(range, nuc_del.range())))
.cloned()
.collect_vec();
});
let (aa_substitutions, aa_deletions): (Vec<AaSub>, Vec<AaDel>) = aa_changes_groups
.iter()
.flat_map(|aa_changes_group| &aa_changes_group.changes)
.filter(|change| is_aa_mutated_or_deleted(change.ref_aa, change.qry_aa))
.partition_map(|change| {
if change.qry_aa.is_gap() {
Either::Right(AaDel {
cds_name: cds.name.clone(),
ref_aa: change.ref_aa,
pos: change.pos,
})
} else {
Either::Left(AaSub {
cds_name: cds.name.clone(),
ref_aa: change.ref_aa,
pos: change.pos,
qry_aa: change.qry_aa,
})
}
});
// Associate nuc positions with aa mutations.
let nuc_to_aa_muts: BTreeMap<String, Vec<AaSub>> = aa_changes_groups
.iter()
.flat_map(|group| {
group
.changes
.iter()
.filter(|change| AaChangeWithContext::is_mutated_or_deleted(change))
.flat_map(|change| {
change.nuc_ranges.iter().flat_map(move |range| {
range.iter()
// TODO: We convert position to string here, because when communicating with WASM we will pass through
// JSON schema, and JSON object keys must be strings. Maybe there is a way to keep the keys as numbers?
.map(move |pos| (pos.to_string(), AaSub::from(change)))
})
})
})
.into_group_map()
.into_iter()
.map(|(pos, mut aa_muts)| {
aa_muts.sort();
aa_muts.dedup();
(pos, aa_muts)
})
.collect();
FindAaChangesOutput {
aa_changes_groups,
aa_substitutions,
aa_deletions,
nuc_to_aa_muts,
}
}
/// Check whether a given pair of reference and query aminoacids constitutes a mutation or deletion
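///
/// For example: `(R, K)` counts as a mutation and `(R, -)` (a gap) as a deletion, while
/// `(R, R)` is no change and `(R, X)` is deliberately ignored.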
#[inline]
fn is_aa_mutated_or_deleted(ref_aa: Aa, qry_aa: Aa) -> bool {
// NOTE: We chose to ignore mutations to `X`.
qry_aa != ref_aa && qry_aa != Aa::X
}
/// Check whether a given codon position corresponds to a sequenced aminoacid
fn is_codon_sequenced(aa_alignment_ranges: &[AaRefRange], codon: AaRefPosition) -> bool {
aa_alignment_ranges
.iter()
.any(|aa_alignment_range| aa_alignment_range.contains(codon))
}
| {
if codon > 0 && is_codon_sequenced(aa_alignment_ranges, codon - 1) {
// Also prepend one codon to the left, for additional context, to start the group
curr_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// The current codon itself
curr_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
} | conditional_block |
aa_changes.rs | use crate::alphabet::aa::Aa;
use crate::alphabet::letter::Letter;
use crate::alphabet::letter::{serde_deserialize_seq, serde_serialize_seq};
use crate::alphabet::nuc::Nuc;
use crate::analyze::aa_del::AaDel;
use crate::analyze::aa_sub::AaSub;
use crate::analyze::nuc_del::NucDelRange;
use crate::analyze::nuc_sub::NucSub;
use crate::coord::coord_map_cds_to_global::cds_codon_pos_to_ref_range;
use crate::coord::position::{AaRefPosition, NucRefGlobalPosition, PositionLike};
use crate::coord::range::{have_intersection, AaRefRange, NucRefGlobalRange};
use crate::gene::cds::Cds;
use crate::gene::gene::GeneStrand;
use crate::gene::gene_map::GeneMap;
use crate::translate::complement::reverse_complement_in_place;
use crate::translate::translate_genes::{CdsTranslation, Translation};
use crate::utils::collections::extend_map_of_vecs;
use either::Either;
use eyre::Report;
use itertools::{Itertools, MinMaxResult};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct | {
pub cds_name: String,
pub pos: AaRefPosition,
pub ref_aa: Aa,
pub qry_aa: Aa,
pub nuc_pos: NucRefGlobalPosition,
#[schemars(with = "String")]
#[serde(serialize_with = "serde_serialize_seq")]
#[serde(deserialize_with = "serde_deserialize_seq")]
pub ref_triplet: Vec<Nuc>,
#[schemars(with = "String")]
#[serde(serialize_with = "serde_serialize_seq")]
#[serde(deserialize_with = "serde_deserialize_seq")]
pub qry_triplet: Vec<Nuc>,
pub nuc_ranges: Vec<NucRefGlobalRange>,
}
impl AaChangeWithContext {
pub fn new(
cds: &Cds,
pos: AaRefPosition,
qry_seq: &[Nuc],
ref_seq: &[Nuc],
ref_tr: &CdsTranslation,
qry_tr: &CdsTranslation,
) -> Self {
let ref_aa = ref_tr.seq[pos.as_usize()];
let qry_aa = qry_tr.seq[pos.as_usize()];
let nuc_ranges = cds_codon_pos_to_ref_range(cds, pos);
let ref_triplet = nuc_ranges
.iter()
.flat_map(|(range, strand)| {
let mut nucs = ref_seq[range.to_std()].to_vec();
if strand == &GeneStrand::Reverse {
reverse_complement_in_place(&mut nucs);
}
nucs
})
.collect_vec();
let qry_triplet = nuc_ranges
.iter()
.flat_map(|(range, strand)| {
let mut nucs = qry_seq[range.clamp_range(0, qry_seq.len()).to_std()].to_vec();
if strand == &GeneStrand::Reverse {
reverse_complement_in_place(&mut nucs);
}
nucs
})
.collect_vec();
let nuc_ranges = nuc_ranges.into_iter().map(|(range, _)| range).collect_vec();
Self {
cds_name: cds.name.clone(),
pos,
ref_aa,
qry_aa,
nuc_pos: nuc_ranges[0].begin,
nuc_ranges,
ref_triplet,
qry_triplet,
}
}
#[inline]
pub fn is_mutated_or_deleted(&self) -> bool {
is_aa_mutated_or_deleted(self.ref_aa, self.qry_aa)
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct AaChangesGroup {
name: String,
range: AaRefRange,
changes: Vec<AaChangeWithContext>,
nuc_subs: Vec<NucSub>,
nuc_dels: Vec<NucDelRange>,
}
impl AaChangesGroup {
pub fn new(name: impl AsRef<str>) -> Self {
Self::with_changes(name, vec![])
}
pub fn with_changes(name: impl AsRef<str>, changes: Vec<AaChangeWithContext>) -> Self {
Self {
name: name.as_ref().to_owned(),
range: Self::find_codon_range(&changes),
changes,
nuc_subs: vec![],
nuc_dels: vec![],
}
}
pub fn push(&mut self, change: AaChangeWithContext) {
self.changes.push(change);
self.range = Self::find_codon_range(&self.changes);
}
pub fn last(&self) -> Option<&AaChangeWithContext> {
self.changes.last()
}
fn find_codon_range(changes: &[AaChangeWithContext]) -> AaRefRange {
match changes.iter().minmax_by_key(|change| change.pos) {
MinMaxResult::NoElements => AaRefRange::from_isize(0, 0),
MinMaxResult::OneElement(one) => AaRefRange::new(one.pos, one.pos + 1),
MinMaxResult::MinMax(first, last) => AaRefRange::new(first.pos, last.pos + 1),
}
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, schemars::JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct FindAaChangesOutput {
pub aa_changes_groups: Vec<AaChangesGroup>,
pub aa_substitutions: Vec<AaSub>,
pub aa_deletions: Vec<AaDel>,
pub nuc_to_aa_muts: BTreeMap<String, Vec<AaSub>>,
}
/// Finds aminoacid substitutions and deletions in query peptides relative to reference peptides, in all genes
///
/// ## Precondition
/// Nucleotide sequences and peptides are required to be stripped of insertions
pub fn find_aa_changes(
ref_seq: &[Nuc],
qry_seq: &[Nuc],
ref_translation: &Translation,
qry_translation: &Translation,
gene_map: &GeneMap,
nuc_subs: &[NucSub],
nuc_dels: &[NucDelRange],
) -> Result<FindAaChangesOutput, Report> {
let mut changes = qry_translation
.iter_cdses()
.map(|(qry_name, qry_cds_tr)| {
let ref_cds_tr = ref_translation.get_cds(qry_name)?;
let cds = gene_map.get_cds(&qry_cds_tr.name)?;
Ok(find_aa_changes_for_cds(
cds, qry_seq, ref_seq, ref_cds_tr, qry_cds_tr, nuc_subs, nuc_dels,
))
})
.collect::<Result<Vec<FindAaChangesOutput>, Report>>()?
.into_iter()
// Merge changes from all CDSes into one struct
.fold(FindAaChangesOutput::default(), |mut output, changes| {
output.aa_changes_groups.extend(changes.aa_changes_groups);
output.aa_substitutions.extend(changes.aa_substitutions);
output.aa_deletions.extend(changes.aa_deletions);
extend_map_of_vecs(&mut output.nuc_to_aa_muts, changes.nuc_to_aa_muts);
output
});
changes.aa_substitutions.sort();
changes.aa_deletions.sort();
changes.nuc_to_aa_muts.iter_mut().for_each(|(_, vals)| {
vals.sort();
vals.dedup();
});
Ok(changes)
}
/// Finds aminoacid substitutions and deletions in query peptides relative to reference peptides, in one gene
///
/// ## Precondition
/// Nucleotide sequences and peptides are required to be stripped of insertions
///
///
/// ## Implementation details
/// We compare reference and query peptides (extracted by the preceding call to Nextalign),
/// one aminoacid at a time, and deduce changes. We then report the change and relevant nucleotide context surrounding
/// this change.
/// Previously we reported one-to-one mapping of aminoacid changes to corresponding nucleotide changes. However, it
/// was not always accurate, because if there are multiple nucleotide changes in a codon, the direct correspondence
/// might not always be established without knowing the order in which nucleotide changes have occurred. And in the
/// context of Nextclade we don't have this information.
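///
/// A hypothetical illustration (codons invented for this sketch, not taken from real data):
///
/// ```text
/// ref codon: ATG (Met)
/// qry codon: GTA (Val)
/// ```
///
/// Two nucleotides changed within this one codon. Depending on the unknown order of events,
/// the intermediate codon was either GTG (Val) or ATA (Ile), so neither nucleotide change can
/// be paired with the aminoacid change on its own; reporting the whole codon with its
/// nucleotide context sidesteps that ambiguity.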
fn find_aa_changes_for_cds(
cds: &Cds,
qry_seq: &[Nuc],
ref_seq: &[Nuc],
ref_tr: &CdsTranslation,
qry_tr: &CdsTranslation,
nuc_subs: &[NucSub],
nuc_dels: &[NucDelRange],
) -> FindAaChangesOutput {
assert_eq!(ref_tr.seq.len(), qry_tr.seq.len());
assert_eq!(qry_seq.len(), ref_seq.len());
let aa_alignment_ranges = &qry_tr.alignment_ranges;
let mut aa_changes_groups = vec![AaChangesGroup::new(&cds.name)];
let mut curr_group = aa_changes_groups.last_mut().unwrap();
for codon in AaRefRange::from_usize(0, qry_tr.seq.len()).iter() {
if !is_codon_sequenced(aa_alignment_ranges, codon) {
continue;
}
let ref_aa = ref_tr.seq[codon.as_usize()];
let qry_aa = qry_tr.seq[codon.as_usize()];
if is_aa_mutated_or_deleted(ref_aa, qry_aa) {
match curr_group.last() {
// If current group is empty, then we are about to insert the first codon into the first group.
None => {
if codon > 0 && is_codon_sequenced(aa_alignment_ranges, codon - 1) {
// Also prepend one codon to the left, for additional context, to start the group
curr_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// The current codon itself
curr_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
}
// Current group is not empty
Some(prev) => {
// If previous codon in the group is adjacent or almost adjacent (there is 1 item in between),
// then append to the group.
if codon <= prev.pos + 2 {
// If the previous codon in the group is not exactly adjacent (there is 1 item in between),
// then cover the hole by inserting the previous codon.
if codon == prev.pos + 2 && is_codon_sequenced(aa_alignment_ranges, codon - 1) {
curr_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// And insert the current codon
curr_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
}
// If previous codon in the group is not adjacent, then terminate the current group and start a new group.
else {
// Add one codon to the right, for additional context, to finalize the current group
if is_codon_sequenced(aa_alignment_ranges, prev.pos + 1) {
curr_group.push(AaChangeWithContext::new(
cds,
prev.pos + 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
let mut new_group = AaChangesGroup::new(&cds.name);
// Start a new group and push the current codon into it.
if is_codon_sequenced(aa_alignment_ranges, codon - 1) {
// Also prepend one codon to the left, for additional context, to start the new group.
new_group.push(AaChangeWithContext::new(
cds,
codon - 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
// Push the current codon to the new group
new_group.push(AaChangeWithContext::new(cds, codon, qry_seq, ref_seq, ref_tr, qry_tr));
aa_changes_groups.push(new_group);
curr_group = aa_changes_groups.last_mut().unwrap();
}
}
}
}
}
// Add one codon to the right, for additional context, to finalize the last group
if let Some(last) = curr_group.last() {
if is_codon_sequenced(aa_alignment_ranges, last.pos + 1) {
curr_group.push(AaChangeWithContext::new(
cds,
last.pos + 1,
qry_seq,
ref_seq,
ref_tr,
qry_tr,
));
}
}
// Keep only non-empty groups
aa_changes_groups.retain(|group| !group.range.is_empty() && !group.changes.is_empty());
aa_changes_groups.iter_mut().for_each(|group| {
let ranges = group
.range
.iter()
.flat_map(|codon| {
cds_codon_pos_to_ref_range(cds, codon)
.into_iter()
.map(|(range, _)| range)
})
.collect_vec();
group.nuc_subs = nuc_subs
.iter()
.filter(|nuc_sub| ranges.iter().any(|range| range.contains(nuc_sub.pos)))
.cloned()
.collect_vec();
group.nuc_dels = nuc_dels
.iter()
.filter(|nuc_del| ranges.iter().any(|range| have_intersection(range, nuc_del.range())))
.cloned()
.collect_vec();
});
let (aa_substitutions, aa_deletions): (Vec<AaSub>, Vec<AaDel>) = aa_changes_groups
.iter()
.flat_map(|aa_changes_group| &aa_changes_group.changes)
.filter(|change| is_aa_mutated_or_deleted(change.ref_aa, change.qry_aa))
.partition_map(|change| {
if change.qry_aa.is_gap() {
Either::Right(AaDel {
cds_name: cds.name.clone(),
ref_aa: change.ref_aa,
pos: change.pos,
})
} else {
Either::Left(AaSub {
cds_name: cds.name.clone(),
ref_aa: change.ref_aa,
pos: change.pos,
qry_aa: change.qry_aa,
})
}
});
// Associate nuc positions with aa mutations.
let nuc_to_aa_muts: BTreeMap<String, Vec<AaSub>> = aa_changes_groups
.iter()
.flat_map(|group| {
group
.changes
.iter()
.filter(|change| AaChangeWithContext::is_mutated_or_deleted(change))
.flat_map(|change| {
change.nuc_ranges.iter().flat_map(move |range| {
range.iter()
// TODO: We convert position to string here, because when communicating with WASM we will pass through
// JSON schema, and JSON object keys must be strings. Maybe there is a way to keep the keys as numbers?
.map(move |pos| (pos.to_string(), AaSub::from(change)))
})
})
})
.into_group_map()
.into_iter()
.map(|(pos, mut aa_muts)| {
aa_muts.sort();
aa_muts.dedup();
(pos, aa_muts)
})
.collect();
FindAaChangesOutput {
aa_changes_groups,
aa_substitutions,
aa_deletions,
nuc_to_aa_muts,
}
}
/// Check whether a given pair of reference and query aminoacids constitutes a mutation or deletion
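///
/// For example: `(R, K)` counts as a mutation and `(R, -)` (a gap) as a deletion, while
/// `(R, R)` is no change and `(R, X)` is deliberately ignored.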
#[inline]
fn is_aa_mutated_or_deleted(ref_aa: Aa, qry_aa: Aa) -> bool {
// NOTE: We chose to ignore mutations to `X`.
qry_aa != ref_aa && qry_aa != Aa::X
}
/// Check whether a given codon position corresponds to a sequenced aminoacid
fn is_codon_sequenced(aa_alignment_ranges: &[AaRefRange], codon: AaRefPosition) -> bool {
aa_alignment_ranges
.iter()
.any(|aa_alignment_range| aa_alignment_range.contains(codon))
}
| AaChangeWithContext | identifier_name |
lib.rs | #![cfg_attr(feature = "cargo-clippy", allow(clone_on_ref_ptr))]
#![cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))]
#![deny(warnings)]
extern crate bytes;
extern crate conduit_proxy_controller_grpc;
extern crate env_logger;
extern crate deflate;
#[macro_use]
extern crate futures;
extern crate futures_mpsc_lossy;
extern crate futures_watch;
extern crate h2;
extern crate http;
extern crate httparse;
extern crate hyper;
#[cfg(target_os = "linux")]
extern crate inotify;
extern crate ipnet;
#[cfg(target_os = "linux")]
extern crate libc;
#[macro_use]
extern crate log;
#[cfg_attr(test, macro_use)]
extern crate indexmap;
#[cfg(target_os = "linux")]
extern crate procinfo;
extern crate prost;
extern crate prost_types;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
extern crate rand;
extern crate regex;
extern crate ring;
#[cfg(test)]
extern crate tempdir;
extern crate tokio;
extern crate tokio_connect;
extern crate tokio_timer;
extern crate tower_balance;
extern crate tower_buffer;
extern crate tower_discover;
extern crate tower_grpc;
extern crate tower_h2;
extern crate tower_h2_balance;
extern crate tower_reconnect;
extern crate tower_service;
extern crate conduit_proxy_router;
extern crate tower_util;
extern crate tower_in_flight_limit;
extern crate trust_dns_resolver;
extern crate try_lock;
use futures::*;
use std::error::Error;
use std::io;
use std::net::SocketAddr;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use indexmap::IndexSet;
use tokio::{
executor::{self, DefaultExecutor, Executor},
runtime::current_thread,
};
use tower_service::NewService;
use tower_fn::*;
use conduit_proxy_router::{Recognize, Router, Error as RouteError};
pub mod app;
mod bind;
pub mod config;
mod connection;
pub mod conditional;
pub mod control;
pub mod convert;
pub mod ctx;
mod dns;
mod drain;
pub mod fs_watch;
mod inbound;
mod logging;
mod map_err;
mod outbound;
pub mod stream;
pub mod task;
pub mod telemetry;
mod transparency;
mod transport;
pub mod timeout;
mod tower_fn; // TODO: move to tower-fn
mod watch_service; // TODO: move to tower
use bind::Bind;
use conditional::Conditional;
use connection::BoundPort;
use inbound::Inbound;
use map_err::MapErr;
use task::MainRuntime;
use transparency::{HttpBody, Server};
pub use transport::{AddrInfo, GetOriginalDst, SoOriginalDst, tls};
use outbound::Outbound;
pub use watch_service::WatchService;
/// Runs a sidecar proxy.
///
/// The proxy binds two listeners:
///
/// - a private socket (TCP or UNIX) for outbound requests to other instances;
/// - and a public socket (TCP and optionally TLS) for inbound requests from other
/// instances.
///
/// The public listener forwards requests to a local socket (TCP or UNIX).
///
/// The private listener routes requests to a service-discovery-aware load-balancer.
///
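/// A minimal usage sketch (hypothetical wiring: the `config` value is assumed to be built
/// elsewhere, and error handling is omitted):
///
/// ```ignore
/// let runtime = tokio::runtime::Runtime::new().unwrap();
/// let main = Main::new(config, SoOriginalDst, runtime);
/// let (_shutdown_tx, shutdown_rx) = futures::sync::oneshot::channel::<()>();
/// main.run_until(shutdown_rx.map_err(|_| ()));
/// ```
///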
pub struct Main<G> {
config: config::Config,
control_listener: BoundPort,
inbound_listener: BoundPort,
outbound_listener: BoundPort,
metrics_listener: BoundPort,
get_original_dst: G,
runtime: MainRuntime,
}
impl<G> Main<G>
where
G: GetOriginalDst + Clone + Send + 'static,
{
pub fn new<R>(
config: config::Config,
get_original_dst: G,
runtime: R
) -> Self
where
R: Into<MainRuntime>,
{
let control_listener = BoundPort::new(config.control_listener.addr)
.expect("controller listener bind");
let inbound_listener = BoundPort::new(config.public_listener.addr)
.expect("public listener bind");
let outbound_listener = BoundPort::new(config.private_listener.addr)
.expect("private listener bind");
let runtime = runtime.into();
let metrics_listener = BoundPort::new(config.metrics_listener.addr)
.expect("metrics listener bind");
Main {
config,
control_listener,
inbound_listener,
outbound_listener,
metrics_listener,
get_original_dst,
runtime,
}
}
pub fn control_addr(&self) -> SocketAddr {
self.control_listener.local_addr()
}
pub fn inbound_addr(&self) -> SocketAddr {
self.inbound_listener.local_addr()
}
pub fn outbound_addr(&self) -> SocketAddr {
self.outbound_listener.local_addr()
}
pub fn metrics_addr(&self) -> SocketAddr {
self.metrics_listener.local_addr()
}
pub fn run_until<F>(self, shutdown_signal: F)
where
F: Future<Item = (), Error = ()> + Send + 'static,
{
let process_ctx = ctx::Process::new(&self.config);
let Main {
config,
control_listener,
inbound_listener,
outbound_listener,
metrics_listener,
get_original_dst,
mut runtime,
} = self;
let control_host_and_port = config.control_host_and_port.clone();
info!("using controller at {:?}", control_host_and_port);
info!("routing on {:?}", outbound_listener.local_addr());
info!(
"proxying on {:?} to {:?}",
inbound_listener.local_addr(),
config.private_forward
);
info!(
"serving Prometheus metrics on {:?}",
metrics_listener.local_addr(),
);
info!(
"protocol detection disabled for inbound ports {:?}",
config.inbound_ports_disable_protocol_detection,
);
info!(
"protocol detection disabled for outbound ports {:?}",
config.outbound_ports_disable_protocol_detection,
);
let (taps, observe) = control::Observe::new(100);
let (sensors, telemetry) = telemetry::new(
&process_ctx,
config.event_buffer_capacity,
config.metrics_retain_idle,
&taps,
);
let (tls_client_config, tls_server_config, tls_cfg_bg) =
tls::watch_for_config_changes(
config.tls_settings.as_ref(),
sensors.tls_config(),
);
let controller_tls = config.tls_settings.as_ref().and_then(|settings| {
settings.controller_identity.as_ref().map(|controller_identity| {
tls::ConnectionConfig {
identity: controller_identity.clone(),
config: tls_client_config.clone(),
}
})
});
let (dns_resolver, dns_bg) = dns::Resolver::from_system_config_and_env(&config)
.unwrap_or_else(|e| {
// TODO: Make DNS configuration infallible.
panic!("invalid DNS configuration: {:?}", e);
});
let (resolver, resolver_bg) = control::destination::new(
dns_resolver.clone(),
config.namespaces.clone(),
control_host_and_port,
controller_tls,
);
let (drain_tx, drain_rx) = drain::channel();
let bind = Bind::new(tls_client_config).with_sensors(sensors.clone());
// Setup the public listener. This will listen on a publicly accessible
// address and listen for inbound connections that should be forwarded
// to the managed application (private destination).
let inbound = {
let ctx = ctx::Proxy::inbound(&process_ctx);
let bind = bind.clone().with_ctx(ctx.clone());
let default_addr = config.private_forward.map(|a| a.into());
let router = Router::new(
Inbound::new(default_addr, bind),
config.inbound_router_capacity,
config.inbound_router_max_idle_age,
);
let tls_settings = config.tls_settings.as_ref().map(|settings| {
tls::ConnectionConfig {
identity: settings.pod_identity.clone(),
config: tls_server_config
}
});
serve(
inbound_listener,
tls_settings,
router,
config.private_connect_timeout,
config.inbound_ports_disable_protocol_detection,
ctx,
sensors.clone(),
get_original_dst.clone(),
drain_rx.clone(),
)
};
// Setup the private listener. This will listen on a locally accessible
// address and listen for outbound requests that should be routed
// to a remote service (public destination).
let outbound = {
let ctx = ctx::Proxy::outbound(&process_ctx);
let bind = bind.clone().with_ctx(ctx.clone());
let router = Router::new(
Outbound::new(bind, resolver, config.bind_timeout),
config.outbound_router_capacity,
config.outbound_router_max_idle_age,
);
serve(
outbound_listener,
Conditional::None(tls::ReasonForNoTls::InternalTraffic),
router,
config.public_connect_timeout,
config.outbound_ports_disable_protocol_detection,
ctx,
sensors,
get_original_dst,
drain_rx,
)
};
trace!("running");
let (_tx, admin_shutdown_signal) = futures::sync::oneshot::channel::<()>();
{
thread::Builder::new()
.name("admin".into())
.spawn(move || {
use conduit_proxy_controller_grpc::tap::server::TapServer;
let mut rt = current_thread::Runtime::new()
.expect("initialize admin thread runtime");
let tap = serve_tap(control_listener, TapServer::new(observe));
let metrics_server = telemetry.serve_metrics(metrics_listener);
let fut = ::logging::admin().bg("resolver").future(resolver_bg)
.join5(
::logging::admin().bg("telemetry").future(telemetry),
tap.map_err(|_| {}),
metrics_server.map_err(|_| {}),
::logging::admin().bg("dns-resolver").future(dns_bg),
)
// There's no `Future::join6` combinator...
.join(::logging::admin().bg("tls-config").future(tls_cfg_bg))
.map(|_| {});
rt.spawn(Box::new(fut));
let shutdown = admin_shutdown_signal.then(|_| Ok::<(), ()>(()));
rt.block_on(shutdown).expect("admin");
trace!("admin shutdown finished");
})
.expect("initialize controller api thread");
trace!("controller client thread spawned");
}
let fut = inbound
.join(outbound)
.map(|_| ())
.map_err(|err| error!("main error: {:?}", err));
runtime.spawn(Box::new(fut));
trace!("main task spawned");
let shutdown_signal = shutdown_signal.and_then(move |()| {
debug!("shutdown signaled");
drain_tx.drain()
});
runtime.run_until(shutdown_signal).expect("executor");
debug!("shutdown complete");
}
}
fn serve<R, B, E, F, G>(
bound_port: BoundPort,
tls_config: tls::ConditionalConnectionConfig<tls::ServerConfigWatch>,
router: Router<R>,
tcp_connect_timeout: Duration,
disable_protocol_detection_ports: IndexSet<u16>,
proxy_ctx: Arc<ctx::Proxy>,
sensors: telemetry::Sensors,
get_orig_dst: G,
drain_rx: drain::Watch,
) -> impl Future<Item = (), Error = io::Error> + Send + 'static
where
B: tower_h2::Body + Default + Send + 'static,
B::Data: Send,
<B::Data as ::bytes::IntoBuf>::Buf: Send,
E: Error + Send + 'static,
F: Error + Send + 'static,
R: Recognize<
Request = http::Request<HttpBody>,
Response = http::Response<B>,
Error = E,
RouteError = F,
>
+ Send + Sync + 'static,
R::Key: Send,
R::Service: Send,
<R::Service as tower_service::Service>::Future: Send,
Router<R>: Send,
G: GetOriginalDst + Send + 'static,
{
let stack = Arc::new(NewServiceFn::new(move || {
// Clone the router handle
let router = router.clone();
// Map errors to appropriate response error codes.
let map_err = MapErr::new(router, |e| {
match e {
RouteError::Route(r) => {
error!(" turning route error: {} into 500", r);
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::Inner(i) => |
RouteError::NotRecognized => {
error!("turning route not recognized error into 500");
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::NoCapacity(capacity) => {
// TODO For H2 streams, we should probably signal a protocol-level
// capacity change.
error!("router at capacity ({}); returning a 503", capacity);
http::StatusCode::SERVICE_UNAVAILABLE
}
}
});
// Install the request open timestamp module at the very top
// of the stack, in order to take the timestamp as close as
// possible to the beginning of the request's lifetime.
telemetry::sensor::http::TimestampRequestOpen::new(map_err)
}));
let listen_addr = bound_port.local_addr();
let server = Server::new(
listen_addr,
proxy_ctx.clone(),
sensors,
get_orig_dst,
stack,
tcp_connect_timeout,
disable_protocol_detection_ports,
drain_rx.clone(),
);
let log = server.log().clone();
let accept = {
let fut = bound_port.listen_and_fold(
tls_config,
(),
move |(), (connection, remote_addr)| {
let s = server.serve(connection, remote_addr);
// Logging context is configured by the server.
let r = DefaultExecutor::current()
.spawn(Box::new(s))
.map_err(task::Error::into_io);
future::result(r)
},
);
log.future(fut)
};
let accept_until = Cancelable {
future: accept,
canceled: false,
};
// As soon as we get a shutdown signal, the listener
// is canceled immediately.
drain_rx.watch(accept_until, |accept| {
accept.canceled = true;
})
}
/// Can cancel a future by setting a flag.
///
/// Used to 'watch' the accept futures, and close the listeners
/// as soon as the shutdown signal starts.
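///
/// Sketch of the intended wiring (mirroring its use in `serve` above): wrap the accept
/// future, then flip the flag from the drain watch so the next poll resolves immediately:
///
/// ```ignore
/// let accept_until = Cancelable { future: accept, canceled: false };
/// drain_rx.watch(accept_until, |accept| accept.canceled = true);
/// ```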
struct Cancelable<F> {
future: F,
canceled: bool,
}
impl<F> Future for Cancelable<F>
where
F: Future<Item=()>,
{
type Item = ();
type Error = F::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.canceled {
Ok(().into())
} else {
self.future.poll()
}
}
}
fn serve_tap<N, B>(
bound_port: BoundPort,
new_service: N,
) -> impl Future<Item = (), Error = io::Error> + 'static
where
B: tower_h2::Body + Send + 'static,
<B::Data as bytes::IntoBuf>::Buf: Send,
N: NewService<
Request = http::Request<tower_h2::RecvBody>,
Response = http::Response<B>
>
+ Send + 'static,
tower_h2::server::Connection<
connection::Connection,
N,
::logging::ServerExecutor,
B,
()
>: Future<Item = ()>,
{
let log = logging::admin().server("tap", bound_port.local_addr());
let h2_builder = h2::server::Builder::default();
let server = tower_h2::Server::new(
new_service,
h2_builder,
log.clone().executor(),
);
let fut = {
let log = log.clone();
// TODO: serve over TLS.
bound_port.listen_and_fold(
Conditional::None(tls::ReasonForNoIdentity::NotImplementedForTap.into()),
server,
move |server, (session, remote)| {
let log = log.clone().with_remote(remote);
let serve = server.serve(session).map_err(|_| ());
let r = executor::current_thread::TaskExecutor::current()
.spawn_local(Box::new(log.future(serve)))
.map(move |_| server)
.map_err(task::Error::into_io);
future::result(r)
},
)
};
log.future(fut)
}
| {
error!("turning {} into 500", i);
http::StatusCode::INTERNAL_SERVER_ERROR
} | conditional_block |
lib.rs | #![cfg_attr(feature = "cargo-clippy", allow(clone_on_ref_ptr))]
#![cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))]
#![deny(warnings)]
extern crate bytes;
extern crate conduit_proxy_controller_grpc;
extern crate env_logger;
extern crate deflate;
#[macro_use]
extern crate futures;
extern crate futures_mpsc_lossy;
extern crate futures_watch;
extern crate h2;
extern crate http;
extern crate httparse;
extern crate hyper;
#[cfg(target_os = "linux")]
extern crate inotify;
extern crate ipnet;
#[cfg(target_os = "linux")]
extern crate libc;
#[macro_use]
extern crate log;
#[cfg_attr(test, macro_use)]
extern crate indexmap;
#[cfg(target_os = "linux")]
extern crate procinfo;
extern crate prost;
extern crate prost_types;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
extern crate rand;
extern crate regex;
extern crate ring;
#[cfg(test)]
extern crate tempdir;
extern crate tokio;
extern crate tokio_connect;
extern crate tokio_timer;
extern crate tower_balance;
extern crate tower_buffer;
extern crate tower_discover;
extern crate tower_grpc;
extern crate tower_h2;
extern crate tower_h2_balance;
extern crate tower_reconnect;
extern crate tower_service;
extern crate conduit_proxy_router;
extern crate tower_util;
extern crate tower_in_flight_limit;
extern crate trust_dns_resolver;
extern crate try_lock;
use futures::*;
use std::error::Error;
use std::io;
use std::net::SocketAddr;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use indexmap::IndexSet;
use tokio::{
executor::{self, DefaultExecutor, Executor},
runtime::current_thread,
};
use tower_service::NewService;
use tower_fn::*;
use conduit_proxy_router::{Recognize, Router, Error as RouteError};
pub mod app;
mod bind;
pub mod config;
mod connection;
pub mod conditional;
pub mod control;
pub mod convert;
pub mod ctx;
mod dns;
mod drain;
pub mod fs_watch;
mod inbound;
mod logging;
mod map_err;
mod outbound;
pub mod stream;
pub mod task;
pub mod telemetry;
mod transparency;
mod transport;
pub mod timeout;
mod tower_fn; // TODO: move to tower-fn
mod watch_service; // TODO: move to tower
use bind::Bind;
use conditional::Conditional;
use connection::BoundPort;
use inbound::Inbound;
use map_err::MapErr;
use task::MainRuntime;
use transparency::{HttpBody, Server};
pub use transport::{AddrInfo, GetOriginalDst, SoOriginalDst, tls};
use outbound::Outbound;
pub use watch_service::WatchService;
/// Runs a sidecar proxy.
///
/// The proxy binds two listeners:
///
/// - a private socket (TCP or UNIX) for outbound requests to other instances;
/// - and a public socket (TCP and optionally TLS) for inbound requests from other
/// instances.
///
/// The public listener forwards requests to a local socket (TCP or UNIX).
///
/// The private listener routes requests to a service-discovery-aware load-balancer.
///
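/// A minimal usage sketch (hypothetical wiring: the `config` value is assumed to be built
/// elsewhere, and error handling is omitted):
///
/// ```ignore
/// let runtime = tokio::runtime::Runtime::new().unwrap();
/// let main = Main::new(config, SoOriginalDst, runtime);
/// let (_shutdown_tx, shutdown_rx) = futures::sync::oneshot::channel::<()>();
/// main.run_until(shutdown_rx.map_err(|_| ()));
/// ```
///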
pub struct Main<G> {
config: config::Config,
control_listener: BoundPort,
inbound_listener: BoundPort,
outbound_listener: BoundPort,
metrics_listener: BoundPort,
get_original_dst: G,
runtime: MainRuntime,
}
impl<G> Main<G>
where
G: GetOriginalDst + Clone + Send + 'static,
{
pub fn new<R>(
config: config::Config,
get_original_dst: G,
runtime: R
) -> Self
where
R: Into<MainRuntime>,
{
let control_listener = BoundPort::new(config.control_listener.addr)
.expect("controller listener bind");
let inbound_listener = BoundPort::new(config.public_listener.addr)
.expect("public listener bind");
let outbound_listener = BoundPort::new(config.private_listener.addr)
.expect("private listener bind");
let runtime = runtime.into();
let metrics_listener = BoundPort::new(config.metrics_listener.addr)
.expect("metrics listener bind");
Main {
config,
control_listener,
inbound_listener,
outbound_listener,
metrics_listener,
get_original_dst,
runtime,
}
}
pub fn control_addr(&self) -> SocketAddr {
self.control_listener.local_addr()
}
pub fn inbound_addr(&self) -> SocketAddr {
self.inbound_listener.local_addr()
}
pub fn outbound_addr(&self) -> SocketAddr {
self.outbound_listener.local_addr()
}
pub fn metrics_addr(&self) -> SocketAddr {
self.metrics_listener.local_addr()
}
pub fn run_until<F>(self, shutdown_signal: F)
where
F: Future<Item = (), Error = ()> + Send + 'static,
{
let process_ctx = ctx::Process::new(&self.config);
let Main {
config,
control_listener,
inbound_listener,
outbound_listener,
metrics_listener,
get_original_dst,
mut runtime,
} = self;
let control_host_and_port = config.control_host_and_port.clone();
info!("using controller at {:?}", control_host_and_port);
info!("routing on {:?}", outbound_listener.local_addr());
info!(
"proxying on {:?} to {:?}",
inbound_listener.local_addr(),
config.private_forward
);
info!(
"serving Prometheus metrics on {:?}",
metrics_listener.local_addr(),
);
info!(
"protocol detection disabled for inbound ports {:?}",
config.inbound_ports_disable_protocol_detection,
);
info!(
"protocol detection disabled for outbound ports {:?}",
config.outbound_ports_disable_protocol_detection,
);
let (taps, observe) = control::Observe::new(100);
let (sensors, telemetry) = telemetry::new(
&process_ctx,
config.event_buffer_capacity,
config.metrics_retain_idle,
&taps,
);
let (tls_client_config, tls_server_config, tls_cfg_bg) =
tls::watch_for_config_changes(
config.tls_settings.as_ref(),
sensors.tls_config(),
);
let controller_tls = config.tls_settings.as_ref().and_then(|settings| {
settings.controller_identity.as_ref().map(|controller_identity| {
tls::ConnectionConfig {
identity: controller_identity.clone(),
config: tls_client_config.clone(),
}
})
});
let (dns_resolver, dns_bg) = dns::Resolver::from_system_config_and_env(&config)
.unwrap_or_else(|e| {
// TODO: Make DNS configuration infallible.
panic!("invalid DNS configuration: {:?}", e);
});
let (resolver, resolver_bg) = control::destination::new(
dns_resolver.clone(),
config.namespaces.clone(),
control_host_and_port,
controller_tls,
);
let (drain_tx, drain_rx) = drain::channel();
let bind = Bind::new(tls_client_config).with_sensors(sensors.clone());
// Set up the public listener. This will listen on a publicly accessible
// address and listen for inbound connections that should be forwarded
// to the managed application (private destination).
let inbound = {
let ctx = ctx::Proxy::inbound(&process_ctx);
let bind = bind.clone().with_ctx(ctx.clone());
let default_addr = config.private_forward.map(|a| a.into());
let router = Router::new(
Inbound::new(default_addr, bind),
config.inbound_router_capacity,
config.inbound_router_max_idle_age,
);
let tls_settings = config.tls_settings.as_ref().map(|settings| {
tls::ConnectionConfig {
identity: settings.pod_identity.clone(),
config: tls_server_config
}
});
serve(
inbound_listener,
tls_settings,
router,
config.private_connect_timeout,
config.inbound_ports_disable_protocol_detection,
ctx,
sensors.clone(),
get_original_dst.clone(),
drain_rx.clone(),
)
};
// Set up the private listener. This will listen on a locally accessible
// address and listen for outbound requests that should be routed
// to a remote service (public destination).
let outbound = {
let ctx = ctx::Proxy::outbound(&process_ctx);
let bind = bind.clone().with_ctx(ctx.clone());
let router = Router::new(
Outbound::new(bind, resolver, config.bind_timeout),
config.outbound_router_capacity,
config.outbound_router_max_idle_age,
);
serve(
outbound_listener,
Conditional::None(tls::ReasonForNoTls::InternalTraffic),
router,
config.public_connect_timeout,
config.outbound_ports_disable_protocol_detection,
ctx,
sensors,
get_original_dst,
drain_rx,
)
};
trace!("running");
let (_tx, admin_shutdown_signal) = futures::sync::oneshot::channel::<()>();
{
thread::Builder::new()
.name("admin".into())
.spawn(move || {
use conduit_proxy_controller_grpc::tap::server::TapServer;
let mut rt = current_thread::Runtime::new()
.expect("initialize admin thread runtime");
let tap = serve_tap(control_listener, TapServer::new(observe));
let metrics_server = telemetry.serve_metrics(metrics_listener);
let fut = ::logging::admin().bg("resolver").future(resolver_bg)
.join5(
::logging::admin().bg("telemetry").future(telemetry),
tap.map_err(|_| {}),
metrics_server.map_err(|_| {}),
::logging::admin().bg("dns-resolver").future(dns_bg),
)
// There's no `Future::join6` combinator...
.join(::logging::admin().bg("tls-config").future(tls_cfg_bg))
.map(|_| {});
rt.spawn(Box::new(fut));
let shutdown = admin_shutdown_signal.then(|_| Ok::<(), ()>(()));
rt.block_on(shutdown).expect("admin");
trace!("admin shutdown finished");
})
.expect("initialize controller api thread");
trace!("controller client thread spawned");
}
let fut = inbound
.join(outbound)
.map(|_| ())
.map_err(|err| error!("main error: {:?}", err));
runtime.spawn(Box::new(fut));
trace!("main task spawned");
let shutdown_signal = shutdown_signal.and_then(move |()| {
debug!("shutdown signaled");
drain_tx.drain()
});
runtime.run_until(shutdown_signal).expect("executor");
debug!("shutdown complete");
}
}
fn serve<R, B, E, F, G>(
bound_port: BoundPort,
tls_config: tls::ConditionalConnectionConfig<tls::ServerConfigWatch>,
router: Router<R>,
tcp_connect_timeout: Duration,
disable_protocol_detection_ports: IndexSet<u16>,
proxy_ctx: Arc<ctx::Proxy>,
sensors: telemetry::Sensors,
get_orig_dst: G,
drain_rx: drain::Watch,
) -> impl Future<Item = (), Error = io::Error> + Send + 'static
where
B: tower_h2::Body + Default + Send + 'static,
B::Data: Send,
<B::Data as ::bytes::IntoBuf>::Buf: Send,
E: Error + Send + 'static,
F: Error + Send + 'static,
R: Recognize<
Request = http::Request<HttpBody>,
Response = http::Response<B>,
Error = E,
RouteError = F,
>
+ Send + Sync + 'static,
R::Key: Send,
R::Service: Send,
<R::Service as tower_service::Service>::Future: Send,
Router<R>: Send,
G: GetOriginalDst + Send + 'static,
| RouteError::NoCapacity(capacity) => {
// TODO For H2 streams, we should probably signal a protocol-level
// capacity change.
error!("router at capacity ({}); returning a 503", capacity);
http::StatusCode::SERVICE_UNAVAILABLE
}
}
});
// Install the request open timestamp module at the very top
// of the stack, in order to take the timestamp as close as
// possible to the beginning of the request's lifetime.
telemetry::sensor::http::TimestampRequestOpen::new(map_err)
}));
let listen_addr = bound_port.local_addr();
let server = Server::new(
listen_addr,
proxy_ctx.clone(),
sensors,
get_orig_dst,
stack,
tcp_connect_timeout,
disable_protocol_detection_ports,
drain_rx.clone(),
);
let log = server.log().clone();
let accept = {
let fut = bound_port.listen_and_fold(
tls_config,
(),
move |(), (connection, remote_addr)| {
let s = server.serve(connection, remote_addr);
// Logging context is configured by the server.
let r = DefaultExecutor::current()
.spawn(Box::new(s))
.map_err(task::Error::into_io);
future::result(r)
},
);
log.future(fut)
};
let accept_until = Cancelable {
future: accept,
canceled: false,
};
// As soon as we get a shutdown signal, the listener
// is canceled immediately.
drain_rx.watch(accept_until, |accept| {
accept.canceled = true;
})
}
/// Can cancel a future by setting a flag.
///
/// Used to 'watch' the accept futures, and close the listeners
/// as soon as the shutdown signal starts.
struct Cancelable<F> {
future: F,
canceled: bool,
}
impl<F> Future for Cancelable<F>
where
F: Future<Item=()>,
{
type Item = ();
type Error = F::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.canceled {
Ok(().into())
} else {
self.future.poll()
}
}
}
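// Illustrative sketch (added for exposition; not part of the original source):
// how `Cancelable` behaves. The inner `future::empty()` stands in for an
// accept loop that never completes on its own; once the flag flips, the next
// poll reports readiness without touching the inner future. Assumes the
// futures 0.1 `Future` and `Async` items are in scope via `use futures::*`.
#[allow(dead_code)]
fn _cancelable_sketch() {
    let mut accept = Cancelable {
        future: future::empty::<(), ()>(),
        canceled: false,
    };
    // Still pending while the flag is clear: the inner future drives the poll.
    assert_eq!(accept.poll(), Ok(Async::NotReady));
    accept.canceled = true;
    // Cancelation completes the future on the next poll.
    assert_eq!(accept.poll(), Ok(Async::Ready(())));
}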
fn serve_tap<N, B>(
bound_port: BoundPort,
new_service: N,
) -> impl Future<Item = (), Error = io::Error> + 'static
where
B: tower_h2::Body + Send + 'static,
<B::Data as bytes::IntoBuf>::Buf: Send,
N: NewService<
Request = http::Request<tower_h2::RecvBody>,
Response = http::Response<B>
>
+ Send + 'static,
tower_h2::server::Connection<
connection::Connection,
N,
::logging::ServerExecutor,
B,
()
>: Future<Item = ()>,
{
let log = logging::admin().server("tap", bound_port.local_addr());
let h2_builder = h2::server::Builder::default();
let server = tower_h2::Server::new(
new_service,
h2_builder,
log.clone().executor(),
);
let fut = {
let log = log.clone();
// TODO: serve over TLS.
bound_port.listen_and_fold(
Conditional::None(tls::ReasonForNoIdentity::NotImplementedForTap.into()),
server,
move |server, (session, remote)| {
let log = log.clone().with_remote(remote);
let serve = server.serve(session).map_err(|_| ());
let r = executor::current_thread::TaskExecutor::current()
.spawn_local(Box::new(log.future(serve)))
.map(move |_| server)
.map_err(task::Error::into_io);
future::result(r)
},
)
};
log.future(fut)
}
| {
let stack = Arc::new(NewServiceFn::new(move || {
// Clone the router handle
let router = router.clone();
// Map errors to appropriate response error codes.
let map_err = MapErr::new(router, |e| {
match e {
RouteError::Route(r) => {
error!("turning route error: {} into 500", r);
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::Inner(i) => {
error!("turning {} into 500", i);
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::NotRecognized => {
error!("turning route not recognized error into 500");
http::StatusCode::INTERNAL_SERVER_ERROR
} | identifier_body |
lib.rs | #![cfg_attr(feature = "cargo-clippy", allow(clone_on_ref_ptr))]
#![cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))]
#![deny(warnings)]
extern crate bytes;
extern crate conduit_proxy_controller_grpc;
extern crate env_logger;
extern crate deflate;
#[macro_use]
extern crate futures;
extern crate futures_mpsc_lossy;
extern crate futures_watch;
extern crate h2;
extern crate http;
extern crate httparse;
extern crate hyper;
#[cfg(target_os = "linux")]
extern crate inotify;
extern crate ipnet;
#[cfg(target_os = "linux")]
extern crate libc;
#[macro_use]
extern crate log;
#[cfg_attr(test, macro_use)]
extern crate indexmap;
#[cfg(target_os = "linux")]
extern crate procinfo;
extern crate prost;
extern crate prost_types;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
extern crate rand;
extern crate regex;
extern crate ring;
#[cfg(test)]
extern crate tempdir;
extern crate tokio;
extern crate tokio_connect;
extern crate tokio_timer;
extern crate tower_balance;
extern crate tower_buffer;
extern crate tower_discover;
extern crate tower_grpc;
extern crate tower_h2;
extern crate tower_h2_balance;
extern crate tower_reconnect;
extern crate tower_service;
extern crate conduit_proxy_router;
extern crate tower_util;
extern crate tower_in_flight_limit;
extern crate trust_dns_resolver;
extern crate try_lock;
use futures::*;
use std::error::Error;
use std::io;
use std::net::SocketAddr;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use indexmap::IndexSet;
use tokio::{
executor::{self, DefaultExecutor, Executor},
runtime::current_thread,
};
use tower_service::NewService;
use tower_fn::*;
use conduit_proxy_router::{Recognize, Router, Error as RouteError};
pub mod app;
mod bind;
pub mod config;
mod connection;
pub mod conditional; | pub mod ctx;
mod dns;
mod drain;
pub mod fs_watch;
mod inbound;
mod logging;
mod map_err;
mod outbound;
pub mod stream;
pub mod task;
pub mod telemetry;
mod transparency;
mod transport;
pub mod timeout;
mod tower_fn; // TODO: move to tower-fn
mod watch_service; // TODO: move to tower
use bind::Bind;
use conditional::Conditional;
use connection::BoundPort;
use inbound::Inbound;
use map_err::MapErr;
use task::MainRuntime;
use transparency::{HttpBody, Server};
pub use transport::{AddrInfo, GetOriginalDst, SoOriginalDst, tls};
use outbound::Outbound;
pub use watch_service::WatchService;
/// Runs a sidecar proxy.
///
/// The proxy binds two listeners:
///
/// - a private socket (TCP or UNIX) for outbound requests to other instances;
/// - and a public socket (TCP and optionally TLS) for inbound requests from other
/// instances.
///
/// The public listener forwards requests to a local socket (TCP or UNIX).
///
/// The private listener routes requests to a service-discovery-aware load balancer.
///
pub struct Main<G> {
config: config::Config,
control_listener: BoundPort,
inbound_listener: BoundPort,
outbound_listener: BoundPort,
metrics_listener: BoundPort,
get_original_dst: G,
runtime: MainRuntime,
}
impl<G> Main<G>
where
G: GetOriginalDst + Clone + Send + 'static,
{
pub fn new<R>(
config: config::Config,
get_original_dst: G,
runtime: R
) -> Self
where
R: Into<MainRuntime>,
{
let control_listener = BoundPort::new(config.control_listener.addr)
.expect("controller listener bind");
let inbound_listener = BoundPort::new(config.public_listener.addr)
.expect("public listener bind");
let outbound_listener = BoundPort::new(config.private_listener.addr)
.expect("private listener bind");
let runtime = runtime.into();
let metrics_listener = BoundPort::new(config.metrics_listener.addr)
.expect("metrics listener bind");
Main {
config,
control_listener,
inbound_listener,
outbound_listener,
metrics_listener,
get_original_dst,
runtime,
}
}
pub fn control_addr(&self) -> SocketAddr {
self.control_listener.local_addr()
}
pub fn inbound_addr(&self) -> SocketAddr {
self.inbound_listener.local_addr()
}
pub fn outbound_addr(&self) -> SocketAddr {
self.outbound_listener.local_addr()
}
pub fn metrics_addr(&self) -> SocketAddr {
self.metrics_listener.local_addr()
}
pub fn run_until<F>(self, shutdown_signal: F)
where
F: Future<Item = (), Error = ()> + Send + 'static,
{
let process_ctx = ctx::Process::new(&self.config);
let Main {
config,
control_listener,
inbound_listener,
outbound_listener,
metrics_listener,
get_original_dst,
mut runtime,
} = self;
let control_host_and_port = config.control_host_and_port.clone();
info!("using controller at {:?}", control_host_and_port);
info!("routing on {:?}", outbound_listener.local_addr());
info!(
"proxying on {:?} to {:?}",
inbound_listener.local_addr(),
config.private_forward
);
info!(
"serving Prometheus metrics on {:?}",
metrics_listener.local_addr(),
);
info!(
"protocol detection disabled for inbound ports {:?}",
config.inbound_ports_disable_protocol_detection,
);
info!(
"protocol detection disabled for outbound ports {:?}",
config.outbound_ports_disable_protocol_detection,
);
let (taps, observe) = control::Observe::new(100);
let (sensors, telemetry) = telemetry::new(
&process_ctx,
config.event_buffer_capacity,
config.metrics_retain_idle,
&taps,
);
let (tls_client_config, tls_server_config, tls_cfg_bg) =
tls::watch_for_config_changes(
config.tls_settings.as_ref(),
sensors.tls_config(),
);
let controller_tls = config.tls_settings.as_ref().and_then(|settings| {
settings.controller_identity.as_ref().map(|controller_identity| {
tls::ConnectionConfig {
identity: controller_identity.clone(),
config: tls_client_config.clone(),
}
})
});
let (dns_resolver, dns_bg) = dns::Resolver::from_system_config_and_env(&config)
.unwrap_or_else(|e| {
// TODO: Make DNS configuration infallible.
panic!("invalid DNS configuration: {:?}", e);
});
let (resolver, resolver_bg) = control::destination::new(
dns_resolver.clone(),
config.namespaces.clone(),
control_host_and_port,
controller_tls,
);
let (drain_tx, drain_rx) = drain::channel();
let bind = Bind::new(tls_client_config).with_sensors(sensors.clone());
// Set up the public listener. This will listen on a publicly accessible
// address and listen for inbound connections that should be forwarded
// to the managed application (private destination).
let inbound = {
let ctx = ctx::Proxy::inbound(&process_ctx);
let bind = bind.clone().with_ctx(ctx.clone());
let default_addr = config.private_forward.map(|a| a.into());
let router = Router::new(
Inbound::new(default_addr, bind),
config.inbound_router_capacity,
config.inbound_router_max_idle_age,
);
let tls_settings = config.tls_settings.as_ref().map(|settings| {
tls::ConnectionConfig {
identity: settings.pod_identity.clone(),
config: tls_server_config
}
});
serve(
inbound_listener,
tls_settings,
router,
config.private_connect_timeout,
config.inbound_ports_disable_protocol_detection,
ctx,
sensors.clone(),
get_original_dst.clone(),
drain_rx.clone(),
)
};
// Set up the private listener. This will listen on a locally accessible
// address and listen for outbound requests that should be routed
// to a remote service (public destination).
let outbound = {
let ctx = ctx::Proxy::outbound(&process_ctx);
let bind = bind.clone().with_ctx(ctx.clone());
let router = Router::new(
Outbound::new(bind, resolver, config.bind_timeout),
config.outbound_router_capacity,
config.outbound_router_max_idle_age,
);
serve(
outbound_listener,
Conditional::None(tls::ReasonForNoTls::InternalTraffic),
router,
config.public_connect_timeout,
config.outbound_ports_disable_protocol_detection,
ctx,
sensors,
get_original_dst,
drain_rx,
)
};
trace!("running");
let (_tx, admin_shutdown_signal) = futures::sync::oneshot::channel::<()>();
{
thread::Builder::new()
.name("admin".into())
.spawn(move || {
use conduit_proxy_controller_grpc::tap::server::TapServer;
let mut rt = current_thread::Runtime::new()
.expect("initialize admin thread runtime");
let tap = serve_tap(control_listener, TapServer::new(observe));
let metrics_server = telemetry.serve_metrics(metrics_listener);
let fut = ::logging::admin().bg("resolver").future(resolver_bg)
.join5(
::logging::admin().bg("telemetry").future(telemetry),
tap.map_err(|_| {}),
metrics_server.map_err(|_| {}),
::logging::admin().bg("dns-resolver").future(dns_bg),
)
// There's no `Future::join6` combinator...
.join(::logging::admin().bg("tls-config").future(tls_cfg_bg))
.map(|_| {});
rt.spawn(Box::new(fut));
let shutdown = admin_shutdown_signal.then(|_| Ok::<(), ()>(()));
rt.block_on(shutdown).expect("admin");
trace!("admin shutdown finished");
})
.expect("initialize controller api thread");
trace!("controller client thread spawned");
}
let fut = inbound
.join(outbound)
.map(|_| ())
.map_err(|err| error!("main error: {:?}", err));
runtime.spawn(Box::new(fut));
trace!("main task spawned");
let shutdown_signal = shutdown_signal.and_then(move |()| {
debug!("shutdown signaled");
drain_tx.drain()
});
runtime.run_until(shutdown_signal).expect("executor");
debug!("shutdown complete");
}
}
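// Illustrative sketch (added; not in the original source): how a binary that
// embeds this crate might drive `run_until` with a oneshot as the shutdown
// signal. Sending on `_shutdown_tx` resolves the signal future, which kicks
// off the drain sequence above. Constructing the `Main` value itself (config,
// original-dst getter, runtime) is assumed to happen elsewhere.
#[allow(dead_code)]
fn _run_until_sketch<G>(main: Main<G>)
where
    G: GetOriginalDst + Clone + Send + 'static,
{
    let (_shutdown_tx, shutdown_rx) = futures::sync::oneshot::channel::<()>();
    // Map the oneshot's cancelation error to `()` to satisfy the signal bound.
    main.run_until(shutdown_rx.map_err(|_| ()));
}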
fn serve<R, B, E, F, G>(
bound_port: BoundPort,
tls_config: tls::ConditionalConnectionConfig<tls::ServerConfigWatch>,
router: Router<R>,
tcp_connect_timeout: Duration,
disable_protocol_detection_ports: IndexSet<u16>,
proxy_ctx: Arc<ctx::Proxy>,
sensors: telemetry::Sensors,
get_orig_dst: G,
drain_rx: drain::Watch,
) -> impl Future<Item = (), Error = io::Error> + Send + 'static
where
B: tower_h2::Body + Default + Send + 'static,
B::Data: Send,
<B::Data as ::bytes::IntoBuf>::Buf: Send,
E: Error + Send + 'static,
F: Error + Send + 'static,
R: Recognize<
Request = http::Request<HttpBody>,
Response = http::Response<B>,
Error = E,
RouteError = F,
>
+ Send + Sync + 'static,
R::Key: Send,
R::Service: Send,
<R::Service as tower_service::Service>::Future: Send,
Router<R>: Send,
G: GetOriginalDst + Send + 'static,
{
let stack = Arc::new(NewServiceFn::new(move || {
// Clone the router handle
let router = router.clone();
// Map errors to appropriate response error codes.
let map_err = MapErr::new(router, |e| {
match e {
RouteError::Route(r) => {
error!("turning route error: {} into 500", r);
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::Inner(i) => {
error!("turning {} into 500", i);
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::NotRecognized => {
error!("turning route not recognized error into 500");
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::NoCapacity(capacity) => {
// TODO For H2 streams, we should probably signal a protocol-level
// capacity change.
error!("router at capacity ({}); returning a 503", capacity);
http::StatusCode::SERVICE_UNAVAILABLE
}
}
});
// Install the request open timestamp module at the very top
// of the stack, in order to take the timestamp as close as
// possible to the beginning of the request's lifetime.
telemetry::sensor::http::TimestampRequestOpen::new(map_err)
}));
let listen_addr = bound_port.local_addr();
let server = Server::new(
listen_addr,
proxy_ctx.clone(),
sensors,
get_orig_dst,
stack,
tcp_connect_timeout,
disable_protocol_detection_ports,
drain_rx.clone(),
);
let log = server.log().clone();
let accept = {
let fut = bound_port.listen_and_fold(
tls_config,
(),
move |(), (connection, remote_addr)| {
let s = server.serve(connection, remote_addr);
// Logging context is configured by the server.
let r = DefaultExecutor::current()
.spawn(Box::new(s))
.map_err(task::Error::into_io);
future::result(r)
},
);
log.future(fut)
};
let accept_until = Cancelable {
future: accept,
canceled: false,
};
// As soon as we get a shutdown signal, the listener
// is canceled immediately.
drain_rx.watch(accept_until, |accept| {
accept.canceled = true;
})
}
/// Can cancel a future by setting a flag.
///
/// Used to 'watch' the accept futures, and close the listeners
/// as soon as the shutdown signal starts.
struct Cancelable<F> {
future: F,
canceled: bool,
}
impl<F> Future for Cancelable<F>
where
F: Future<Item=()>,
{
type Item = ();
type Error = F::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.canceled {
Ok(().into())
} else {
self.future.poll()
}
}
}
fn serve_tap<N, B>(
bound_port: BoundPort,
new_service: N,
) -> impl Future<Item = (), Error = io::Error> + 'static
where
B: tower_h2::Body + Send + 'static,
<B::Data as bytes::IntoBuf>::Buf: Send,
N: NewService<
Request = http::Request<tower_h2::RecvBody>,
Response = http::Response<B>
>
+ Send + 'static,
tower_h2::server::Connection<
connection::Connection,
N,
::logging::ServerExecutor,
B,
()
>: Future<Item = ()>,
{
let log = logging::admin().server("tap", bound_port.local_addr());
let h2_builder = h2::server::Builder::default();
let server = tower_h2::Server::new(
new_service,
h2_builder,
log.clone().executor(),
);
let fut = {
let log = log.clone();
// TODO: serve over TLS.
bound_port.listen_and_fold(
Conditional::None(tls::ReasonForNoIdentity::NotImplementedForTap.into()),
server,
move |server, (session, remote)| {
let log = log.clone().with_remote(remote);
let serve = server.serve(session).map_err(|_| ());
let r = executor::current_thread::TaskExecutor::current()
.spawn_local(Box::new(log.future(serve)))
.map(move |_| server)
.map_err(task::Error::into_io);
future::result(r)
},
)
};
log.future(fut)
} | pub mod control;
pub mod convert; | random_line_split |
lib.rs | #![cfg_attr(feature = "cargo-clippy", allow(clone_on_ref_ptr))]
#![cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))]
#![deny(warnings)]
extern crate bytes;
extern crate conduit_proxy_controller_grpc;
extern crate env_logger;
extern crate deflate;
#[macro_use]
extern crate futures;
extern crate futures_mpsc_lossy;
extern crate futures_watch;
extern crate h2;
extern crate http;
extern crate httparse;
extern crate hyper;
#[cfg(target_os = "linux")]
extern crate inotify;
extern crate ipnet;
#[cfg(target_os = "linux")]
extern crate libc;
#[macro_use]
extern crate log;
#[cfg_attr(test, macro_use)]
extern crate indexmap;
#[cfg(target_os = "linux")]
extern crate procinfo;
extern crate prost;
extern crate prost_types;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
extern crate rand;
extern crate regex;
extern crate ring;
#[cfg(test)]
extern crate tempdir;
extern crate tokio;
extern crate tokio_connect;
extern crate tokio_timer;
extern crate tower_balance;
extern crate tower_buffer;
extern crate tower_discover;
extern crate tower_grpc;
extern crate tower_h2;
extern crate tower_h2_balance;
extern crate tower_reconnect;
extern crate tower_service;
extern crate conduit_proxy_router;
extern crate tower_util;
extern crate tower_in_flight_limit;
extern crate trust_dns_resolver;
extern crate try_lock;
use futures::*;
use std::error::Error;
use std::io;
use std::net::SocketAddr;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use indexmap::IndexSet;
use tokio::{
executor::{self, DefaultExecutor, Executor},
runtime::current_thread,
};
use tower_service::NewService;
use tower_fn::*;
use conduit_proxy_router::{Recognize, Router, Error as RouteError};
pub mod app;
mod bind;
pub mod config;
mod connection;
pub mod conditional;
pub mod control;
pub mod convert;
pub mod ctx;
mod dns;
mod drain;
pub mod fs_watch;
mod inbound;
mod logging;
mod map_err;
mod outbound;
pub mod stream;
pub mod task;
pub mod telemetry;
mod transparency;
mod transport;
pub mod timeout;
mod tower_fn; // TODO: move to tower-fn
mod watch_service; // TODO: move to tower
use bind::Bind;
use conditional::Conditional;
use connection::BoundPort;
use inbound::Inbound;
use map_err::MapErr;
use task::MainRuntime;
use transparency::{HttpBody, Server};
pub use transport::{AddrInfo, GetOriginalDst, SoOriginalDst, tls};
use outbound::Outbound;
pub use watch_service::WatchService;
/// Runs a sidecar proxy.
///
/// The proxy binds two listeners:
///
/// - a private socket (TCP or UNIX) for outbound requests to other instances;
/// - and a public socket (TCP and optionally TLS) for inbound requests from other
/// instances.
///
/// The public listener forwards requests to a local socket (TCP or UNIX).
///
/// The private listener routes requests to a service-discovery-aware load balancer.
///
pub struct Main<G> {
config: config::Config,
control_listener: BoundPort,
inbound_listener: BoundPort,
outbound_listener: BoundPort,
metrics_listener: BoundPort,
get_original_dst: G,
runtime: MainRuntime,
}
impl<G> Main<G>
where
G: GetOriginalDst + Clone + Send + 'static,
{
pub fn new<R>(
config: config::Config,
get_original_dst: G,
runtime: R
) -> Self
where
R: Into<MainRuntime>,
{
let control_listener = BoundPort::new(config.control_listener.addr)
.expect("controller listener bind");
let inbound_listener = BoundPort::new(config.public_listener.addr)
.expect("public listener bind");
let outbound_listener = BoundPort::new(config.private_listener.addr)
.expect("private listener bind");
let runtime = runtime.into();
let metrics_listener = BoundPort::new(config.metrics_listener.addr)
.expect("metrics listener bind");
Main {
config,
control_listener,
inbound_listener,
outbound_listener,
metrics_listener,
get_original_dst,
runtime,
}
}
pub fn control_addr(&self) -> SocketAddr {
self.control_listener.local_addr()
}
pub fn | (&self) -> SocketAddr {
self.inbound_listener.local_addr()
}
pub fn outbound_addr(&self) -> SocketAddr {
self.outbound_listener.local_addr()
}
pub fn metrics_addr(&self) -> SocketAddr {
self.metrics_listener.local_addr()
}
pub fn run_until<F>(self, shutdown_signal: F)
where
F: Future<Item = (), Error = ()> + Send + 'static,
{
let process_ctx = ctx::Process::new(&self.config);
let Main {
config,
control_listener,
inbound_listener,
outbound_listener,
metrics_listener,
get_original_dst,
mut runtime,
} = self;
let control_host_and_port = config.control_host_and_port.clone();
info!("using controller at {:?}", control_host_and_port);
info!("routing on {:?}", outbound_listener.local_addr());
info!(
"proxying on {:?} to {:?}",
inbound_listener.local_addr(),
config.private_forward
);
info!(
"serving Prometheus metrics on {:?}",
metrics_listener.local_addr(),
);
info!(
"protocol detection disabled for inbound ports {:?}",
config.inbound_ports_disable_protocol_detection,
);
info!(
"protocol detection disabled for outbound ports {:?}",
config.outbound_ports_disable_protocol_detection,
);
let (taps, observe) = control::Observe::new(100);
let (sensors, telemetry) = telemetry::new(
&process_ctx,
config.event_buffer_capacity,
config.metrics_retain_idle,
&taps,
);
let (tls_client_config, tls_server_config, tls_cfg_bg) =
tls::watch_for_config_changes(
config.tls_settings.as_ref(),
sensors.tls_config(),
);
let controller_tls = config.tls_settings.as_ref().and_then(|settings| {
settings.controller_identity.as_ref().map(|controller_identity| {
tls::ConnectionConfig {
identity: controller_identity.clone(),
config: tls_client_config.clone(),
}
})
});
let (dns_resolver, dns_bg) = dns::Resolver::from_system_config_and_env(&config)
.unwrap_or_else(|e| {
// TODO: Make DNS configuration infallible.
panic!("invalid DNS configuration: {:?}", e);
});
let (resolver, resolver_bg) = control::destination::new(
dns_resolver.clone(),
config.namespaces.clone(),
control_host_and_port,
controller_tls,
);
let (drain_tx, drain_rx) = drain::channel();
let bind = Bind::new(tls_client_config).with_sensors(sensors.clone());
// Set up the public listener. This will listen on a publicly accessible
// address and listen for inbound connections that should be forwarded
// to the managed application (private destination).
let inbound = {
let ctx = ctx::Proxy::inbound(&process_ctx);
let bind = bind.clone().with_ctx(ctx.clone());
let default_addr = config.private_forward.map(|a| a.into());
let router = Router::new(
Inbound::new(default_addr, bind),
config.inbound_router_capacity,
config.inbound_router_max_idle_age,
);
let tls_settings = config.tls_settings.as_ref().map(|settings| {
tls::ConnectionConfig {
identity: settings.pod_identity.clone(),
config: tls_server_config
}
});
serve(
inbound_listener,
tls_settings,
router,
config.private_connect_timeout,
config.inbound_ports_disable_protocol_detection,
ctx,
sensors.clone(),
get_original_dst.clone(),
drain_rx.clone(),
)
};
// Set up the private listener. This will listen on a locally accessible
// address and listen for outbound requests that should be routed
// to a remote service (public destination).
let outbound = {
let ctx = ctx::Proxy::outbound(&process_ctx);
let bind = bind.clone().with_ctx(ctx.clone());
let router = Router::new(
Outbound::new(bind, resolver, config.bind_timeout),
config.outbound_router_capacity,
config.outbound_router_max_idle_age,
);
serve(
outbound_listener,
Conditional::None(tls::ReasonForNoTls::InternalTraffic),
router,
config.public_connect_timeout,
config.outbound_ports_disable_protocol_detection,
ctx,
sensors,
get_original_dst,
drain_rx,
)
};
trace!("running");
let (_tx, admin_shutdown_signal) = futures::sync::oneshot::channel::<()>();
{
thread::Builder::new()
.name("admin".into())
.spawn(move || {
use conduit_proxy_controller_grpc::tap::server::TapServer;
let mut rt = current_thread::Runtime::new()
.expect("initialize admin thread runtime");
let tap = serve_tap(control_listener, TapServer::new(observe));
let metrics_server = telemetry.serve_metrics(metrics_listener);
let fut = ::logging::admin().bg("resolver").future(resolver_bg)
.join5(
::logging::admin().bg("telemetry").future(telemetry),
tap.map_err(|_| {}),
metrics_server.map_err(|_| {}),
::logging::admin().bg("dns-resolver").future(dns_bg),
)
// There's no `Future::join6` combinator...
.join(::logging::admin().bg("tls-config").future(tls_cfg_bg))
.map(|_| {});
rt.spawn(Box::new(fut));
let shutdown = admin_shutdown_signal.then(|_| Ok::<(), ()>(()));
rt.block_on(shutdown).expect("admin");
trace!("admin shutdown finished");
})
.expect("initialize controller api thread");
trace!("controller client thread spawned");
}
let fut = inbound
.join(outbound)
.map(|_| ())
.map_err(|err| error!("main error: {:?}", err));
runtime.spawn(Box::new(fut));
trace!("main task spawned");
let shutdown_signal = shutdown_signal.and_then(move |()| {
debug!("shutdown signaled");
drain_tx.drain()
});
runtime.run_until(shutdown_signal).expect("executor");
debug!("shutdown complete");
}
}
fn serve<R, B, E, F, G>(
bound_port: BoundPort,
tls_config: tls::ConditionalConnectionConfig<tls::ServerConfigWatch>,
router: Router<R>,
tcp_connect_timeout: Duration,
disable_protocol_detection_ports: IndexSet<u16>,
proxy_ctx: Arc<ctx::Proxy>,
sensors: telemetry::Sensors,
get_orig_dst: G,
drain_rx: drain::Watch,
) -> impl Future<Item = (), Error = io::Error> + Send + 'static
where
B: tower_h2::Body + Default + Send + 'static,
B::Data: Send,
<B::Data as ::bytes::IntoBuf>::Buf: Send,
E: Error + Send + 'static,
F: Error + Send + 'static,
R: Recognize<
Request = http::Request<HttpBody>,
Response = http::Response<B>,
Error = E,
RouteError = F,
>
+ Send + Sync + 'static,
R::Key: Send,
R::Service: Send,
<R::Service as tower_service::Service>::Future: Send,
Router<R>: Send,
G: GetOriginalDst + Send + 'static,
{
let stack = Arc::new(NewServiceFn::new(move || {
// Clone the router handle
let router = router.clone();
// Map errors to appropriate response error codes.
let map_err = MapErr::new(router, |e| {
match e {
RouteError::Route(r) => {
error!("turning route error: {} into 500", r);
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::Inner(i) => {
error!("turning {} into 500", i);
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::NotRecognized => {
error!("turning route not recognized error into 500");
http::StatusCode::INTERNAL_SERVER_ERROR
}
RouteError::NoCapacity(capacity) => {
// TODO For H2 streams, we should probably signal a protocol-level
// capacity change.
error!("router at capacity ({}); returning a 503", capacity);
http::StatusCode::SERVICE_UNAVAILABLE
}
}
});
// Install the request open timestamp module at the very top
// of the stack, in order to take the timestamp as close as
// possible to the beginning of the request's lifetime.
telemetry::sensor::http::TimestampRequestOpen::new(map_err)
}));
let listen_addr = bound_port.local_addr();
let server = Server::new(
listen_addr,
proxy_ctx.clone(),
sensors,
get_orig_dst,
stack,
tcp_connect_timeout,
disable_protocol_detection_ports,
drain_rx.clone(),
);
let log = server.log().clone();
let accept = {
let fut = bound_port.listen_and_fold(
tls_config,
(),
move |(), (connection, remote_addr)| {
let s = server.serve(connection, remote_addr);
// Logging context is configured by the server.
let r = DefaultExecutor::current()
.spawn(Box::new(s))
.map_err(task::Error::into_io);
future::result(r)
},
);
log.future(fut)
};
let accept_until = Cancelable {
future: accept,
canceled: false,
};
// As soon as we get a shutdown signal, the listener
// is canceled immediately.
drain_rx.watch(accept_until, |accept| {
accept.canceled = true;
})
}
/// Can cancel a future by setting a flag.
///
/// Used to 'watch' the accept futures, and close the listeners
/// as soon as the shutdown signal starts.
struct Cancelable<F> {
future: F,
canceled: bool,
}
impl<F> Future for Cancelable<F>
where
F: Future<Item=()>,
{
type Item = ();
type Error = F::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.canceled {
Ok(().into())
} else {
self.future.poll()
}
}
}
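// Illustrative sketch (added; not in the original source): the drain/cancel
// wiring used by `serve` above, in miniature. `drain::channel()` and
// `Watch::watch` are this crate's own primitives; the accept loop is stubbed
// with `future::empty()`, since a real listener future never ends by itself.
// The exact output type of `watch` is left to inference here.
#[allow(dead_code)]
fn _drain_cancel_sketch() {
    let (drain_tx, drain_rx) = drain::channel();
    let accept_until = Cancelable {
        future: future::empty::<(), ()>(),
        canceled: false,
    };
    // On drain, the closure flips the flag, so the watched future resolves on
    // its next poll instead of blocking on the stubbed accept loop.
    let _watched = drain_rx.watch(accept_until, |accept| {
        accept.canceled = true;
    });
    let _ = drain_tx.drain();
}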
fn serve_tap<N, B>(
bound_port: BoundPort,
new_service: N,
) -> impl Future<Item = (), Error = io::Error> + 'static
where
B: tower_h2::Body + Send + 'static,
<B::Data as bytes::IntoBuf>::Buf: Send,
N: NewService<
Request = http::Request<tower_h2::RecvBody>,
Response = http::Response<B>
>
+ Send + 'static,
tower_h2::server::Connection<
connection::Connection,
N,
::logging::ServerExecutor,
B,
()
>: Future<Item = ()>,
{
let log = logging::admin().server("tap", bound_port.local_addr());
let h2_builder = h2::server::Builder::default();
let server = tower_h2::Server::new(
new_service,
h2_builder,
log.clone().executor(),
);
let fut = {
let log = log.clone();
// TODO: serve over TLS.
bound_port.listen_and_fold(
Conditional::None(tls::ReasonForNoIdentity::NotImplementedForTap.into()),
server,
move |server, (session, remote)| {
let log = log.clone().with_remote(remote);
let serve = server.serve(session).map_err(|_| ());
let r = executor::current_thread::TaskExecutor::current()
.spawn_local(Box::new(log.future(serve)))
.map(move |_| server)
.map_err(task::Error::into_io);
future::result(r)
},
)
};
log.future(fut)
}
| inbound_addr | identifier_name |
cht.rs | // This file is part of Substrate.
// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Canonical hash trie definitions and helper functions.
//!
//! Each CHT is a trie mapping block numbers to canonical hashes.
//! One is generated for every `SIZE` blocks, allowing us to discard those blocks in
//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply
//! request an inclusion proof of a specific block number against the trie with the
//! root hash. A correct proof implies that the claimed block is identical to the one
//! we discarded.
use codec::Encode;
use hash_db;
use sp_trie;
use sp_core::{convert_hash, H256};
use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero};
use sp_state_machine::{
prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend,
Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend,
};
use sp_blockchain::{Error as ClientError, Result as ClientResult};
/// The size of each CHT. This value is passed to every CHT-related function from
/// production code. Other values are passed from tests.
const SIZE: u32 = 2048;
/// Gets default CHT size.
pub fn size<N: From<u32>>() -> N {
SIZE.into()
}
/// Returns Some(cht_number) if a CHT needs to be built when the block with the given number
/// is canonized.
pub fn | <N>(cht_size: N, block_num: N) -> Option<N>
where
N: Clone + AtLeast32Bit,
{
let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?;
let two = N::one() + N::one();
if block_cht_num < two {
return None
}
let cht_start = start_number(cht_size, block_cht_num.clone());
if cht_start != block_num {
return None
}
Some(block_cht_num - two)
}
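// Worked example (added for exposition): building CHT `N` is deferred until
// the first block of CHT period `N + 2` is canonized; that is what the
// `- two` above encodes. With a hypothetical size of 2048, canonizing block
// 2 * 2048 + 1 (the first block of period 2) is what triggers building CHT 0.
#[allow(dead_code)]
fn _is_build_required_sketch() {
    let size = 2048u32;
    assert_eq!(is_build_required(size, 2 * size), None); // last block of period 1
    assert_eq!(is_build_required(size, 2 * size + 1), Some(0)); // period 2 begins
    assert_eq!(is_build_required(size, 2 * size + 2), None); // not a period start
}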
/// Returns Some(max_cht_number) if a CHT has ever been built, given the maximal canonical block number.
pub fn max_cht_number<N>(cht_size: N, max_canonical_block: N) -> Option<N>
where
N: Clone + AtLeast32Bit,
{
let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?;
let two = N::one() + N::one();
if max_cht_number < two {
return None
}
Some(max_cht_number - two)
}
/// Compute a CHT root from an iterator of block hashes. Fails if the iterator
/// yields fewer than SIZE items. The items are assumed to proceed sequentially
/// from `start_number(cht_num)`. Discards the trie's nodes.
pub fn compute_root<Header, Hasher, I>(
cht_size: Header::Number,
cht_num: Header::Number,
hashes: I,
) -> ClientResult<Hasher::Out>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord,
I: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
use sp_trie::TrieConfiguration;
Ok(sp_trie::trie_types::Layout::<Hasher>::trie_root(build_pairs::<Header, I>(
cht_size, cht_num, hashes,
)?))
}
/// Build CHT-based header proof.
pub fn build_proof<Header, Hasher, BlocksI, HashesI>(
cht_size: Header::Number,
cht_num: Header::Number,
blocks: BlocksI,
hashes: HashesI,
) -> ClientResult<StorageProof>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
BlocksI: IntoIterator<Item = Header::Number>,
HashesI: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
let transaction = build_pairs::<Header, _>(cht_size, cht_num, hashes)?
.into_iter()
.map(|(k, v)| (k, Some(v)))
.collect::<Vec<_>>();
let mut storage = InMemoryBackend::<Hasher>::default().update(vec![(None, transaction)]);
let trie_storage =
storage.as_trie_backend().expect("InMemoryState::as_trie_backend always returns Some; qed");
prove_read_on_trie_backend(
trie_storage,
blocks.into_iter().map(|number| encode_cht_key(number)),
)
.map_err(ClientError::Execution)
}
/// Check CHT-based header proof.
pub fn check_proof<Header, Hasher>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
remote_proof: StorageProof,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
{
do_check_proof::<Header, Hasher, _>(
local_root,
local_number,
remote_hash,
move |local_root, local_cht_key| {
read_proof_check::<Hasher, _>(
local_root,
remote_proof,
::std::iter::once(local_cht_key),
)
.map(|mut map| map.remove(local_cht_key).expect("checked proof of local_cht_key; qed"))
.map_err(|e| ClientError::from(e))
},
)
}
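// Illustrative round trip (added; not in the original file): compute a CHT
// root for one full period of hashes, prove a single block against it, and
// verify the proof. The `Header` alias and `BlakeTwo256` hasher mirror the
// test setup at the bottom of this file; the constant hash value is arbitrary.
#[allow(dead_code)]
fn _proof_round_trip_sketch() -> ClientResult<()> {
    use sp_runtime::{generic, traits::BlakeTwo256};
    type Header = generic::Header<u64, BlakeTwo256>;
    let cht_size = SIZE as u64;
    let hashes =
        || std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize);
    let root = compute_root::<Header, BlakeTwo256, _>(cht_size, 0, hashes())?;
    let block = cht_size / 2;
    let proof = build_proof::<Header, BlakeTwo256, _, _>(cht_size, 0, vec![block], hashes())?;
    // Succeeds only if the proof commits `block` to exactly this hash.
    check_proof::<Header, BlakeTwo256>(root, block, H256::from_low_u64_be(1), proof)
}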
/// Check CHT-based header proof on pre-created proving backend.
pub fn check_proof_on_proving_backend<Header, Hasher>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
proving_backend: &TrieBackend<MemoryDB<Hasher>, Hasher>,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
{
do_check_proof::<Header, Hasher, _>(
local_root,
local_number,
remote_hash,
|_, local_cht_key| {
read_proof_check_on_proving_backend::<Hasher>(proving_backend, local_cht_key)
.map_err(|e| ClientError::from(e))
},
)
}
/// Check CHT-based header proof using passed checker function.
fn do_check_proof<Header, Hasher, F>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
checker: F,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord,
F: FnOnce(Hasher::Out, &[u8]) -> ClientResult<Option<Vec<u8>>>,
{
let root: Hasher::Out = convert_hash(&local_root);
let local_cht_key = encode_cht_key(local_number);
let local_cht_value = checker(root, &local_cht_key)?;
let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?;
let local_hash =
decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?;
match &local_hash[..] == remote_hash.as_ref() {
true => Ok(()),
false => Err(ClientError::InvalidCHTProof.into()),
}
}
/// Group ordered blocks by CHT number and call functor with blocks of each group.
pub fn for_each_cht_group<Header, I, F, P>(
cht_size: Header::Number,
blocks: I,
mut functor: F,
mut functor_param: P,
) -> ClientResult<()>
where
Header: HeaderT,
I: IntoIterator<Item = Header::Number>,
F: FnMut(P, Header::Number, Vec<Header::Number>) -> ClientResult<P>,
{
let mut current_cht_num = None;
let mut current_cht_blocks = Vec::new();
for block in blocks {
let new_cht_num = match block_to_cht_number(cht_size, block) {
Some(new_cht_num) => new_cht_num,
None =>
return Err(ClientError::Backend(format!(
"Cannot compute CHT root for the block #{}",
block
))
.into()),
};
let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num);
if advance_to_next_cht {
let current_cht_num = current_cht_num.expect(
"advance_to_next_cht is true;
it is true only when current_cht_num is Some; qed",
);
assert!(
new_cht_num > current_cht_num,
"for_each_cht_group only supports ordered iterators"
);
functor_param =
functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
}
current_cht_blocks.push(block);
current_cht_num = Some(new_cht_num);
}
if let Some(current_cht_num) = current_cht_num {
functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
}
Ok(())
}
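// Usage sketch (added; not in the original file): collect the (cht_number,
// blocks) groups the functor is called with. The accumulator parameter `P`
// is threaded through every call, so a mutable borrow of a Vec serves as a
// collector. The block numbers are arbitrary but must be ordered.
#[allow(dead_code)]
fn _cht_grouping_sketch() -> ClientResult<()> {
    type Header = sp_runtime::generic::Header<u64, sp_runtime::traits::BlakeTwo256>;
    let size = SIZE as u64;
    let mut groups: Vec<(u64, Vec<u64>)> = Vec::new();
    for_each_cht_group::<Header, _, _, _>(
        size,
        vec![size * 2 + 1, size * 2 + 2, size * 4 + 1],
        |acc, cht_num, blocks| {
            acc.push((cht_num, blocks));
            Ok(acc)
        },
        &mut groups,
    )?;
    // groups now holds [(2, [2*size + 1, 2*size + 2]), (4, [4*size + 1])].
    Ok(())
}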
/// Build pairs for computing CHT.
fn build_pairs<Header, I>(
cht_size: Header::Number,
cht_num: Header::Number,
hashes: I,
) -> ClientResult<Vec<(Vec<u8>, Vec<u8>)>>
where
Header: HeaderT,
I: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
let start_num = start_number(cht_size, cht_num);
let mut pairs = Vec::new();
let mut hash_index = Header::Number::zero();
for hash in hashes.into_iter() {
let hash =
hash?.ok_or_else(|| ClientError::from(ClientError::MissingHashRequiredForCHT))?;
pairs.push((encode_cht_key(start_num + hash_index).to_vec(), encode_cht_value(hash)));
hash_index += Header::Number::one();
if hash_index == cht_size {
break
}
}
if hash_index == cht_size {
Ok(pairs)
} else {
Err(ClientError::MissingHashRequiredForCHT)
}
}
/// Get the starting block of a given CHT.
/// CHT 0 includes block 1...SIZE,
/// CHT 1 includes block SIZE + 1... 2*SIZE
/// More generally: CHT N includes block (1 + N*SIZE)...((N+1)*SIZE).
/// This is because the genesis hash is assumed to be known
/// and including it would be redundant.
pub fn start_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
(cht_num * cht_size) + N::one()
}
/// Get the ending block of a given CHT.
pub fn end_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
(cht_num + N::one()) * cht_size
}
/// Convert a block number to a CHT number.
/// Returns `None` for `block_num` == 0, `Some` otherwise.
pub fn block_to_cht_number<N: AtLeast32Bit>(cht_size: N, block_num: N) -> Option<N> {
if block_num == N::zero() {
None
} else {
Some((block_num - N::one()) / cht_size)
}
}
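// Consistency check (added for exposition): the numbering helpers above are
// inverses of one another. Every block in CHT `n`'s range maps back to `n`,
// and the genesis block (number 0) belongs to no CHT.
#[allow(dead_code)]
fn _numbering_consistency_sketch() {
    let (size, n) = (2048u64, 3u64);
    assert_eq!(block_to_cht_number(size, start_number(size, n)), Some(n));
    assert_eq!(block_to_cht_number(size, end_number(size, n)), Some(n));
    assert_eq!(block_to_cht_number(size, end_number(size, n) + 1), Some(n + 1));
    assert_eq!(block_to_cht_number(size, 0), None);
}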
/// Convert header number into CHT key.
pub fn encode_cht_key<N: Encode>(number: N) -> Vec<u8> {
number.encode()
}
/// Convert header hash into CHT value.
fn encode_cht_value<Hash: AsRef<[u8]>>(hash: Hash) -> Vec<u8> {
hash.as_ref().to_vec()
}
/// Convert CHT value into block header hash.
pub fn decode_cht_value(value: &[u8]) -> Option<H256> {
match value.len() {
32 => Some(H256::from_slice(&value[0..32])),
_ => None,
}
}
#[cfg(test)]
mod tests {
use super::*;
use sp_runtime::{generic, traits::BlakeTwo256};
type Header = generic::Header<u64, BlakeTwo256>;
#[test]
fn is_build_required_works() {
assert_eq!(is_build_required(SIZE, 0u32.into()), None);
assert_eq!(is_build_required(SIZE, 1u32.into()), None);
assert_eq!(is_build_required(SIZE, SIZE), None);
assert_eq!(is_build_required(SIZE, SIZE + 1), None);
assert_eq!(is_build_required(SIZE, 2 * SIZE), None);
assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0));
assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None);
assert_eq!(is_build_required(SIZE, 3 * SIZE), None);
assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1));
assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None);
}
#[test]
fn max_cht_number_works() {
assert_eq!(max_cht_number(SIZE, 0u32.into()), None);
assert_eq!(max_cht_number(SIZE, 1u32.into()), None);
assert_eq!(max_cht_number(SIZE, SIZE), None);
assert_eq!(max_cht_number(SIZE, SIZE + 1), None);
assert_eq!(max_cht_number(SIZE, 2 * SIZE), None);
assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0));
assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0));
assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0));
assert_eq!(max_cht_number(SIZE, 3 * SIZE + 1), Some(1));
assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1));
}
#[test]
fn start_number_works() {
assert_eq!(start_number(SIZE, 0u32), 1u32);
assert_eq!(start_number(SIZE, 1u32), SIZE + 1);
assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1);
}
#[test]
fn end_number_works() {
assert_eq!(end_number(SIZE, 0u32), SIZE);
assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE);
assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE);
}
#[test]
fn build_pairs_fails_when_not_enough_blocks() {
assert!(build_pairs::<Header, _>(
SIZE as _,
0,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)
)
.is_err());
}
#[test]
fn build_pairs_fails_when_missing_block() {
assert!(build_pairs::<Header, _>(
SIZE as _,
0,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1))))
.take(SIZE as usize / 2)
.chain(::std::iter::once(Ok(None)))
.chain(
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2))))
.take(SIZE as usize / 2 - 1)
)
)
.is_err());
}
#[test]
fn compute_root_works() {
assert!(compute_root::<Header, BlakeTwo256, _>(
SIZE as _,
42,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_ok());
}
#[test]
#[should_panic]
fn build_proof_panics_when_querying_wrong_block() {
assert!(build_proof::<Header, BlakeTwo256, _, _>(
SIZE as _,
0,
vec![(SIZE * 1000) as u64],
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_err());
}
#[test]
fn build_proof_works() {
assert!(build_proof::<Header, BlakeTwo256, _, _>(
SIZE as _,
0,
vec![(SIZE / 2) as u64],
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_ok());
}
#[test]
#[should_panic]
fn for_each_cht_group_panics() {
let cht_size = SIZE as u64;
let _ = for_each_cht_group::<Header, _, _, _>(
cht_size,
vec![cht_size * 5, cht_size * 2],
|_, _, _| Ok(()),
(),
);
}
#[test]
fn for_each_cht_group_works() {
let cht_size = SIZE as u64;
let _ = for_each_cht_group::<Header, _, _, _>(
cht_size,
vec![
cht_size * 2 + 1,
cht_size * 2 + 2,
cht_size * 2 + 5,
cht_size * 4 + 1,
cht_size * 4 + 7,
cht_size * 6 + 1,
],
|_, cht_num, blocks| {
match cht_num {
2 => assert_eq!(
blocks,
vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5]
),
4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]),
6 => assert_eq!(blocks, vec![cht_size * 6 + 1]),
_ => unreachable!(),
}
Ok(())
},
(),
);
}
}
| is_build_required | identifier_name |
cht.rs | // This file is part of Substrate.
// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Canonical hash trie definitions and helper functions.
//!
//! Each CHT is a trie mapping block numbers to canonical hashes.
//! One is generated for every `SIZE` blocks, allowing us to discard those blocks in
//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply
//! request an inclusion proof of a specific block number against the trie with the
//! root hash. A correct proof implies that the claimed block is identical to the one
//! we discarded.
use codec::Encode;
use hash_db;
use sp_trie;
use sp_core::{convert_hash, H256};
use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero};
use sp_state_machine::{
prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend,
Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend,
};
use sp_blockchain::{Error as ClientError, Result as ClientResult};
/// The size of each CHT. This value is passed to every CHT-related function from
/// production code. Other values are passed from tests.
const SIZE: u32 = 2048;
/// Gets default CHT size.
pub fn size<N: From<u32>>() -> N |
/// Returns Some(cht_number) if a CHT needs to be built when the block with the given number
/// is canonized.
pub fn is_build_required<N>(cht_size: N, block_num: N) -> Option<N>
where
N: Clone + AtLeast32Bit,
{
let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?;
let two = N::one() + N::one();
if block_cht_num < two {
return None
}
let cht_start = start_number(cht_size, block_cht_num.clone());
if cht_start != block_num {
return None
}
Some(block_cht_num - two)
}
/// Returns Some(max_cht_number) if a CHT has ever been built, given the maximal canonical block number.
pub fn max_cht_number<N>(cht_size: N, max_canonical_block: N) -> Option<N>
where
N: Clone + AtLeast32Bit,
{
let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?;
let two = N::one() + N::one();
if max_cht_number < two {
return None
}
Some(max_cht_number - two)
}
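// Worked example (added for exposition): the newest available CHT lags the
// canonical head by two CHT periods, matching the build schedule above. With
// a hypothetical size of 2048, CHT 0 only exists once block 2 * 2048 + 1 is
// canonical.
#[allow(dead_code)]
fn _max_cht_number_sketch() {
    let size = 2048u32;
    assert_eq!(max_cht_number(size, 2 * size), None);
    assert_eq!(max_cht_number(size, 2 * size + 1), Some(0));
    assert_eq!(max_cht_number(size, 3 * size + 1), Some(1));
}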
/// Compute a CHT root from an iterator of block hashes. Fails if the iterator
/// yields fewer than SIZE items. The items are assumed to proceed sequentially
/// from `start_number(cht_num)`. Discards the trie's nodes.
pub fn compute_root<Header, Hasher, I>(
cht_size: Header::Number,
cht_num: Header::Number,
hashes: I,
) -> ClientResult<Hasher::Out>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord,
I: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
use sp_trie::TrieConfiguration;
Ok(sp_trie::trie_types::Layout::<Hasher>::trie_root(build_pairs::<Header, I>(
cht_size, cht_num, hashes,
)?))
}
/// Build CHT-based header proof.
pub fn build_proof<Header, Hasher, BlocksI, HashesI>(
cht_size: Header::Number,
cht_num: Header::Number,
blocks: BlocksI,
hashes: HashesI,
) -> ClientResult<StorageProof>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
BlocksI: IntoIterator<Item = Header::Number>,
HashesI: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
let transaction = build_pairs::<Header, _>(cht_size, cht_num, hashes)?
.into_iter()
.map(|(k, v)| (k, Some(v)))
.collect::<Vec<_>>();
let mut storage = InMemoryBackend::<Hasher>::default().update(vec![(None, transaction)]);
let trie_storage =
storage.as_trie_backend().expect("InMemoryState::as_trie_backend always returns Some; qed");
prove_read_on_trie_backend(
trie_storage,
blocks.into_iter().map(|number| encode_cht_key(number)),
)
.map_err(ClientError::Execution)
}
/// Check CHT-based header proof.
pub fn check_proof<Header, Hasher>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
remote_proof: StorageProof,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
{
do_check_proof::<Header, Hasher, _>(
local_root,
local_number,
remote_hash,
move |local_root, local_cht_key| {
read_proof_check::<Hasher, _>(
local_root,
remote_proof,
::std::iter::once(local_cht_key),
)
.map(|mut map| map.remove(local_cht_key).expect("checked proof of local_cht_key; qed"))
.map_err(|e| ClientError::from(e))
},
)
}
/// Check CHT-based header proof on pre-created proving backend.
pub fn check_proof_on_proving_backend<Header, Hasher>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
proving_backend: &TrieBackend<MemoryDB<Hasher>, Hasher>,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
{
do_check_proof::<Header, Hasher, _>(
local_root,
local_number,
remote_hash,
|_, local_cht_key| {
read_proof_check_on_proving_backend::<Hasher>(proving_backend, local_cht_key)
.map_err(|e| ClientError::from(e))
},
)
}
/// Check CHT-based header proof using passed checker function.
fn do_check_proof<Header, Hasher, F>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
checker: F,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord,
F: FnOnce(Hasher::Out, &[u8]) -> ClientResult<Option<Vec<u8>>>,
{
let root: Hasher::Out = convert_hash(&local_root);
let local_cht_key = encode_cht_key(local_number);
let local_cht_value = checker(root, &local_cht_key)?;
let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?;
let local_hash =
decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?;
match &local_hash[..] == remote_hash.as_ref() {
true => Ok(()),
false => Err(ClientError::InvalidCHTProof.into()),
}
}
/// Group ordered blocks by CHT number and call functor with blocks of each group.
pub fn for_each_cht_group<Header, I, F, P>(
cht_size: Header::Number,
blocks: I,
mut functor: F,
mut functor_param: P,
) -> ClientResult<()>
where
Header: HeaderT,
I: IntoIterator<Item = Header::Number>,
F: FnMut(P, Header::Number, Vec<Header::Number>) -> ClientResult<P>,
{
let mut current_cht_num = None;
let mut current_cht_blocks = Vec::new();
for block in blocks {
let new_cht_num = match block_to_cht_number(cht_size, block) {
Some(new_cht_num) => new_cht_num,
None =>
return Err(ClientError::Backend(format!(
"Cannot compute CHT root for the block #{}",
block
))
.into()),
};
let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num);
if advance_to_next_cht {
let current_cht_num = current_cht_num.expect(
"advance_to_next_cht is true;
it is true only when current_cht_num is Some; qed",
);
assert!(
new_cht_num > current_cht_num,
"for_each_cht_group only supports ordered iterators"
);
functor_param =
functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
}
current_cht_blocks.push(block);
current_cht_num = Some(new_cht_num);
}
if let Some(current_cht_num) = current_cht_num {
functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
}
Ok(())
}
/// Build pairs for computing CHT.
fn build_pairs<Header, I>(
cht_size: Header::Number,
cht_num: Header::Number,
hashes: I,
) -> ClientResult<Vec<(Vec<u8>, Vec<u8>)>>
where
Header: HeaderT,
I: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
let start_num = start_number(cht_size, cht_num);
let mut pairs = Vec::new();
let mut hash_index = Header::Number::zero();
for hash in hashes.into_iter() {
let hash =
hash?.ok_or_else(|| ClientError::from(ClientError::MissingHashRequiredForCHT))?;
pairs.push((encode_cht_key(start_num + hash_index).to_vec(), encode_cht_value(hash)));
hash_index += Header::Number::one();
if hash_index == cht_size {
break
}
}
if hash_index == cht_size {
Ok(pairs)
} else {
Err(ClientError::MissingHashRequiredForCHT)
}
}
/// Get the starting block of a given CHT.
/// CHT 0 includes block 1...SIZE,
/// CHT 1 includes block SIZE + 1... 2*SIZE
/// More generally: CHT N includes block (1 + N*SIZE)...((N+1)*SIZE).
/// This is because the genesis hash is assumed to be known
/// and including it would be redundant.
pub fn start_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
(cht_num * cht_size) + N::one()
}
/// Get the ending block of a given CHT.
pub fn end_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
(cht_num + N::one()) * cht_size
}
/// Convert a block number to a CHT number.
/// Returns `None` for `block_num` == 0, `Some` otherwise.
pub fn block_to_cht_number<N: AtLeast32Bit>(cht_size: N, block_num: N) -> Option<N> {
if block_num == N::zero() {
None
} else {
Some((block_num - N::one()) / cht_size)
}
}
/// Convert header number into CHT key.
pub fn encode_cht_key<N: Encode>(number: N) -> Vec<u8> {
number.encode()
}
/// Convert header hash into CHT value.
fn encode_cht_value<Hash: AsRef<[u8]>>(hash: Hash) -> Vec<u8> {
hash.as_ref().to_vec()
}
/// Convert CHT value into block header hash.
pub fn decode_cht_value(value: &[u8]) -> Option<H256> {
match value.len() {
32 => Some(H256::from_slice(&value[0..32])),
_ => None,
}
}
#[cfg(test)]
mod tests {
use super::*;
use sp_runtime::{generic, traits::BlakeTwo256};
type Header = generic::Header<u64, BlakeTwo256>;
#[test]
fn is_build_required_works() {
assert_eq!(is_build_required(SIZE, 0u32.into()), None);
assert_eq!(is_build_required(SIZE, 1u32.into()), None);
assert_eq!(is_build_required(SIZE, SIZE), None);
assert_eq!(is_build_required(SIZE, SIZE + 1), None);
assert_eq!(is_build_required(SIZE, 2 * SIZE), None);
assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0));
assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None);
assert_eq!(is_build_required(SIZE, 3 * SIZE), None);
assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1));
assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None);
}
#[test]
fn max_cht_number_works() {
assert_eq!(max_cht_number(SIZE, 0u32.into()), None);
assert_eq!(max_cht_number(SIZE, 1u32.into()), None);
assert_eq!(max_cht_number(SIZE, SIZE), None);
assert_eq!(max_cht_number(SIZE, SIZE + 1), None);
assert_eq!(max_cht_number(SIZE, 2 * SIZE), None);
assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0));
assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0));
assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0));
assert_eq!(max_cht_number(SIZE, 3 * SIZE + 1), Some(1));
assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1));
}
#[test]
fn start_number_works() {
assert_eq!(start_number(SIZE, 0u32), 1u32);
assert_eq!(start_number(SIZE, 1u32), SIZE + 1);
assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1);
}
#[test]
fn end_number_works() {
assert_eq!(end_number(SIZE, 0u32), SIZE);
assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE);
assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE);
}
#[test]
fn build_pairs_fails_when_no_enough_blocks() {
assert!(build_pairs::<Header, _>(
SIZE as _,
0,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)
)
.is_err());
}
#[test]
fn build_pairs_fails_when_missing_block() {
assert!(build_pairs::<Header, _>(
SIZE as _,
0,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1))))
.take(SIZE as usize / 2)
.chain(::std::iter::once(Ok(None)))
.chain(
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2))))
.take(SIZE as usize / 2 - 1)
)
)
.is_err());
}
#[test]
fn compute_root_works() {
assert!(compute_root::<Header, BlakeTwo256, _>(
SIZE as _,
42,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_ok());
}
#[test]
#[should_panic]
fn build_proof_panics_when_querying_wrong_block() {
assert!(build_proof::<Header, BlakeTwo256, _, _>(
SIZE as _,
0,
vec![(SIZE * 1000) as u64],
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_err());
}
#[test]
fn build_proof_works() {
assert!(build_proof::<Header, BlakeTwo256, _, _>(
SIZE as _,
0,
vec![(SIZE / 2) as u64],
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_ok());
}
#[test]
#[should_panic]
fn for_each_cht_group_panics() {
let cht_size = SIZE as u64;
let _ = for_each_cht_group::<Header, _, _, _>(
cht_size,
vec![cht_size * 5, cht_size * 2],
|_, _, _| Ok(()),
(),
);
}
#[test]
fn for_each_cht_group_works() {
let cht_size = SIZE as u64;
let _ = for_each_cht_group::<Header, _, _, _>(
cht_size,
vec![
cht_size * 2 + 1,
cht_size * 2 + 2,
cht_size * 2 + 5,
cht_size * 4 + 1,
cht_size * 4 + 7,
cht_size * 6 + 1,
],
|_, cht_num, blocks| {
match cht_num {
2 => assert_eq!(
blocks,
vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5]
),
4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]),
6 => assert_eq!(blocks, vec![cht_size * 6 + 1]),
_ => unreachable!(),
}
Ok(())
},
(),
);
}
}
| {
SIZE.into()
} | identifier_body |
cht.rs | // This file is part of Substrate.
// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Canonical hash trie definitions and helper functions.
//!
//! Each CHT is a trie mapping block numbers to canonical hashes.
//! One is generated for every `SIZE` blocks, allowing us to discard those blocks in
//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply
//! request an inclusion proof of a specific block number against the trie with the
//! root hash. A correct proof implies that the claimed block is identical to the one
//! we discarded.
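//!
//! As a worked example (illustrative, assuming the default `SIZE` of 2048):
//! blocks 1..=2048 are summarized by CHT 0 and blocks 2049..=4096 by CHT 1, so a
//! node holding only the CHT 0 root can discard the first 2048 headers and still
//! verify block 1500 against an inclusion proof for that root.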
use codec::Encode;
use hash_db;
use sp_trie;
use sp_core::{convert_hash, H256};
use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero};
use sp_state_machine::{
prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend,
Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend,
};
use sp_blockchain::{Error as ClientError, Result as ClientResult};
/// The size of each CHT. This value is passed to every CHT-related function from
/// production code. Other values are passed from tests.
const SIZE: u32 = 2048;
/// Gets default CHT size.
pub fn size<N: From<u32>>() -> N {
SIZE.into()
}
/// Returns Some(cht_number) if a CHT needs to be built when the block with the given number is
/// canonized.
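/// The build lags two CHTs behind the canonization point. For example, with
/// `cht_size = 2048` (illustrative, matching the unit tests): canonizing block
/// `2 * 2048 + 1 = 4097` returns `Some(0)`, i.e. CHT 0 can now be built.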
pub fn is_build_required<N>(cht_size: N, block_num: N) -> Option<N>
where
N: Clone + AtLeast32Bit,
{
let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?;
let two = N::one() + N::one();
if block_cht_num < two |
let cht_start = start_number(cht_size, block_cht_num.clone());
if cht_start != block_num {
return None
}
Some(block_cht_num - two)
}
/// Returns Some(max_cht_number) if CHT has ever been built given maximal canonical block number.
pub fn max_cht_number<N>(cht_size: N, max_canonical_block: N) -> Option<N>
where
N: Clone + AtLeast32Bit,
{
let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?;
let two = N::one() + N::one();
if max_cht_number < two {
return None
}
Some(max_cht_number - two)
}
/// Compute a CHT root from an iterator of block hashes. Fails if shorter than
/// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`.
/// Discards the trie's nodes.
pub fn compute_root<Header, Hasher, I>(
cht_size: Header::Number,
cht_num: Header::Number,
hashes: I,
) -> ClientResult<Hasher::Out>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord,
I: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
use sp_trie::TrieConfiguration;
Ok(sp_trie::trie_types::Layout::<Hasher>::trie_root(build_pairs::<Header, I>(
cht_size, cht_num, hashes,
)?))
}
/// Build CHT-based header proof.
pub fn build_proof<Header, Hasher, BlocksI, HashesI>(
cht_size: Header::Number,
cht_num: Header::Number,
blocks: BlocksI,
hashes: HashesI,
) -> ClientResult<StorageProof>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
BlocksI: IntoIterator<Item = Header::Number>,
HashesI: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
let transaction = build_pairs::<Header, _>(cht_size, cht_num, hashes)?
.into_iter()
.map(|(k, v)| (k, Some(v)))
.collect::<Vec<_>>();
let mut storage = InMemoryBackend::<Hasher>::default().update(vec![(None, transaction)]);
let trie_storage =
storage.as_trie_backend().expect("InMemoryState::as_trie_backend always returns Some; qed");
prove_read_on_trie_backend(
trie_storage,
blocks.into_iter().map(|number| encode_cht_key(number)),
)
.map_err(ClientError::Execution)
}
/// Check CHT-based header proof.
pub fn check_proof<Header, Hasher>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
remote_proof: StorageProof,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
{
do_check_proof::<Header, Hasher, _>(
local_root,
local_number,
remote_hash,
move |local_root, local_cht_key| {
read_proof_check::<Hasher, _>(
local_root,
remote_proof,
::std::iter::once(local_cht_key),
)
.map(|mut map| map.remove(local_cht_key).expect("checked proof of local_cht_key; qed"))
.map_err(|e| ClientError::from(e))
},
)
}
/// Check CHT-based header proof on pre-created proving backend.
pub fn check_proof_on_proving_backend<Header, Hasher>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
proving_backend: &TrieBackend<MemoryDB<Hasher>, Hasher>,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
{
do_check_proof::<Header, Hasher, _>(
local_root,
local_number,
remote_hash,
|_, local_cht_key| {
read_proof_check_on_proving_backend::<Hasher>(proving_backend, local_cht_key)
.map_err(|e| ClientError::from(e))
},
)
}
/// Check CHT-based header proof using passed checker function.
fn do_check_proof<Header, Hasher, F>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
checker: F,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord,
F: FnOnce(Hasher::Out, &[u8]) -> ClientResult<Option<Vec<u8>>>,
{
let root: Hasher::Out = convert_hash(&local_root);
let local_cht_key = encode_cht_key(local_number);
let local_cht_value = checker(root, &local_cht_key)?;
let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?;
let local_hash =
decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?;
match &local_hash[..] == remote_hash.as_ref() {
true => Ok(()),
false => Err(ClientError::InvalidCHTProof.into()),
}
}
/// Group ordered blocks by CHT number and call functor with blocks of each group.
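/// The input iterator must be ordered by block number. As an illustrative example
/// with `cht_size = 2048`: blocks `[4097, 4098, 8193]` produce the calls
/// `functor(_, 2, vec![4097, 4098])` and then `functor(_, 4, vec![8193])`.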
pub fn for_each_cht_group<Header, I, F, P>(
cht_size: Header::Number,
blocks: I,
mut functor: F,
mut functor_param: P,
) -> ClientResult<()>
where
Header: HeaderT,
I: IntoIterator<Item = Header::Number>,
F: FnMut(P, Header::Number, Vec<Header::Number>) -> ClientResult<P>,
{
let mut current_cht_num = None;
let mut current_cht_blocks = Vec::new();
for block in blocks {
let new_cht_num = match block_to_cht_number(cht_size, block) {
Some(new_cht_num) => new_cht_num,
None =>
return Err(ClientError::Backend(format!(
"Cannot compute CHT root for the block #{}",
block
))
.into()),
};
let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num);
if advance_to_next_cht {
let current_cht_num = current_cht_num.expect(
"advance_to_next_cht is true;
it is true only when current_cht_num is Some; qed",
);
assert!(
new_cht_num > current_cht_num,
"for_each_cht_group only supports ordered iterators"
);
functor_param =
functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
}
current_cht_blocks.push(block);
current_cht_num = Some(new_cht_num);
}
if let Some(current_cht_num) = current_cht_num {
functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
}
Ok(())
}
/// Build pairs for computing CHT.
fn build_pairs<Header, I>(
cht_size: Header::Number,
cht_num: Header::Number,
hashes: I,
) -> ClientResult<Vec<(Vec<u8>, Vec<u8>)>>
where
Header: HeaderT,
I: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
let start_num = start_number(cht_size, cht_num);
let mut pairs = Vec::new();
let mut hash_index = Header::Number::zero();
for hash in hashes.into_iter() {
let hash =
hash?.ok_or_else(|| ClientError::MissingHashRequiredForCHT)?;
pairs.push((encode_cht_key(start_num + hash_index).to_vec(), encode_cht_value(hash)));
hash_index += Header::Number::one();
if hash_index == cht_size {
break
}
}
if hash_index == cht_size {
Ok(pairs)
} else {
Err(ClientError::MissingHashRequiredForCHT)
}
}
/// Get the starting block of a given CHT.
/// CHT 0 includes blocks 1...SIZE,
/// CHT 1 includes blocks SIZE + 1...2*SIZE,
/// More generally: CHT N includes blocks (1 + N*SIZE)...((N+1)*SIZE).
/// This is because the genesis hash is assumed to be known
/// and including it would be redundant.
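///
/// For example (values mirror the unit tests below): `start_number(2048, 0) == 1`
/// and `start_number(2048, 1) == 2049`.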
pub fn start_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
(cht_num * cht_size) + N::one()
}
/// Get the ending block of a given CHT.
pub fn end_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
(cht_num + N::one()) * cht_size
}
/// Convert a block number to a CHT number.
/// Returns `None` for `block_num` == 0, `Some` otherwise.
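/// For example, `block_to_cht_number(2048, 2048) == Some(0)` while
/// `block_to_cht_number(2048, 2049) == Some(1)` (illustrative values).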
pub fn block_to_cht_number<N: AtLeast32Bit>(cht_size: N, block_num: N) -> Option<N> {
if block_num == N::zero() {
None
} else {
Some((block_num - N::one()) / cht_size)
}
}
/// Convert header number into CHT key.
pub fn encode_cht_key<N: Encode>(number: N) -> Vec<u8> {
number.encode()
}
/// Convert header hash into CHT value.
fn encode_cht_value<Hash: AsRef<[u8]>>(hash: Hash) -> Vec<u8> {
hash.as_ref().to_vec()
}
/// Convert CHT value into block header hash.
pub fn decode_cht_value(value: &[u8]) -> Option<H256> {
match value.len() {
32 => Some(H256::from_slice(&value[0..32])),
_ => None,
}
}
#[cfg(test)]
mod tests {
use super::*;
use sp_runtime::{generic, traits::BlakeTwo256};
type Header = generic::Header<u64, BlakeTwo256>;
#[test]
fn is_build_required_works() {
assert_eq!(is_build_required(SIZE, 0u32.into()), None);
assert_eq!(is_build_required(SIZE, 1u32.into()), None);
assert_eq!(is_build_required(SIZE, SIZE), None);
assert_eq!(is_build_required(SIZE, SIZE + 1), None);
assert_eq!(is_build_required(SIZE, 2 * SIZE), None);
assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0));
assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None);
assert_eq!(is_build_required(SIZE, 3 * SIZE), None);
assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1));
assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None);
}
#[test]
fn max_cht_number_works() {
assert_eq!(max_cht_number(SIZE, 0u32.into()), None);
assert_eq!(max_cht_number(SIZE, 1u32.into()), None);
assert_eq!(max_cht_number(SIZE, SIZE), None);
assert_eq!(max_cht_number(SIZE, SIZE + 1), None);
assert_eq!(max_cht_number(SIZE, 2 * SIZE), None);
assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0));
assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0));
assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0));
assert_eq!(max_cht_number(SIZE, 3 * SIZE + 1), Some(1));
assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1));
}
#[test]
fn start_number_works() {
assert_eq!(start_number(SIZE, 0u32), 1u32);
assert_eq!(start_number(SIZE, 1u32), SIZE + 1);
assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1);
}
#[test]
fn end_number_works() {
assert_eq!(end_number(SIZE, 0u32), SIZE);
assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE);
assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE);
}
#[test]
fn build_pairs_fails_when_no_enough_blocks() {
assert!(build_pairs::<Header, _>(
SIZE as _,
0,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)
)
.is_err());
}
#[test]
fn build_pairs_fails_when_missing_block() {
assert!(build_pairs::<Header, _>(
SIZE as _,
0,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1))))
.take(SIZE as usize / 2)
.chain(::std::iter::once(Ok(None)))
.chain(
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2))))
.take(SIZE as usize / 2 - 1)
)
)
.is_err());
}
#[test]
fn compute_root_works() {
assert!(compute_root::<Header, BlakeTwo256, _>(
SIZE as _,
42,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_ok());
}
#[test]
#[should_panic]
fn build_proof_panics_when_querying_wrong_block() {
assert!(build_proof::<Header, BlakeTwo256, _, _>(
SIZE as _,
0,
vec![(SIZE * 1000) as u64],
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_err());
}
#[test]
fn build_proof_works() {
assert!(build_proof::<Header, BlakeTwo256, _, _>(
SIZE as _,
0,
vec![(SIZE / 2) as u64],
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_ok());
}
#[test]
#[should_panic]
fn for_each_cht_group_panics() {
let cht_size = SIZE as u64;
let _ = for_each_cht_group::<Header, _, _, _>(
cht_size,
vec![cht_size * 5, cht_size * 2],
|_, _, _| Ok(()),
(),
);
}
#[test]
fn for_each_cht_group_works() {
let cht_size = SIZE as u64;
let _ = for_each_cht_group::<Header, _, _, _>(
cht_size,
vec![
cht_size * 2 + 1,
cht_size * 2 + 2,
cht_size * 2 + 5,
cht_size * 4 + 1,
cht_size * 4 + 7,
cht_size * 6 + 1,
],
|_, cht_num, blocks| {
match cht_num {
2 => assert_eq!(
blocks,
vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5]
),
4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]),
6 => assert_eq!(blocks, vec![cht_size * 6 + 1]),
_ => unreachable!(),
}
Ok(())
},
(),
);
}
}
| {
return None
} | conditional_block |
cht.rs | // This file is part of Substrate.
// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Canonical hash trie definitions and helper functions.
//!
//! Each CHT is a trie mapping block numbers to canonical hashes.
//! One is generated for every `SIZE` blocks, allowing us to discard those blocks in
//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply
//! request an inclusion proof of a specific block number against the trie with the
//! root hash. A correct proof implies that the claimed block is identical to the one
//! we discarded.
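//!
//! As a worked example (illustrative, assuming the default `SIZE` of 2048):
//! blocks 1..=2048 are summarized by CHT 0 and blocks 2049..=4096 by CHT 1, so a
//! node holding only the CHT 0 root can discard the first 2048 headers and still
//! verify block 1500 against an inclusion proof for that root.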
use codec::Encode;
use hash_db;
use sp_trie;
use sp_core::{convert_hash, H256};
use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero};
use sp_state_machine::{
prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend,
Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend,
};
use sp_blockchain::{Error as ClientError, Result as ClientResult};
/// The size of each CHT. This value is passed to every CHT-related function from
/// production code. Other values are passed from tests.
const SIZE: u32 = 2048;
/// Gets default CHT size.
pub fn size<N: From<u32>>() -> N {
SIZE.into()
}
/// Returns Some(cht_number) if a CHT needs to be built when the block with the given number is
/// canonized.
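/// The build lags two CHTs behind the canonization point. For example, with
/// `cht_size = 2048` (illustrative, matching the unit tests): canonizing block
/// `2 * 2048 + 1 = 4097` returns `Some(0)`, i.e. CHT 0 can now be built.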
pub fn is_build_required<N>(cht_size: N, block_num: N) -> Option<N>
where
N: Clone + AtLeast32Bit,
{
let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?;
let two = N::one() + N::one();
if block_cht_num < two {
return None
}
let cht_start = start_number(cht_size, block_cht_num.clone());
if cht_start != block_num {
return None
}
Some(block_cht_num - two)
}
/// Returns Some(max_cht_number) if CHT has ever been built given maximal canonical block number.
pub fn max_cht_number<N>(cht_size: N, max_canonical_block: N) -> Option<N>
where
N: Clone + AtLeast32Bit,
{
let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?;
let two = N::one() + N::one();
if max_cht_number < two {
return None
}
Some(max_cht_number - two)
}
| /// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`.
/// Discards the trie's nodes.
pub fn compute_root<Header, Hasher, I>(
cht_size: Header::Number,
cht_num: Header::Number,
hashes: I,
) -> ClientResult<Hasher::Out>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord,
I: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
use sp_trie::TrieConfiguration;
Ok(sp_trie::trie_types::Layout::<Hasher>::trie_root(build_pairs::<Header, I>(
cht_size, cht_num, hashes,
)?))
}
/// Build CHT-based header proof.
pub fn build_proof<Header, Hasher, BlocksI, HashesI>(
cht_size: Header::Number,
cht_num: Header::Number,
blocks: BlocksI,
hashes: HashesI,
) -> ClientResult<StorageProof>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
BlocksI: IntoIterator<Item = Header::Number>,
HashesI: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
let transaction = build_pairs::<Header, _>(cht_size, cht_num, hashes)?
.into_iter()
.map(|(k, v)| (k, Some(v)))
.collect::<Vec<_>>();
let mut storage = InMemoryBackend::<Hasher>::default().update(vec![(None, transaction)]);
let trie_storage =
storage.as_trie_backend().expect("InMemoryState::as_trie_backend always returns Some; qed");
prove_read_on_trie_backend(
trie_storage,
blocks.into_iter().map(|number| encode_cht_key(number)),
)
.map_err(ClientError::Execution)
}
/// Check CHT-based header proof.
pub fn check_proof<Header, Hasher>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
remote_proof: StorageProof,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
{
do_check_proof::<Header, Hasher, _>(
local_root,
local_number,
remote_hash,
move |local_root, local_cht_key| {
read_proof_check::<Hasher, _>(
local_root,
remote_proof,
::std::iter::once(local_cht_key),
)
.map(|mut map| map.remove(local_cht_key).expect("checked proof of local_cht_key; qed"))
.map_err(|e| ClientError::from(e))
},
)
}
/// Check CHT-based header proof on pre-created proving backend.
pub fn check_proof_on_proving_backend<Header, Hasher>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
proving_backend: &TrieBackend<MemoryDB<Hasher>, Hasher>,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord + codec::Codec,
{
do_check_proof::<Header, Hasher, _>(
local_root,
local_number,
remote_hash,
|_, local_cht_key| {
read_proof_check_on_proving_backend::<Hasher>(proving_backend, local_cht_key)
.map_err(|e| ClientError::from(e))
},
)
}
/// Check CHT-based header proof using passed checker function.
fn do_check_proof<Header, Hasher, F>(
local_root: Header::Hash,
local_number: Header::Number,
remote_hash: Header::Hash,
checker: F,
) -> ClientResult<()>
where
Header: HeaderT,
Hasher: hash_db::Hasher,
Hasher::Out: Ord,
F: FnOnce(Hasher::Out, &[u8]) -> ClientResult<Option<Vec<u8>>>,
{
let root: Hasher::Out = convert_hash(&local_root);
let local_cht_key = encode_cht_key(local_number);
let local_cht_value = checker(root, &local_cht_key)?;
let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?;
let local_hash =
decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?;
match &local_hash[..] == remote_hash.as_ref() {
true => Ok(()),
false => Err(ClientError::InvalidCHTProof.into()),
}
}
/// Group ordered blocks by CHT number and call functor with blocks of each group.
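/// The input iterator must be ordered by block number. As an illustrative example
/// with `cht_size = 2048`: blocks `[4097, 4098, 8193]` produce the calls
/// `functor(_, 2, vec![4097, 4098])` and then `functor(_, 4, vec![8193])`.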
pub fn for_each_cht_group<Header, I, F, P>(
cht_size: Header::Number,
blocks: I,
mut functor: F,
mut functor_param: P,
) -> ClientResult<()>
where
Header: HeaderT,
I: IntoIterator<Item = Header::Number>,
F: FnMut(P, Header::Number, Vec<Header::Number>) -> ClientResult<P>,
{
let mut current_cht_num = None;
let mut current_cht_blocks = Vec::new();
for block in blocks {
let new_cht_num = match block_to_cht_number(cht_size, block) {
Some(new_cht_num) => new_cht_num,
None =>
return Err(ClientError::Backend(format!(
"Cannot compute CHT root for the block #{}",
block
))
.into()),
};
let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num);
if advance_to_next_cht {
let current_cht_num = current_cht_num.expect(
"advance_to_next_cht is true;
it is true only when current_cht_num is Some; qed",
);
assert!(
new_cht_num > current_cht_num,
"for_each_cht_group only supports ordered iterators"
);
functor_param =
functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
}
current_cht_blocks.push(block);
current_cht_num = Some(new_cht_num);
}
if let Some(current_cht_num) = current_cht_num {
functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
}
Ok(())
}
/// Build pairs for computing CHT.
fn build_pairs<Header, I>(
cht_size: Header::Number,
cht_num: Header::Number,
hashes: I,
) -> ClientResult<Vec<(Vec<u8>, Vec<u8>)>>
where
Header: HeaderT,
I: IntoIterator<Item = ClientResult<Option<Header::Hash>>>,
{
let start_num = start_number(cht_size, cht_num);
let mut pairs = Vec::new();
let mut hash_index = Header::Number::zero();
for hash in hashes.into_iter() {
let hash =
hash?.ok_or_else(|| ClientError::MissingHashRequiredForCHT)?;
pairs.push((encode_cht_key(start_num + hash_index).to_vec(), encode_cht_value(hash)));
hash_index += Header::Number::one();
if hash_index == cht_size {
break
}
}
if hash_index == cht_size {
Ok(pairs)
} else {
Err(ClientError::MissingHashRequiredForCHT)
}
}
/// Get the starting block of a given CHT.
/// CHT 0 includes blocks 1...SIZE,
/// CHT 1 includes blocks SIZE + 1...2*SIZE,
/// More generally: CHT N includes blocks (1 + N*SIZE)...((N+1)*SIZE).
/// This is because the genesis hash is assumed to be known
/// and including it would be redundant.
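///
/// For example (values mirror the unit tests below): `start_number(2048, 0) == 1`
/// and `start_number(2048, 1) == 2049`.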
pub fn start_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
(cht_num * cht_size) + N::one()
}
/// Get the ending block of a given CHT.
pub fn end_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
(cht_num + N::one()) * cht_size
}
/// Convert a block number to a CHT number.
/// Returns `None` for `block_num` == 0, `Some` otherwise.
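/// For example, `block_to_cht_number(2048, 2048) == Some(0)` while
/// `block_to_cht_number(2048, 2049) == Some(1)` (illustrative values).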
pub fn block_to_cht_number<N: AtLeast32Bit>(cht_size: N, block_num: N) -> Option<N> {
if block_num == N::zero() {
None
} else {
Some((block_num - N::one()) / cht_size)
}
}
/// Convert header number into CHT key.
pub fn encode_cht_key<N: Encode>(number: N) -> Vec<u8> {
number.encode()
}
/// Convert header hash into CHT value.
fn encode_cht_value<Hash: AsRef<[u8]>>(hash: Hash) -> Vec<u8> {
hash.as_ref().to_vec()
}
/// Convert CHT value into block header hash.
pub fn decode_cht_value(value: &[u8]) -> Option<H256> {
match value.len() {
32 => Some(H256::from_slice(&value[0..32])),
_ => None,
}
}
#[cfg(test)]
mod tests {
use super::*;
use sp_runtime::{generic, traits::BlakeTwo256};
type Header = generic::Header<u64, BlakeTwo256>;
#[test]
fn is_build_required_works() {
assert_eq!(is_build_required(SIZE, 0u32.into()), None);
assert_eq!(is_build_required(SIZE, 1u32.into()), None);
assert_eq!(is_build_required(SIZE, SIZE), None);
assert_eq!(is_build_required(SIZE, SIZE + 1), None);
assert_eq!(is_build_required(SIZE, 2 * SIZE), None);
assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0));
assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None);
assert_eq!(is_build_required(SIZE, 3 * SIZE), None);
assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1));
assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None);
}
#[test]
fn max_cht_number_works() {
assert_eq!(max_cht_number(SIZE, 0u32.into()), None);
assert_eq!(max_cht_number(SIZE, 1u32.into()), None);
assert_eq!(max_cht_number(SIZE, SIZE), None);
assert_eq!(max_cht_number(SIZE, SIZE + 1), None);
assert_eq!(max_cht_number(SIZE, 2 * SIZE), None);
assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0));
assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0));
assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0));
assert_eq!(max_cht_number(SIZE, 3 * SIZE + 1), Some(1));
assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1));
}
#[test]
fn start_number_works() {
assert_eq!(start_number(SIZE, 0u32), 1u32);
assert_eq!(start_number(SIZE, 1u32), SIZE + 1);
assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1);
}
#[test]
fn end_number_works() {
assert_eq!(end_number(SIZE, 0u32), SIZE);
assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE);
assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE);
}
#[test]
fn build_pairs_fails_when_no_enough_blocks() {
assert!(build_pairs::<Header, _>(
SIZE as _,
0,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)
)
.is_err());
}
#[test]
fn build_pairs_fails_when_missing_block() {
assert!(build_pairs::<Header, _>(
SIZE as _,
0,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1))))
.take(SIZE as usize / 2)
.chain(::std::iter::once(Ok(None)))
.chain(
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2))))
.take(SIZE as usize / 2 - 1)
)
)
.is_err());
}
#[test]
fn compute_root_works() {
assert!(compute_root::<Header, BlakeTwo256, _>(
SIZE as _,
42,
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_ok());
}
#[test]
#[should_panic]
fn build_proof_panics_when_querying_wrong_block() {
assert!(build_proof::<Header, BlakeTwo256, _, _>(
SIZE as _,
0,
vec![(SIZE * 1000) as u64],
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_err());
}
#[test]
fn build_proof_works() {
assert!(build_proof::<Header, BlakeTwo256, _, _>(
SIZE as _,
0,
vec![(SIZE / 2) as u64],
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
)
.is_ok());
}
#[test]
#[should_panic]
fn for_each_cht_group_panics() {
let cht_size = SIZE as u64;
let _ = for_each_cht_group::<Header, _, _, _>(
cht_size,
vec![cht_size * 5, cht_size * 2],
|_, _, _| Ok(()),
(),
);
}
#[test]
fn for_each_cht_group_works() {
let cht_size = SIZE as u64;
let _ = for_each_cht_group::<Header, _, _, _>(
cht_size,
vec![
cht_size * 2 + 1,
cht_size * 2 + 2,
cht_size * 2 + 5,
cht_size * 4 + 1,
cht_size * 4 + 7,
cht_size * 6 + 1,
],
|_, cht_num, blocks| {
match cht_num {
2 => assert_eq!(
blocks,
vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5]
),
4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]),
6 => assert_eq!(blocks, vec![cht_size * 6 + 1]),
_ => unreachable!(),
}
Ok(())
},
(),
);
}
} | /// Compute a CHT root from an iterator of block hashes. Fails if shorter than | random_line_split |
main.rs | Cow;
use std::cmp::min;
use std::fs::{rename, File, OpenOptions};
use std::io::{stdout, BufReader, Read, Write};
use std::net::IpAddr;
use structopt::StructOpt;
const PATH_HOSTSFILE: &str = "/etc/hosts";
const PATH_HOSTSFILE_NEW: &str = "/etc/hosts.new";
const PATH_CONFIG: &str = "/etc/hostsmod.yaml";
fn main() {
let hostname_os_string = hostname::get().expect("unable to determine system hostname");
let hostname = hostname_os_string
.to_str()
.expect("system hostname is not a valid UTF-8 string");
let mut opts: opts::HostsArgs = {
let app: structopt::clap::App = opts::HostsArgs::clap();
let str_about = format!(
r##"Tool for mopdifying system wide hosts file to simulate arbitrary DNS A and AAAA records.
Expects a hosts file at {:?} and a configuration in YAML format at {:?}. This
program is intended to be run by non-privileged users with the help of setuid. It therefore has
some safety features.
Any modifications will not be persisted until the end of program execution. In the event of any
error, the original hosts file will not be modified.
The configuration defines a whitelist of hostnames that can be modified. This program will refuse
to modify any hostname not present in that list. It will also ensure that certain hostnames are
never modified:
- {:?}
- {:?}
- {:?}
- {:?}
- {:?}
- {:?} <- current hostname
The only exception is if the config variable `enable_dangerous_operations` is set to true. Then even
these reserved hostnames can be modified."##,
PATH_HOSTSFILE,
PATH_CONFIG,
config::RESERVED_LOCALHOST,
config::RESERVED_IP6_LOCALHOST,
config::RESERVED_IP6_LOOPBACK,
config::RESERVED_IP6_ALLNODES,
config::RESERVED_IP6_ALLROUTERS,
hostname
);
let app = app
//.before_help("PRE!!!")
//.after_help("POST!!!")
.about(str_about.as_ref());
opts::HostsArgs::from_clap(&app.get_matches())
};
if opts.generate_sample_config {
let mut out = stdout();
let mut sample = HostsmodConfig::default();
sample.whitelist.insert("somerandomhost.with.tld".into());
serde_yaml::to_writer(&mut out, &sample).expect("unable to write default config to stdout");
return;
}
let euid = users::get_effective_uid();
// dbg!(uid);
if euid != 0 {
eprintln!("not effectively root, forced dry-run mode");
opts.dry_run = true;
}
// dbg!(opts);
// open file
let mut file_hosts_orig = OpenOptions::new()
.read(true)
//.write(!opts.dry_run)
.write(false)
.truncate(false)
.create(false)
.open(PATH_HOSTSFILE)
.expect("unable to open hosts");
// let opt_file_hosts_new = if opts.dry_run {
// None
// } else {
// Some(
// OpenOptions::new()
// .write(true)
// .create_new(true)
// .open(PATH_HOSTSFILE_NEW)
// .expect("unable to open new hosts file for writing! Stale file from previous run?"),
// )
// };
let mut str_content = String::with_capacity(1024 * 8);
let len_content = file_hosts_orig
.read_to_string(&mut str_content)
.expect("unable to read hosts file as UTF-8 string");
let mut hosts_parts =
try_parse_hosts(&str_content).expect("unable to parse contents of hosts file");
trim_hosts_parts(&mut hosts_parts);
let hosts_parts_orig = hosts_parts.clone();
// eprintln!("PRE-actions: {:#?}", &hosts_parts);
let cfg: HostsmodConfig = {
// TODO: check config file ownership & access rights
let file_cfg = BufReader::new(File::open(PATH_CONFIG).expect("unable to open config file"));
serde_yaml::from_reader(file_cfg).expect("unable to parse configuration")
};
if opts.dry_run || opts.verbose {
if opts.verbose {
eprintln!("config: {:#?}", cfg);
}
println!("original contents:\n>>>\n{}<<<", str_content);
}
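// Record which untouchable DONT_TOUCH entries are present before any edits; the
// same scan runs again after the edits and both results must match, so no
// whitelisted action can add or remove a reserved mapping.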
let mut found_pre = vec![false; DONT_TOUCH.len()];
if !cfg.enable_dangerous_operations {
for (dt, found) in DONT_TOUCH.iter().zip(found_pre.iter_mut()) {
let dt_host = if dt.hostname == RESERVED_HOSTNAME {
Cow::Borrowed(hostname)
} else {
Cow::Borrowed(dt.hostname.as_ref())
};
for part in &hosts_parts {
if part.matches_hostname(&dt_host) && part.matches_ip(&dt.ip) {
*found = true;
}
}
}
}
let found_pre = found_pre;
// execute actions
perform_actions(&mut opts, &mut hosts_parts, &cfg).expect("unable to modify hosts file");
if !opts.dry_run && hosts_parts == hosts_parts_orig {
if opts.verbose {
println!("no changes, not modifying hosts file");
}
return;
}
// remove redundant Empty elements
trim_hosts_parts(&mut hosts_parts);
{
let mut remove = false;
hosts_parts.retain(|item| match (item.is_empty(), remove) {
(true, true) => false,
(true, false) => {
remove = true;
true
}
(false, _) => {
remove = false;
true
}
});
}
// eprintln!("POST-actions: {:#?}", &hosts_parts);
// compare against DONT_TOUCH
let buf_generate = generate_hosts_file(len_content, &hosts_parts);
// eprintln!(">\n{}<", &buf_generate);
// safety checks
if !cfg.enable_dangerous_operations {
let mut found_post = vec![false; DONT_TOUCH.len()];
for (dt, found) in DONT_TOUCH.iter().zip(found_post.iter_mut()) {
let dt_host = if dt.hostname == RESERVED_HOSTNAME {
Cow::Borrowed(hostname)
} else {
Cow::Borrowed(dt.hostname.as_ref())
};
for part in &hosts_parts {
match (part.matches_hostname(&dt_host), part.matches_ip(&dt.ip)) {
(true, true) => {
*found = true;
}
(true, false) => {
if DONT_TOUCH
.iter()
.find(|dt_lookup| {
// eprint!("conflict: {:?} == {:?} ", part, dt_lookup);
let res = part.matches_hostname(&dt_lookup.hostname)
&& part.matches_ip(&dt_lookup.ip);
// eprintln!("{}", res);
res
})
.is_none()
{
panic!(
"untouchable entry {:?} {:?} was changed! {:?}",
dt.ip, dt_host, part
);
}
// *found = true;
}
(false, _) => {}
}
}
}
if found_post != found_pre {
dbg!(&found_pre);
dbg!(&found_post);
for (i, (pre, post)) in found_pre.iter().zip(found_post.iter()).enumerate() {
if pre != post |
}
panic!("found_post!= found_pre");
}
}
if opts.dry_run || opts.verbose {
println!("generated:\n>>>\n{}<<<", &buf_generate);
}
if opts.dry_run {
println!("DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN");
println!("hosts file not modified");
return;
}
let mut file_hosts_new = OpenOptions::new()
.write(true)
.create_new(true)
.open(PATH_HOSTSFILE_NEW)
.expect("unable to open new hosts file for writing! Stale file from previous run?");
file_hosts_new
.write_all(buf_generate.as_bytes())
.expect("unable to write generated hosts file");
file_hosts_new
.set_len(buf_generate.as_bytes().len() as u64)
.expect("unable to truncate hosts file to right len");
file_hosts_new.flush().expect("unable to flush hosts file");
// close file handles
drop(file_hosts_new);
drop(file_hosts_orig);
rename(PATH_HOSTSFILE_NEW, PATH_HOSTSFILE).expect("unable to move new hosts file into place!");
}
fn trim_hosts_parts(hosts_parts: &mut Vec<HostsPart>) {
let trim = hosts_parts
.iter()
.rev()
.take_while(|part| part.is_empty())
.count();
hosts_parts.truncate(hosts_parts.len() - trim);
}
fn perform_actions(
opts: &mut opts::HostsArgs,
hosts: &mut Vec<HostsPart>,
config: &HostsmodConfig,
) -> Result<(), String> {
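// Semantics of the actions handled by the match below:
//   Define(ip, host)          - add `host` under `ip`, keeping other entries, but
//                               error on a duplicate entry of the same family;
//   DefineExclusive(ip, host) - drop every existing entry for `host`, then insert
//                               the single `ip -> host` mapping;
//   Remove(host)              - delete `host` everywhere, preserving hostnames
//                               that share an entry line with it.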
'loop_actions: for action in &opts.actions {
match action {
Action::Define(ip, host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
// eprintln!("defining additionally...: {:?} += {:?}", ip, host);
let mut opt_insert = Some(hosts.len());
let mut host_found_v4 = false;
let mut host_found_v6 = false;
for (i, part) in hosts
.iter_mut()
.enumerate()
.filter(|(_i, p)| p.matches_ip(ip) || p.matches_hostname(host))
{
// eprintln!("matching entry: {:?}", part);
let matches_hostname = part.matches_hostname(host);
if part.matches_ip(ip) && matches_hostname {
// eprintln!("already defined, NOP");
//opt_insert = None;
continue 'loop_actions;
}
if matches_hostname {
match part.get_family() {
Some(HostsPartFamily::IPv4) => {
if host_found_v4 || ip.is_ipv4() {
return Err(format!(
"duplicate entry for host {:?} {:?}",
host,
HostsPartFamily::IPv4
));
}
host_found_v4 = true;
}
Some(HostsPartFamily::IPv6) => {
if host_found_v6 || ip.is_ipv6() {
return Err(format!(
"duplicate entry for host {:?} {:?}",
host,
HostsPartFamily::IPv6
));
}
host_found_v6 = true;
}
None => {}
};
}
if opt_insert.is_some() {
opt_insert = Some(i + 1);
}
}
if let Some(insert) = opt_insert {
let insert = min(insert, hosts.len());
hosts.insert(
insert,
HostsPart::Entry(ip.clone(), vec![Cow::Owned(host.clone())], None),
);
}
}
Action::DefineExclusive(ip, host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
// eprintln!("defining exclusively...: {:?} += {:?}", ip, host);
let mut vec_remove = vec![];
for (i, _part) in hosts
.iter()
.enumerate()
.filter(|(_i, p)| p.matches_hostname(host))
{
// eprintln!("matching entry: {:?}", part);
// if part.matches_ip(ip) && part.matches_hostname(host) {
// eprintln!("already defined, NOP");
// return;
// }
// insert = i + 1;
vec_remove.push(i);
}
for remove in vec_remove.iter().rev() {
hosts.remove(*remove);
}
let insert = vec_remove.into_iter().min().unwrap_or(hosts.len());
hosts.insert(
insert,
HostsPart::Entry(ip.clone(), vec![Cow::Owned(host.clone())], None),
);
}
Action::Remove(host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
let mut vec_remove = vec![];
let mut vec_insert = vec![];
let mut offset_remove = 0;
for (i, part) in hosts
.iter()
.enumerate()
.filter(|(_i, p)| p.matches_hostname(host))
{
match part {
HostsPart::Entry(ip, hosts, opt_comment) => {
// eprintln!("matching entry: {:?}", (&ip, &hosts, &opt_comment));
if hosts.len() > 1 {
let mut hosts_filtered = hosts.clone();
hosts_filtered.retain(|ent| ent != host);
vec_insert.push((
i,
HostsPart::Entry(
ip.clone(),
hosts_filtered,
opt_comment.clone(),
),
));
offset_remove += 1;
}
vec_remove.push(offset_remove + i);
// for h in hosts {
// if h == host {
// }
// }
}
_ => {}
}
}
// dbg!(&vec_insert);
for (idx, part) in vec_insert {
hosts.insert(idx, part);
}
// dbg!(&vec_remove);
// unimplemented!();
for remove in vec_remove.iter().rev() {
hosts.remove(*remove);
| {
eprintln!("Difference: {:?}", DONT_TOUCH[i])
} | conditional_block |
main.rs | Cow;
use std::cmp::min;
use std::fs::{rename, File, OpenOptions};
use std::io::{stdout, BufReader, Read, Write};
use std::net::IpAddr;
use structopt::StructOpt;
const PATH_HOSTSFILE: &str = "/etc/hosts";
const PATH_HOSTSFILE_NEW: &str = "/etc/hosts.new";
const PATH_CONFIG: &str = "/etc/hostsmod.yaml";
fn main() {
let hostname_os_string = hostname::get().expect("unable to determine system hostname");
let hostname = hostname_os_string
.to_str()
.expect("system hostname is not a valid UTF-8 string");
let mut opts: opts::HostsArgs = {
let app: structopt::clap::App = opts::HostsArgs::clap();
let str_about = format!(
r##"Tool for mopdifying system wide hosts file to simulate arbitrary DNS A and AAAA records.
Expects a hosts file at {:?} and a configuration in YAML format at {:?}. This
program is intended to be run by non-privileged users with the help of setuid. It therefore has
some safety features.
Any modifications will not be persisted until the end of program execution. In the event of any
error, the original hosts file will not be modified.
The configuration defines a whitelist of hostnames that can be modified. This program will refuse
to modify any hostname not present in that list. It will also ensure that certain hostnames are
never modified:
- {:?}
- {:?}
- {:?}
- {:?}
- {:?}
- {:?} <- current hostname
The only exception is if the config variable `enable_dangerous_operations` is set to true. Then even
these reserved hostnames can be modified."##,
PATH_HOSTSFILE,
PATH_CONFIG,
config::RESERVED_LOCALHOST,
config::RESERVED_IP6_LOCALHOST,
config::RESERVED_IP6_LOOPBACK,
config::RESERVED_IP6_ALLNODES,
config::RESERVED_IP6_ALLROUTERS,
hostname
);
let app = app
//.before_help("PRE!!!")
//.after_help("POST!!!")
.about(str_about.as_ref());
opts::HostsArgs::from_clap(&app.get_matches())
};
if opts.generate_sample_config {
let mut out = stdout();
let mut sample = HostsmodConfig::default();
sample.whitelist.insert("somerandomhost.with.tld".into());
serde_yaml::to_writer(&mut out, &sample).expect("unable to write default config to stdout");
return;
}
let euid = users::get_effective_uid();
// dbg!(uid);
if euid != 0 {
eprintln!("not effectively root, forced dry-run mode");
opts.dry_run = true;
}
// dbg!(opts);
// open file
let mut file_hosts_orig = OpenOptions::new()
.read(true)
//.write(!opts.dry_run)
.write(false)
.truncate(false)
.create(false)
.open(PATH_HOSTSFILE)
.expect("unable to open hosts");
// let opt_file_hosts_new = if opts.dry_run {
// None
// } else {
// Some(
// OpenOptions::new()
// .write(true)
// .create_new(true)
// .open(PATH_HOSTSFILE_NEW)
// .expect("unable to open new hosts file for writing! Stale file from previous run?"),
// )
// };
let mut str_content = String::with_capacity(1024 * 8);
let len_content = file_hosts_orig
.read_to_string(&mut str_content)
.expect("unable to read hosts file as UTF-8 string");
let mut hosts_parts =
try_parse_hosts(&str_content).expect("unable to parse contents of hosts file");
trim_hosts_parts(&mut hosts_parts);
let hosts_parts_orig = hosts_parts.clone();
// eprintln!("PRE-actions: {:#?}", &hosts_parts);
let cfg: HostsmodConfig = {
// TODO: check config file ownership & access rights
let file_cfg = BufReader::new(File::open(PATH_CONFIG).expect("unable to open config file"));
serde_yaml::from_reader(file_cfg).expect("unable to parse configuration")
};
if opts.dry_run || opts.verbose {
if opts.verbose {
eprintln!("config: {:#?}", cfg);
}
println!("original contents:\n>>>\n{}<<<", str_content);
}
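// Record which untouchable DONT_TOUCH entries are present before any edits; the
// same scan runs again after the edits and both results must match, so no
// whitelisted action can add or remove a reserved mapping.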
let mut found_pre = vec![false; DONT_TOUCH.len()];
if !cfg.enable_dangerous_operations {
for (dt, found) in DONT_TOUCH.iter().zip(found_pre.iter_mut()) {
let dt_host = if dt.hostname == RESERVED_HOSTNAME {
Cow::Borrowed(hostname)
} else {
Cow::Borrowed(dt.hostname.as_ref())
};
for part in &hosts_parts {
if part.matches_hostname(&dt_host) && part.matches_ip(&dt.ip) {
*found = true;
}
}
}
}
let found_pre = found_pre;
// execute actions
perform_actions(&mut opts, &mut hosts_parts, &cfg).expect("unable to modify hosts file");
if !opts.dry_run && hosts_parts == hosts_parts_orig {
if opts.verbose {
println!("no changes, not modifying hosts file");
}
return;
}
// remove redundant Empty elements
trim_hosts_parts(&mut hosts_parts);
{
let mut remove = false;
hosts_parts.retain(|item| match (item.is_empty(), remove) {
(true, true) => false,
(true, false) => {
remove = true;
true
}
(false, _) => {
remove = false;
true
}
});
}
// eprintln!("POST-actions: {:#?}", &hosts_parts);
// compare against DONT_TOUCH
let buf_generate = generate_hosts_file(len_content, &hosts_parts);
// eprintln!(">\n{}<", &buf_generate);
// safety checks
if !cfg.enable_dangerous_operations {
let mut found_post = vec![false; DONT_TOUCH.len()];
for (dt, found) in DONT_TOUCH.iter().zip(found_post.iter_mut()) {
let dt_host = if dt.hostname == RESERVED_HOSTNAME {
Cow::Borrowed(hostname)
} else {
Cow::Borrowed(dt.hostname.as_ref())
};
for part in &hosts_parts {
match (part.matches_hostname(&dt_host), part.matches_ip(&dt.ip)) {
(true, true) => {
*found = true;
}
(true, false) => {
if DONT_TOUCH
.iter()
.find(|dt_lookup| {
// eprint!("conflict: {:?} == {:?} ", part, dt_lookup);
let res = part.matches_hostname(&dt_lookup.hostname)
&& part.matches_ip(&dt_lookup.ip);
// eprintln!("{}", res);
res
})
.is_none()
{
panic!(
"untouchable entry {:?} {:?} was changed! {:?}",
dt.ip, dt_host, part
);
}
// *found = true;
}
(false, _) => {}
}
}
}
if found_post != found_pre {
dbg!(&found_pre);
dbg!(&found_post);
for (i, (pre, post)) in found_pre.iter().zip(found_post.iter()).enumerate() {
if pre != post {
eprintln!("Difference: {:?}", DONT_TOUCH[i])
}
}
panic!("found_post!= found_pre");
}
}
if opts.dry_run || opts.verbose {
println!("generated:\n>>>\n{}<<<", &buf_generate);
}
if opts.dry_run {
println!("DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN");
println!("hosts file not modified");
return;
}
let mut file_hosts_new = OpenOptions::new()
.write(true)
.create_new(true)
.open(PATH_HOSTSFILE_NEW)
.expect("unable to open new hosts file for writing! Stale file from previous run?");
file_hosts_new
.write_all(buf_generate.as_bytes())
.expect("unable to write generated hosts file");
file_hosts_new
.set_len(buf_generate.as_bytes().len() as u64)
.expect("unable to truncate hosts file to right len");
file_hosts_new.flush().expect("unable to flush hosts file");
// close file handles
drop(file_hosts_new);
drop(file_hosts_orig);
rename(PATH_HOSTSFILE_NEW, PATH_HOSTSFILE).expect("unable to move new hosts file into place!");
}
fn trim_hosts_parts(hosts_parts: &mut Vec<HostsPart>) {
let trim = hosts_parts
.iter()
.rev()
.take_while(|part| part.is_empty())
.count();
hosts_parts.truncate(hosts_parts.len() - trim);
}
fn perform_actions(
opts: &mut opts::HostsArgs,
hosts: &mut Vec<HostsPart>,
config: &HostsmodConfig,
) -> Result<(), String> | //opt_insert = None;
continue 'loop_actions;
}
if matches_hostname {
match part.get_family() {
Some(HostsPartFamily::IPv4) => {
if host_found_v4 || ip.is_ipv4() {
return Err(format!(
"duplicate entry for host {:?} {:?}",
host,
HostsPartFamily::IPv4
));
}
host_found_v4 = true;
}
Some(HostsPartFamily::IPv6) => {
if host_found_v6 || ip.is_ipv6() {
return Err(format!(
"duplicate entry for host {:?} {:?}",
host,
HostsPartFamily::IPv6
));
}
host_found_v6 = true;
}
None => {}
};
}
if opt_insert.is_some() {
opt_insert = Some(i + 1);
}
}
if let Some(insert) = opt_insert {
let insert = min(insert, hosts.len());
hosts.insert(
insert,
HostsPart::Entry(ip.clone(), vec![Cow::Owned(host.clone())], None),
);
}
}
Action::DefineExclusive(ip, host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
// eprintln!("defining exclusively...: {:?} += {:?}", ip, host);
let mut vec_remove = vec![];
for (i, _part) in hosts
.iter()
.enumerate()
.filter(|(_i, p)| p.matches_hostname(host))
{
// eprintln!("matching entry: {:?}", part);
// if part.matches_ip(ip) && part.matches_hostname(host) {
// eprintln!("already defined, NOP");
// return;
// }
// insert = i + 1;
vec_remove.push(i);
}
for remove in vec_remove.iter().rev() {
hosts.remove(*remove);
}
let insert = vec_remove.into_iter().min().unwrap_or(hosts.len());
hosts.insert(
insert,
HostsPart::Entry(ip.clone(), vec![Cow::Owned(host.clone())], None),
);
}
Action::Remove(host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
let mut vec_remove = vec![];
let mut vec_insert = vec![];
let mut offset_remove = 0;
for (i, part) in hosts
.iter()
.enumerate()
.filter(|(_i, p)| p.matches_hostname(host))
{
match part {
HostsPart::Entry(ip, hosts, opt_comment) => {
// eprintln!("matching entry: {:?}", (&ip, &hosts, &opt_comment));
if hosts.len() > 1 {
let mut hosts_filtered = hosts.clone();
hosts_filtered.retain(|ent| ent != host);
vec_insert.push((
i,
HostsPart::Entry(
ip.clone(),
hosts_filtered,
opt_comment.clone(),
),
));
offset_remove += 1;
}
vec_remove.push(offset_remove + i);
// for h in hosts {
// if h == host {
// }
// }
}
_ => {}
}
}
// dbg!(&vec_insert);
for (idx, part) in vec_insert {
hosts.insert(idx, part);
}
// dbg!(&vec_remove);
// unimplemented!();
for remove in vec_remove.iter().rev() {
hosts.remove(*remove);
| {
'loop_actions: for action in &opts.actions {
match action {
Action::Define(ip, host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
// eprintln!("defining additionally...: {:?} += {:?}", ip, host);
let mut opt_insert = Some(hosts.len());
let mut host_found_v4 = false;
let mut host_found_v6 = false;
for (i, part) in hosts
.iter_mut()
.enumerate()
.filter(|(_i, p)| p.matches_ip(ip) || p.matches_hostname(host))
{
// eprintln!("matching entry: {:?}", part);
let matches_hostname = part.matches_hostname(host);
if part.matches_ip(ip) && matches_hostname {
// eprintln!("already defined, NOP"); | identifier_body |
main.rs | borrow::Cow;
use std::cmp::min;
use std::fs::{rename, File, OpenOptions};
use std::io::{stdout, BufReader, Read, Write};
use std::net::IpAddr;
use structopt::StructOpt;
const PATH_HOSTSFILE: &str = "/etc/hosts";
const PATH_HOSTSFILE_NEW: &str = "/etc/hosts.new";
const PATH_CONFIG: &str = "/etc/hostsmod.yaml";
fn main() {
let hostname_os_string = hostname::get().expect("unable to determine system hostname");
let hostname = hostname_os_string
.to_str()
.expect("system hostname is not a valid UTF-8 string");
let mut opts: opts::HostsArgs = {
let app: structopt::clap::App = opts::HostsArgs::clap();
let str_about = format!(
r##"Tool for mopdifying system wide hosts file to simulate arbitrary DNS A and AAAA records.
Expects a hosts file at {:?} and a configuration in YAML format at {:?}. This
program is intended to be run by non-privileged users with the help of setuid. It therefore has
some safety features.
Any modifications will not be persisted until the end of program execution. In the event of any
error, the original hosts file will not be modified.
The configuration defines a whitelist of hostnames that can be modified. This program will refuse
to modify any hostname not present in that list. It will also ensure that certain hostnames are
never modified:
- {:?}
- {:?}
- {:?}
- {:?}
- {:?}
- {:?} <- current hostname
The only exception is if the config variable `enable_dangerous_operations` is set to true. Then even
these reserved hostnames can be modified."##,
PATH_HOSTSFILE,
PATH_CONFIG,
config::RESERVED_LOCALHOST,
config::RESERVED_IP6_LOCALHOST,
config::RESERVED_IP6_LOOPBACK,
config::RESERVED_IP6_ALLNODES,
config::RESERVED_IP6_ALLROUTERS,
hostname
);
let app = app
//.before_help("PRE!!!")
//.after_help("POST!!!")
.about(str_about.as_ref());
opts::HostsArgs::from_clap(&app.get_matches())
};
if opts.generate_sample_config {
let mut out = stdout();
let mut sample = HostsmodConfig::default();
sample.whitelist.insert("somerandomhost.with.tld".into());
serde_yaml::to_writer(&mut out, &sample).expect("unable to write default config to stdout");
return;
}
let euid = users::get_effective_uid();
// dbg!(uid);
if euid != 0 {
eprintln!("not effectively root, forced dry-run mode");
opts.dry_run = true;
}
// dbg!(opts);
// open file
let mut file_hosts_orig = OpenOptions::new()
.read(true)
//.write(!opts.dry_run)
.write(false)
.truncate(false)
.create(false)
.open(PATH_HOSTSFILE)
.expect("unable to open hosts");
// let opt_file_hosts_new = if opts.dry_run {
// None
// } else {
// Some(
// OpenOptions::new()
// .write(true)
// .create_new(true)
// .open(PATH_HOSTSFILE_NEW)
// .expect("unable to open new hosts file for writing! Stale file from previous run?"),
// )
// };
let mut str_content = String::with_capacity(1024 * 8);
let len_content = file_hosts_orig
.read_to_string(&mut str_content)
.expect("unable to read hosts file as UTF-8 string");
let mut hosts_parts =
try_parse_hosts(&str_content).expect("unable to parse contents of hosts file");
trim_hosts_parts(&mut hosts_parts);
let hosts_parts_orig = hosts_parts.clone();
// eprintln!("PRE-actions: {:#?}", &hosts_parts);
let cfg: HostsmodConfig = {
// TODO: check config file ownership & access rights
let file_cfg = BufReader::new(File::open(PATH_CONFIG).expect("unable to open config file"));
serde_yaml::from_reader(file_cfg).expect("unable to parse configuration")
};
if opts.dry_run || opts.verbose {
if opts.verbose {
eprintln!("config: {:#?}", cfg);
}
println!("original contents:\n>>>\n{}<<<", str_content);
}
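// Record which untouchable DONT_TOUCH entries are present before any edits; the
// same scan runs again after the edits and both results must match, so no
// whitelisted action can add or remove a reserved mapping.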
let mut found_pre = vec![false; DONT_TOUCH.len()];
if !cfg.enable_dangerous_operations {
for (dt, found) in DONT_TOUCH.iter().zip(found_pre.iter_mut()) {
let dt_host = if dt.hostname == RESERVED_HOSTNAME {
Cow::Borrowed(hostname)
} else {
Cow::Borrowed(dt.hostname.as_ref())
};
for part in &hosts_parts {
if part.matches_hostname(&dt_host) && part.matches_ip(&dt.ip) {
*found = true;
}
}
}
}
let found_pre = found_pre;
// execute actions
perform_actions(&mut opts, &mut hosts_parts, &cfg).expect("unable to modify hosts file");
if !opts.dry_run && hosts_parts == hosts_parts_orig {
if opts.verbose {
println!("no changes, not modifying hosts file");
}
return;
}
// remove redundant Empty elements
trim_hosts_parts(&mut hosts_parts);
{
let mut remove = false;
hosts_parts.retain(|item| match (item.is_empty(), remove) {
(true, true) => false,
(true, false) => {
remove = true;
true
}
(false, _) => {
remove = false;
true
}
});
}
// eprintln!("POST-actions: {:#?}", &hosts_parts);
// compare against DONT_TOUCH
let buf_generate = generate_hosts_file(len_content, &hosts_parts);
// eprintln!(">\n{}<", &buf_generate);
// safety checks
if !cfg.enable_dangerous_operations {
let mut found_post = vec![false; DONT_TOUCH.len()];
for (dt, found) in DONT_TOUCH.iter().zip(found_post.iter_mut()) {
let dt_host = if dt.hostname == RESERVED_HOSTNAME {
Cow::Borrowed(hostname)
} else {
Cow::Borrowed(dt.hostname.as_ref())
};
for part in &hosts_parts {
match (part.matches_hostname(&dt_host), part.matches_ip(&dt.ip)) {
(true, true) => {
*found = true;
}
(true, false) => {
if DONT_TOUCH
.iter()
.find(|dt_lookup| {
// eprint!("conflict: {:?} == {:?} ", part, dt_lookup);
let res = part.matches_hostname(&dt_lookup.hostname)
&& part.matches_ip(&dt_lookup.ip);
// eprintln!("{}", res);
res
})
.is_none()
{
panic!(
"untouchable entry {:?} {:?} was changed! {:?}",
dt.ip, dt_host, part
);
}
// *found = true;
}
(false, _) => {}
}
}
}
if found_post != found_pre {
dbg!(&found_pre);
dbg!(&found_post);
for (i, (pre, post)) in found_pre.iter().zip(found_post.iter()).enumerate() {
if pre != post {
eprintln!("Difference: {:?}", DONT_TOUCH[i])
}
}
panic!("found_post!= found_pre");
}
}
if opts.dry_run || opts.verbose {
println!("generated:\n>>>\n{}<<<", &buf_generate);
}
if opts.dry_run {
println!("DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN");
println!("hosts file not modified");
return;
}
let mut file_hosts_new = OpenOptions::new()
.write(true)
.create_new(true)
.open(PATH_HOSTSFILE_NEW)
.expect("unable to open new hosts file for writing! Stale file from previous run?");
file_hosts_new
.write_all(buf_generate.as_bytes())
.expect("unable to write generated hosts file");
file_hosts_new
.set_len(buf_generate.as_bytes().len() as u64)
.expect("unable to truncate hosts file to right len");
file_hosts_new.flush().expect("unable to flush hosts file");
// close file handles
drop(file_hosts_new);
drop(file_hosts_orig);
rename(PATH_HOSTSFILE_NEW, PATH_HOSTSFILE).expect("unable to move new hosts file into place!");
}
fn trim_hosts_parts(hosts_parts: &mut Vec<HostsPart>) {
let trim = hosts_parts
.iter()
.rev()
.take_while(|part| part.is_empty())
.count();
hosts_parts.truncate(hosts_parts.len() - trim);
}
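// e.g. [Entry, Empty, Empty] -> [Entry]: only trailing Empty parts are
// dropped here; interior runs are collapsed by the retain pass in main().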
fn perform_actions(
opts: &mut opts::HostsArgs,
hosts: &mut Vec<HostsPart>,
config: &HostsmodConfig,
) -> Result<(), String> {
'loop_actions: for action in &opts.actions {
match action {
Action::Define(ip, host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
// eprintln!("defining additionally...: {:?} += {:?}", ip, host);
let mut opt_insert = Some(hosts.len());
let mut host_found_v4 = false;
let mut host_found_v6 = false;
for (i, part) in hosts
.iter_mut() | // eprintln!("matching entry: {:?}", part);
let matches_hostname = part.matches_hostname(host);
if part.matches_ip(ip) && matches_hostname {
// eprintln!("already defined, NOP");
//opt_insert = None;
continue 'loop_actions;
}
if matches_hostname {
match part.get_family() {
Some(HostsPartFamily::IPv4) => {
if host_found_v4 || ip.is_ipv4() {
return Err(format!(
"duplicate entry for host {:?} {:?}",
host,
HostsPartFamily::IPv4
));
}
host_found_v4 = true;
}
Some(HostsPartFamily::IPv6) => {
if host_found_v6 || ip.is_ipv6() {
return Err(format!(
"duplicate entry for host {:?} {:?}",
host,
HostsPartFamily::IPv6
));
}
host_found_v6 = true;
}
None => {}
};
}
if opt_insert.is_some() {
opt_insert = Some(i + 1);
}
}
if let Some(insert) = opt_insert {
let insert = min(insert, hosts.len());
hosts.insert(
insert,
HostsPart::Entry(ip.clone(), vec![Cow::Owned(host.clone())], None),
);
}
}
Action::DefineExclusive(ip, host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
// eprintln!("defining exclusively...: {:?} += {:?}", ip, host);
let mut vec_remove = vec![];
for (i, _part) in hosts
.iter()
.enumerate()
.filter(|(_i, p)| p.matches_hostname(host))
{
// eprintln!("matching entry: {:?}", part);
// if part.matches_ip(ip) && part.matches_hostname(host) {
// eprintln!("already defined, NOP");
// return;
// }
// insert = i + 1;
vec_remove.push(i);
}
for remove in vec_remove.iter().rev() {
hosts.remove(*remove);
}
let insert = vec_remove.into_iter().min().unwrap_or(hosts.len());
hosts.insert(
insert,
HostsPart::Entry(ip.clone(), vec![Cow::Owned(host.clone())], None),
);
}
Action::Remove(host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
let mut vec_remove = vec![];
let mut vec_insert = vec![];
let mut offset_remove = 0;
for (i, part) in hosts
.iter()
.enumerate()
.filter(|(_i, p)| p.matches_hostname(host))
{
match part {
HostsPart::Entry(ip, hosts, opt_comment) => {
// eprintln!("matching entry: {:?}", (&ip, &hosts, &opt_comment));
if hosts.len() > 1 {
let mut hosts_filtered = hosts.clone();
hosts_filtered.retain(|ent| ent != host);
vec_insert.push((
i,
HostsPart::Entry(
ip.clone(),
hosts_filtered,
opt_comment.clone(),
),
));
offset_remove += 1;
}
vec_remove.push(offset_remove + i);
// for h in hosts {
// if h == host {
// }
// }
}
_ => {}
}
}
// dbg!(&vec_insert);
for (idx, part) in vec_insert {
hosts.insert(idx, part);
}
// dbg!(&vec_remove);
// unimplemented!();
for remove in vec_remove.iter().rev() {
hosts.remove(*remove);
| .enumerate()
.filter(|(_i, p)| p.matches_ip(ip) || p.matches_hostname(host))
{ | random_line_split |
main.rs |
let hostname_os_string = hostname::get().expect("unable to determine system hostname");
let hostname = hostname_os_string
.to_str()
.expect("system hostname is not a valid UTF-8 string");
let mut opts: opts::HostsArgs = {
let app: structopt::clap::App = opts::HostsArgs::clap();
let str_about = format!(
r##"Tool for mopdifying system wide hosts file to simulate arbitrary DNS A and AAAA records.
Expects a hosts file at {:?} and a configuration in YAML format at {:?}. This
program is intended to be run by non-privileged users with the help of setuid. It therefore has
some safety features.
Any modifications will not be persisted until the end of program execution. In the event of any
error, the original hosts file will not be modified.
The configuration defines a whitelist of hostnames that can be modified. This program will refuse
to modify any hostname not present in that list. It will also ensure that certain hostnames are
never modified:
- {:?}
- {:?}
- {:?}
- {:?}
- {:?}
- {:?} <- current hostname
The only exception is if the config variable `enable_dangerous_operations` is set to true. Then even
these reserved hostnames can be modified."##,
PATH_HOSTSFILE,
PATH_CONFIG,
config::RESERVED_LOCALHOST,
config::RESERVED_IP6_LOCALHOST,
config::RESERVED_IP6_LOOPBACK,
config::RESERVED_IP6_ALLNODES,
config::RESERVED_IP6_ALLROUTERS,
hostname
);
let app = app
//.before_help("PRE!!!")
//.after_help("POST!!!")
.about(str_about.as_ref());
opts::HostsArgs::from_clap(&app.get_matches())
};
if opts.generate_sample_config {
let mut out = stdout();
let mut sample = HostsmodConfig::default();
sample.whitelist.insert("somerandomhost.with.tld".into());
serde_yaml::to_writer(&mut out, &sample).expect("unable to write default config to stdout");
return;
}
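// The emitted YAML looks roughly like this (sketch; field names taken from
// the HostsmodConfig usages elsewhere in this file):
//
// enable_dangerous_operations: false
// whitelist:
//   - somerandomhost.with.tld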
let euid = users::get_effective_uid();
// dbg!(uid);
if euid != 0 {
eprintln!("not effectively root, forced dry-run mode");
opts.dry_run = true;
}
// dbg!(opts);
// open file
let mut file_hosts_orig = OpenOptions::new()
.read(true)
//.write(!opts.dry_run)
.write(false)
.truncate(false)
.create(false)
.open(PATH_HOSTSFILE)
.expect("unable to open hosts");
// let opt_file_hosts_new = if opts.dry_run {
// None
// } else {
// Some(
// OpenOptions::new()
// .write(true)
// .create_new(true)
// .open(PATH_HOSTSFILE_NEW)
// .expect("unable to open new hosts file for writing! Stale file from previous run?"),
// )
// };
let mut str_content = String::with_capacity(1024 * 8);
let len_content = file_hosts_orig
.read_to_string(&mut str_content)
.expect("unable to read hosts file as UTF-8 string");
let mut hosts_parts =
try_parse_hosts(&str_content).expect("unable to parse contents of hosts file");
trim_hosts_parts(&mut hosts_parts);
let hosts_parts_orig = hosts_parts.clone();
// eprintln!("PRE-actions: {:#?}", &hosts_parts);
let cfg: HostsmodConfig = {
// TODO: check config file ownership & access rights
let file_cfg = BufReader::new(File::open(PATH_CONFIG).expect("unable to open config file"));
serde_yaml::from_reader(file_cfg).expect("unable to parse configuration")
};
if opts.dry_run || opts.verbose {
if opts.verbose {
eprintln!("config: {:#?}", cfg);
}
println!("original contents:\n>>>\n{}<<<", str_content);
}
let mut found_pre = vec![false; DONT_TOUCH.len()];
if !cfg.enable_dangerous_operations {
for (dt, found) in DONT_TOUCH.iter().zip(found_pre.iter_mut()) {
let dt_host = if dt.hostname == RESERVED_HOSTNAME {
Cow::Borrowed(hostname)
} else {
Cow::Borrowed(dt.hostname.as_ref())
};
for part in &hosts_parts {
if part.matches_hostname(&dt_host) && part.matches_ip(&dt.ip) {
*found = true;
}
}
}
}
let found_pre = found_pre;
// execute actions
perform_actions(&mut opts, &mut hosts_parts, &cfg).expect("unable to modify hosts file");
if !opts.dry_run && hosts_parts == hosts_parts_orig {
if opts.verbose {
println!("no changes, not modifying hosts file");
}
return;
}
// remove redundant Empty elements
trim_hosts_parts(&mut hosts_parts);
{
let mut remove = false;
hosts_parts.retain(|item| match (item.is_empty(), remove) {
(true, true) => false,
(true, false) => {
remove = true;
true
}
(false, _) => {
remove = false;
true
}
});
}
// eprintln!("POST-actions: {:#?}", &hosts_parts);
// compare against DONT_TOUCH
let buf_generate = generate_hosts_file(len_content, &hosts_parts);
// eprintln!(">\n{}<", &buf_generate);
// safety checks
if !cfg.enable_dangerous_operations {
let mut found_post = vec![false; DONT_TOUCH.len()];
for (dt, found) in DONT_TOUCH.iter().zip(found_post.iter_mut()) {
let dt_host = if dt.hostname == RESERVED_HOSTNAME {
Cow::Borrowed(hostname)
} else {
Cow::Borrowed(dt.hostname.as_ref())
};
for part in &hosts_parts {
match (part.matches_hostname(&dt_host), part.matches_ip(&dt.ip)) {
(true, true) => {
*found = true;
}
(true, false) => {
if DONT_TOUCH
.iter()
.find(|dt_lookup| {
// eprint!("conflict: {:?} == {:?} ", part, dt_lookup);
let res = part.matches_hostname(&dt_lookup.hostname)
&& part.matches_ip(&dt_lookup.ip);
// eprintln!("{}", res);
res
})
.is_none()
{
panic!(
"untouchable entry {:?} {:?} was changed! {:?}",
dt.ip, dt_host, part
);
}
// *found = true;
}
(false, _) => {}
}
}
}
if found_post != found_pre {
dbg!(&found_pre);
dbg!(&found_post);
for (i, (pre, post)) in found_pre.iter().zip(found_post.iter()).enumerate() {
if pre != post {
eprintln!("Difference: {:?}", DONT_TOUCH[i])
}
}
panic!("found_post != found_pre");
}
}
if opts.dry_run || opts.verbose {
println!("generated:\n>>>\n{}<<<", &buf_generate);
}
if opts.dry_run {
println!("DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN DRY-RUN");
println!("hosts file not modified");
return;
}
let mut file_hosts_new = OpenOptions::new()
.write(true)
.create_new(true)
.open(PATH_HOSTSFILE_NEW)
.expect("unable to open new hosts file for writing! Stale file from previous run?");
file_hosts_new
.write_all(buf_generate.as_bytes())
.expect("unable to write generated hosts file");
file_hosts_new
.set_len(buf_generate.as_bytes().len() as u64)
.expect("unable to truncate hosts file to right len");
file_hosts_new.flush().expect("unable to flush hosts file");
// close file handles
drop(file_hosts_new);
drop(file_hosts_orig);
rename(PATH_HOSTSFILE_NEW, PATH_HOSTSFILE).expect("unable to move new hosts file into place!");
}
fn trim_hosts_parts(hosts_parts: &mut Vec<HostsPart>) {
let trim = hosts_parts
.iter()
.rev()
.take_while(|part| part.is_empty())
.count();
hosts_parts.truncate(hosts_parts.len() - trim);
}
fn perform_actions(
opts: &mut opts::HostsArgs,
hosts: &mut Vec<HostsPart>,
config: &HostsmodConfig,
) -> Result<(), String> {
'loop_actions: for action in &opts.actions {
match action {
Action::Define(ip, host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
// eprintln!("defining additionally...: {:?} += {:?}", ip, host);
let mut opt_insert = Some(hosts.len());
let mut host_found_v4 = false;
let mut host_found_v6 = false;
for (i, part) in hosts
.iter_mut()
.enumerate()
.filter(|(_i, p)| p.matches_ip(ip) || p.matches_hostname(host))
{
// eprintln!("matching entry: {:?}", part);
let matches_hostname = part.matches_hostname(host);
if part.matches_ip(ip) && matches_hostname {
// eprintln!("already defined, NOP");
//opt_insert = None;
continue 'loop_actions;
}
if matches_hostname {
match part.get_family() {
Some(HostsPartFamily::IPv4) => {
if host_found_v4 || ip.is_ipv4() {
return Err(format!(
"duplicate entry for host {:?} {:?}",
host,
HostsPartFamily::IPv4
));
}
host_found_v4 = true;
}
Some(HostsPartFamily::IPv6) => {
if host_found_v6 || ip.is_ipv6() {
return Err(format!(
"duplicate entry for host {:?} {:?}",
host,
HostsPartFamily::IPv6
));
}
host_found_v6 = true;
}
None => {}
};
}
if opt_insert.is_some() {
opt_insert = Some(i + 1);
}
}
if let Some(insert) = opt_insert {
let insert = min(insert, hosts.len());
hosts.insert(
insert,
HostsPart::Entry(ip.clone(), vec![Cow::Owned(host.clone())], None),
);
}
}
Action::DefineExclusive(ip, host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
// eprintln!("defining exclusively...: {:?} += {:?}", ip, host);
let mut vec_remove = vec![];
for (i, _part) in hosts
.iter()
.enumerate()
.filter(|(_i, p)| p.matches_hostname(host))
{
// eprintln!("matching entry: {:?}", part);
// if part.matches_ip(ip) && part.matches_hostname(host) {
// eprintln!("already defined, NOP");
// return;
// }
// insert = i + 1;
vec_remove.push(i);
}
for remove in vec_remove.iter().rev() {
hosts.remove(*remove);
}
let insert = vec_remove.into_iter().min().unwrap_or(hosts.len());
hosts.insert(
insert,
HostsPart::Entry(ip.clone(), vec![Cow::Owned(host.clone())], None),
);
}
Action::Remove(host) => {
if !config.whitelist.contains(host) {
return Err(format!("HOST {:?} not whitelisted!", host));
}
let mut vec_remove = vec![];
let mut vec_insert = vec![];
let mut offset_remove = 0;
for (i, part) in hosts
.iter()
.enumerate()
.filter(|(_i, p)| p.matches_hostname(host))
{
match part {
HostsPart::Entry(ip, hosts, opt_comment) => {
// eprintln!("matching entry: {:?}", (&ip, &hosts, &opt_comment));
if hosts.len() > 1 {
let mut hosts_filtered = hosts.clone();
hosts_filtered.retain(|ent| ent != host);
vec_insert.push((
i,
HostsPart::Entry(
ip.clone(),
hosts_filtered,
opt_comment.clone(),
),
));
offset_remove += 1;
}
vec_remove.push(offset_remove + i);
// for h in hosts {
// if h == host {
// }
// }
}
_ => {}
}
}
// dbg!(&vec_insert);
for (idx, part) in vec_insert {
hosts.insert(idx, part);
}
// dbg!(&vec_remove);
// unimplemented!();
for remove in vec_remove.iter().rev() {
hosts.remove(*remove);
}
}
}
}
Ok(())
}
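// From the caller's perspective perform_actions is all-or-nothing: any Err
// aborts via expect() before the new hosts file is written, so a failed or
// non-whitelisted action never leaves a partially modified file on disk.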
fn generate_hosts_file(len_content: usize, parsed: &Vec<HostsPart>) -> String {
let mut buf_generate = String::with_capacity(len_content);
// eprintln!("rendering: {:?}", parsed);
fn | render_entry | identifier_name |
|
game.rs | use world::World;
use piston_window::*;
use camera::Camera;
use cgmath::{Vector2, vec3};
use cgmath::prelude::*;
use color::*;
use car::*;
use piston_window::Ellipse;
use bot::BoxRules;
use std::cell::RefCell;
use std::ops::DerefMut;
use std::time::Instant;
// `Game` contains everything needed to run the game
pub struct Game {
config: GameConfig,
world: World, // All objects in the game
window: PistonWindow,
bot_rules: BoxRules, // Rules to create a new bot
camera: Camera, // Camera for rendering
state: State, // Current state of game
// Wrap these caches in `RefCell` to allow interior mutability
glyphs: RefCell<Glyphs>, // Font cache
ellipse: RefCell<Ellipse>, // Model to draw a circle
}
struct State {
pub turn: Turn, // Represents the player's steering input
pub sprint: bool, // Player is speeding-up or not
pub spawn: f64, // Count down time to spawn a new bot
pub ended: bool, // Game is over or not
pub game_speed: f64, // Game speed in addition to player's speed
pub jump_timeout: f64, // Count down to allow the next jump
pub rotate_cam: bool, // Allow rotation of camera or not
pub bullets: i64, // The number of bullets left
pub recharge: f64, // Bullets recharge time
pub fps: f64, // Real fps of game
pub last_frame: Instant, // Moment of the last draw
}
pub enum Turn { Left, Right, None, }
// Configurable game constants.
// A tuple represents a (min, max) range.
#[derive(Serialize, Deserialize, Clone)]
pub struct GameConfig {
pub title: String,
pub screen_size: ::Pixel,
pub ups: u64, // Updates per second
pub max_fps: u64,
pub tunel_size: [f64; 3],
pub player_size: [f64; 3],
pub player_speed: (f64, f64), // min and max player speed
pub player_turn_speed: f64,
pub bot_size: [(f64, f64); 3], // Range of bot's size
pub bot_speed: (f64, f64),
pub bot_turn_speed: (f64, f64),
pub divider_size: [f64; 2],
pub camera_height: f64, // Height of camera (from player)
pub camera_distance: f64, // Distance from camera to player
pub decor_distance: f64, // Distance between each decoration
pub sprint_factor: f64,
pub spawn_time: (f64, f64),
pub game_sprint: f64, // The increase of game_speed
pub game_max_speed: f64,
pub player_jump_v: f64,
pub player_jump_a: f64,
pub jump_turn_decrease: f64,
pub jump_timeout: f64,
pub mouse_speed: f64,
pub trueshot_distance: f64,
pub bullet_stock: i64, // Number of bullets
pub recharge_time: f64,
pub bullet_len: f64,
pub bullet_speed: f64,
pub zoom_in: bool, // If true, zoom-in while on stare mode
}
impl Game {
pub fn new(config: GameConfig) -> Game {
let mut window: PistonWindow = WindowSettings::new(
config.title.clone(), [config.screen_size.w, config.screen_size.h])
.exit_on_esc(true).build()
.expect("Cannot create window.");
window.set_ups(config.ups);
window.set_max_fps(config.max_fps);
window.set_capture_cursor(true);
let glyphs = Glyphs::new("resources/Ubuntu-R.ttf", window.factory.clone())
.expect("Unable to load font.");
let bot_rules = BoxRules {
size: config.bot_size,
position: [(0., config.tunel_size[0]), (0., 0.), (config.tunel_size[2], config.tunel_size[2])],
speed: config.bot_speed,
turn_speed: config.bot_turn_speed,
color: vec![RED, ORANGE, VIOLET, GREEN, PALE],
jump_turn_decrease: config.jump_turn_decrease,
};
let world = World::new(&config);
let camera = Game::new_camera(&config, &world.player);
let state = State {
turn: Turn::None,
sprint: false,
spawn: 0.,
ended: false,
game_speed: 0.,
jump_timeout: 0.,
rotate_cam: false,
bullets: config.bullet_stock,
recharge: 0.,
fps: 0.,
last_frame: Instant::now(),
};
let ellipse = Ellipse {
color: BLACK.alpha(0.).into(),
border: Some(ellipse::Border {
color: RED.alpha(0.5).into(),
radius: 1.,
}),
resolution: 16,
};
Game {
config: config,
world: world,
window: window,
bot_rules: bot_rules,
camera: camera,
state: state,
glyphs: RefCell::new(glyphs),
ellipse: RefCell::new(ellipse),
}
}
fn new_camera<T: Car>(config: &GameConfig, player: &T) -> Camera {
Camera::new(
config.screen_size.clone(),
vec3(0., config.camera_height, -config.camera_distance) + player.pos()
)
}
// Re-calculate fps
fn update_fps(&mut self) {
let d = self.state.last_frame.elapsed();
self.state.last_frame = Instant::now();
self.state.fps = 1. / (d.as_secs() as f64 + 1e-9*d.subsec_nanos() as f64);
}
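// The manual conversion above is equivalent to `d.as_secs_f64()`, available
// on std::time::Duration since Rust 1.38.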
pub fn run(&mut self) {
while let Some(e) = self.window.next() {
match e {
Input::Press(key) => self.press(key),
Input::Release(key) => self.release(key),
Input::Render(_) => {
self.update_fps();
self.draw(&e);
},
Input::Update(args) => self.update(args.dt),
Input::Move(Motion::MouseRelative(a, b)) => self.mouse_move(a as f64, b as f64),
_ => {}
}
if self.state.ended {
break;
}
}
}
fn mouse_move(&mut self, x: f64, y: f64) {
if self.state.rotate_cam {
self.camera.rotate(x*self.config.mouse_speed, y*self.config.mouse_speed, self.world.player.position);
}
}
fn press(&mut self, key: Button) {
match key {
Button::Keyboard(Key::A) => self.state.turn = Turn::Left,
Button::Keyboard(Key::D) => self.state.turn = Turn::Right,
Button::Keyboard(Key::W) => self.state.sprint = true,
Button::Keyboard(Key::Space) => if self.state.jump_timeout <= 0. {
self.state.jump_timeout = self.config.jump_timeout;
self.world.player.jump();
},
Button::Mouse(MouseButton::Right) => {
if self.config.zoom_in {
self.camera.zoom_in();
}
self.state.rotate_cam = true;
},
Button::Mouse(MouseButton::Left) => if self.state.rotate_cam && self.state.bullets > 0 {
let mut pos = self.world.player.position;
pos.y += self.world.player.size.y;
let mut d = vec3(0., 0., self.config.trueshot_distance + self.config.camera_distance);
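// Scale the view direction `c` so its depth matches the fixed aim distance,
// giving the aim point relative to the eye; the bullet is then launched from
// the muzzle (`pos`) toward that point at `bullet_speed`.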
d = self.camera.c * d.magnitude2() / d.dot(self.camera.c);
d = self.camera.eye + d - pos;
d = d * self.config.bullet_speed / d.magnitude();
self.world.add_bullet(pos, d, self.config.bullet_len);
self.state.bullets -= 1;
if self.state.bullets <= 0 {
self.state.recharge = self.config.recharge_time;
}
},
_ => (),
}
}
fn release(&mut self, key: Button) |
fn draw(&mut self, e: &Input) {
// Return a horizontal bar
macro_rules! bar {
($curr: expr, $full: expr) => {
[0.,
15.0,
self.config.screen_size.w as f64/2.*$curr/$full,
20.0,]
};
}
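// e.g. bar!(jump_left, jump_full) expands to an [x, y, w, h] rectangle whose
// width fills `jump_left / jump_full` of half the screen width.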
let jump_bar = bar!(self.state.jump_timeout, self.config.jump_timeout);
let recharge_bar = bar!(self.state.recharge, self.config.recharge_time);
let bullets_bar = bar!(self.state.bullets as f64, self.config.bullet_stock as f64);
// Closure in `draw_2d` requires unique access to `self`,
// so we use RefCell to hack it.
let mut glyphs = self.glyphs.borrow_mut();
let fps = format!("{:.3}", self.state.fps);
let lines = self.world.render(&self.camera);
self.window.draw_2d(e, |c, g| {
clear(BLACK.into(), g);
for (l, color) in lines {
line(color.into(), 1., convert(l), c.transform, g);
}
rectangle(BLUE.alpha(0.4).into(), jump_bar, c.transform, g);
rectangle(RED.alpha(0.4).into(), recharge_bar, c.transform, g);
rectangle(GREEN.alpha(0.4).into(), bullets_bar, c.transform, g);
text(WHITE.into(), 10, &fps, glyphs.deref_mut(), c.transform.trans(0., 10.), g);
});
if self.state.rotate_cam {
let w = 20.;
let x = self.config.screen_size.w as f64 /2. - w/2.;
let y = self.config.screen_size.h as f64 /2. - w/2.;
let ellipse = self.ellipse.borrow();
self.window.draw_2d(e, |c, g| {
ellipse.draw([x, y, w, w], &c.draw_state, c.transform, g);
rectangle(RED.into(), [x+w/2.-1., y+w/2.-1., 2., 2.], c.transform, g);
});
}
}
// `dt` stands for delta, duration since the last update
fn update(&mut self, dt: f64) {
// Re-calculate delta according to fps
let dt = if self.state.fps != 0. { 1. / self.state.fps } else { dt };
let old = self.world.player.position;
if self.state.bullets <= 0 {
self.state.recharge -= dt;
if self.state.recharge < 0. {
self.state.bullets = self.config.bullet_stock;
}
}
self.state.jump_timeout -= dt;
if self.state.game_speed < self.config.game_max_speed {
self.state.game_speed += dt*self.config.game_sprint;
}
if self.state.sprint {
if self.world.player.speed < self.config.player_speed.1 {
self.world.player.speed += dt*self.config.sprint_factor;
}
} else if self.world.player.speed > self.config.player_speed.0 {
self.world.player.speed -= dt*self.config.sprint_factor;
}
self.state.spawn -= dt;
if self.state.spawn < 0. {
self.world.add_bot(&self.bot_rules);
self.state.spawn += ::rnd(self.config.spawn_time);
}
match self.state.turn {
Turn::Left => self.world.player.turn_left(dt),
Turn::Right => self.world.player.turn_right(dt),
Turn::None => (),
}
// Update objects in the world
self.world.update(dt, self.state.game_speed);
// Validate things like object's boundary, bullets and boxes
// collisions.
self.world.validate();
// Update camera's location
self.camera.eye += self.world.player.position - old;
// Check for player's collision with bot
if self.world.bots.iter().any(|x| self.world.player.crashed(&x.car)) {
self.state.ended = true;
}
}
}
fn convert(x: [Vector2<f64>; 2]) -> [f64; 4] {
[x[0].x, x[0].y, x[1].x, x[1].y]
}
| {
match key {
Button::Keyboard(Key::A) => if let Turn::Left = self.state.turn {
self.state.turn = Turn::None;
},
Button::Keyboard(Key::D) => if let Turn::Right = self.state.turn {
self.state.turn = Turn::None;
},
Button::Keyboard(Key::W) => self.state.sprint = false,
Button::Mouse(MouseButton::Right) => {
self.state.rotate_cam = false;
self.camera = Game::new_camera(&self.config, &self.world.player);
},
_ => (),
}
} | identifier_body |
game.rs | use world::World;
use piston_window::*;
use camera::Camera;
use cgmath::{Vector2, vec3};
use cgmath::prelude::*;
use color::*;
use car::*;
use piston_window::Ellipse;
use bot::BoxRules;
use std::cell::RefCell;
use std::ops::DerefMut;
use std::time::Instant;
// `Game` contains everything needed to run the game
pub struct Game {
config: GameConfig,
world: World, // All objects in the game
window: PistonWindow,
bot_rules: BoxRules, // Rules to create a new bot
camera: Camera, // Camera for rendering
state: State, // Current state of game
// Wrap these caches in `RefCell` to allow interior mutability
glyphs: RefCell<Glyphs>, // Font cache
ellipse: RefCell<Ellipse>, // Model to draw a circle
}
struct State {
pub turn: Turn, // Represents the player's steering input
pub sprint: bool, // Player is speeding-up or not
pub spawn: f64, // Count down time to spawn a new bot
pub ended: bool, // Game is over or not
pub game_speed: f64, // Game speed in addition to player's speed
pub jump_timeout: f64, // Count down to allow the next jump
pub rotate_cam: bool, // Allow rotation of camera or not
pub bullets: i64, // The number of bullets left
pub recharge: f64, // Bullets recharge time
pub fps: f64, // Real fps of game
pub last_frame: Instant, // Moment of the last draw
}
pub enum Turn { Left, Right, None, }
// Configurable game constants.
// A tuple represents a (min, max) range.
#[derive(Serialize, Deserialize, Clone)]
pub struct GameConfig {
pub title: String,
pub screen_size: ::Pixel,
pub ups: u64, // Updates per second
pub max_fps: u64,
pub tunel_size: [f64; 3],
pub player_size: [f64; 3],
pub player_speed: (f64, f64), // min and max player speed
pub player_turn_speed: f64,
pub bot_size: [(f64, f64); 3], // Range of bot's size
pub bot_speed: (f64, f64),
pub bot_turn_speed: (f64, f64),
pub divider_size: [f64; 2],
pub camera_height: f64, // Height of camera (from player)
pub camera_distance: f64, // Distance from camera to player
pub decor_distance: f64, // Distance between each decoration
pub sprint_factor: f64,
pub spawn_time: (f64, f64),
pub game_sprint: f64, // The increase of game_speed
pub game_max_speed: f64,
pub player_jump_v: f64,
pub player_jump_a: f64,
pub jump_turn_decrease: f64,
pub jump_timeout: f64,
pub mouse_speed: f64,
pub trueshot_distance: f64,
pub bullet_stock: i64, // Number of bullets
pub recharge_time: f64,
pub bullet_len: f64,
pub bullet_speed: f64,
pub zoom_in: bool, // If true, zoom-in while on stare mode
}
impl Game {
pub fn new(config: GameConfig) -> Game {
let mut window: PistonWindow = WindowSettings::new(
config.title.clone(), [config.screen_size.w, config.screen_size.h])
.exit_on_esc(true).build()
.expect("Cannot create window.");
window.set_ups(config.ups);
window.set_max_fps(config.max_fps);
window.set_capture_cursor(true);
let glyphs = Glyphs::new("resources/Ubuntu-R.ttf", window.factory.clone())
.expect("Unable to load font.");
let bot_rules = BoxRules {
size: config.bot_size,
position: [(0., config.tunel_size[0]), (0., 0.), (config.tunel_size[2], config.tunel_size[2])],
speed: config.bot_speed,
turn_speed: config.bot_turn_speed,
color: vec![RED, ORANGE, VIOLET, GREEN, PALE],
jump_turn_decrease: config.jump_turn_decrease,
};
let world = World::new(&config);
let camera = Game::new_camera(&config, &world.player);
let state = State {
turn: Turn::None,
sprint: false,
spawn: 0.,
ended: false,
game_speed: 0.,
jump_timeout: 0.,
rotate_cam: false,
bullets: config.bullet_stock,
recharge: 0.,
fps: 0.,
last_frame: Instant::now(),
};
let ellipse = Ellipse {
color: BLACK.alpha(0.).into(),
border: Some(ellipse::Border {
color: RED.alpha(0.5).into(),
radius: 1.,
}),
resolution: 16,
};
Game {
config: config,
world: world,
window: window,
bot_rules: bot_rules,
camera: camera,
state: state,
glyphs: RefCell::new(glyphs),
ellipse: RefCell::new(ellipse),
}
}
fn | <T: Car>(config: &GameConfig, player: &T) -> Camera {
Camera::new(
config.screen_size.clone(),
vec3(0., config.camera_height, -config.camera_distance) + player.pos()
)
}
// Re-calculate fps
fn update_fps(&mut self) {
let d = self.state.last_frame.elapsed();
self.state.last_frame = Instant::now();
self.state.fps = 1. / (d.as_secs() as f64 + 1e-9*d.subsec_nanos() as f64);
}
pub fn run(&mut self) {
while let Some(e) = self.window.next() {
match e {
Input::Press(key) => self.press(key),
Input::Release(key) => self.release(key),
Input::Render(_) => {
self.update_fps();
self.draw(&e);
},
Input::Update(args) => self.update(args.dt),
Input::Move(Motion::MouseRelative(a, b)) => self.mouse_move(a as f64, b as f64),
_ => {}
}
if self.state.ended {
break;
}
}
}
fn mouse_move(&mut self, x: f64, y: f64) {
if self.state.rotate_cam {
self.camera.rotate(x*self.config.mouse_speed, y*self.config.mouse_speed, self.world.player.position);
}
}
fn press(&mut self, key: Button) {
match key {
Button::Keyboard(Key::A) => self.state.turn = Turn::Left,
Button::Keyboard(Key::D) => self.state.turn = Turn::Right,
Button::Keyboard(Key::W) => self.state.sprint = true,
Button::Keyboard(Key::Space) => if self.state.jump_timeout <= 0. {
self.state.jump_timeout = self.config.jump_timeout;
self.world.player.jump();
},
Button::Mouse(MouseButton::Right) => {
if self.config.zoom_in {
self.camera.zoom_in();
}
self.state.rotate_cam = true;
},
Button::Mouse(MouseButton::Left) => if self.state.rotate_cam && self.state.bullets > 0 {
let mut pos = self.world.player.position;
pos.y += self.world.player.size.y;
let mut d = vec3(0., 0., self.config.trueshot_distance + self.config.camera_distance);
d = self.camera.c * d.magnitude2() / d.dot(self.camera.c);
d = self.camera.eye + d - pos;
d = d * self.config.bullet_speed / d.magnitude();
self.world.add_bullet(pos, d, self.config.bullet_len);
self.state.bullets -= 1;
if self.state.bullets <= 0 {
self.state.recharge = self.config.recharge_time;
}
},
_ => (),
}
}
fn release(&mut self, key: Button) {
match key {
Button::Keyboard(Key::A) => if let Turn::Left = self.state.turn {
self.state.turn = Turn::None;
},
Button::Keyboard(Key::D) => if let Turn::Right = self.state.turn {
self.state.turn = Turn::None;
},
Button::Keyboard(Key::W) => self.state.sprint = false,
Button::Mouse(MouseButton::Right) => {
self.state.rotate_cam = false;
self.camera = Game::new_camera(&self.config, &self.world.player);
},
_ => (),
}
}
fn draw(&mut self, e: &Input) {
// Return a horizontal bar
macro_rules! bar {
($curr: expr, $full: expr) => {
[0.,
15.0,
self.config.screen_size.w as f64/2.*$curr/$full,
20.0,]
};
}
let jump_bar = bar!(self.state.jump_timeout, self.config.jump_timeout);
let recharge_bar = bar!(self.state.recharge, self.config.recharge_time);
let bullets_bar = bar!(self.state.bullets as f64, self.config.bullet_stock as f64);
// Closure in `draw_2d` requires unique access to `self`,
// so we use RefCell to hack it.
let mut glyphs = self.glyphs.borrow_mut();
let fps = format!("{:.3}", self.state.fps);
let lines = self.world.render(&self.camera);
self.window.draw_2d(e, |c, g| {
clear(BLACK.into(), g);
for (l, color) in lines {
line(color.into(), 1., convert(l), c.transform, g);
}
rectangle(BLUE.alpha(0.4).into(), jump_bar, c.transform, g);
rectangle(RED.alpha(0.4).into(), recharge_bar, c.transform, g);
rectangle(GREEN.alpha(0.4).into(), bullets_bar, c.transform, g);
text(WHITE.into(), 10, &fps, glyphs.deref_mut(), c.transform.trans(0., 10.), g);
});
if self.state.rotate_cam {
let w = 20.;
let x = self.config.screen_size.w as f64 /2. - w/2.;
let y = self.config.screen_size.h as f64 /2. - w/2.;
let ellipse = self.ellipse.borrow();
self.window.draw_2d(e, |c, g| {
ellipse.draw([x, y, w, w], &c.draw_state, c.transform, g);
rectangle(RED.into(), [x+w/2.-1., y+w/2.-1., 2., 2.], c.transform, g);
});
}
}
// `dt` stands for delta, duration since the last update
fn update(&mut self, dt: f64) {
// Re-calculate delta according to fps
let dt = if self.state.fps != 0. { 1. / self.state.fps } else { dt };
let old = self.world.player.position;
if self.state.bullets <= 0 {
self.state.recharge -= dt;
if self.state.recharge < 0. {
self.state.bullets = self.config.bullet_stock;
}
}
self.state.jump_timeout -= dt;
if self.state.game_speed < self.config.game_max_speed {
self.state.game_speed += dt*self.config.game_sprint;
}
if self.state.sprint {
if self.world.player.speed < self.config.player_speed.1 {
self.world.player.speed += dt*self.config.sprint_factor;
}
} else if self.world.player.speed > self.config.player_speed.0 {
self.world.player.speed -= dt*self.config.sprint_factor;
}
self.state.spawn -= dt;
if self.state.spawn < 0. {
self.world.add_bot(&self.bot_rules);
self.state.spawn += ::rnd(self.config.spawn_time);
}
match self.state.turn {
Turn::Left => self.world.player.turn_left(dt),
Turn::Right => self.world.player.turn_right(dt),
Turn::None => (),
}
// Update objects in the world
self.world.update(dt, self.state.game_speed);
// Validate things like object's boundary, bullets and boxes
// collisions.
self.world.validate();
// Update camera's location
self.camera.eye += self.world.player.position - old;
// Check for player's collision with bot
if self.world.bots.iter().any(|x| self.world.player.crashed(&x.car)) {
self.state.ended = true;
}
}
}
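// Typical entry point (sketch; `load_config` is a hypothetical helper that
// deserializes a GameConfig, e.g. via serde):
//
// fn main() {
//     let config: GameConfig = load_config();
//     let mut game = Game::new(config);
//     game.run();
// }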
fn convert(x: [Vector2<f64>; 2]) -> [f64; 4] {
[x[0].x, x[0].y, x[1].x, x[1].y]
}
| new_camera | identifier_name |
game.rs | use world::World;
use piston_window::*;
use camera::Camera;
use cgmath::{Vector2, vec3};
use cgmath::prelude::*;
use color::*;
use car::*;
use piston_window::Ellipse;
use bot::BoxRules;
use std::cell::RefCell;
use std::ops::DerefMut;
use std::time::Instant;
// `Game` contains everything needed to run the game
pub struct Game {
config: GameConfig,
world: World, // All objects in the game
window: PistonWindow,
bot_rules: BoxRules, // Rules to create a new bot
camera: Camera, // Camera for rendering
state: State, // Current state of game
// Wrap these caches in `RefCell` to allow interior mutability
glyphs: RefCell<Glyphs>, // Font cache
ellipse: RefCell<Ellipse>, // Model to draw a circle
}
struct State {
pub turn: Turn, // Represents the player's steering input
pub sprint: bool, // Player is speeding-up or not
pub spawn: f64, // Count down time to spawn a new bot
pub ended: bool, // Game is over or not
pub game_speed: f64, // Game speed in addition to player's speed
pub jump_timeout: f64, // Count down to allow the next jump
pub rotate_cam: bool, // Allow rotation of camera or not
pub bullets: i64, // The number of bullets left
pub recharge: f64, // Bullets recharge time
pub fps: f64, // Real fps of game
pub last_frame: Instant, // Moment of the last draw
}
pub enum Turn { Left, Right, None, }
// Configurable game constants.
// A tuple represents a (min, max) range.
#[derive(Serialize, Deserialize, Clone)]
pub struct GameConfig {
pub title: String,
pub screen_size: ::Pixel,
pub ups: u64, // Updates per second
pub max_fps: u64,
pub tunel_size: [f64; 3],
pub player_size: [f64; 3],
pub player_speed: (f64, f64), // min and max player speed
pub player_turn_speed: f64,
pub bot_size: [(f64, f64); 3], // Range of bot's size
pub bot_speed: (f64, f64),
pub bot_turn_speed: (f64, f64),
pub divider_size: [f64; 2],
pub camera_height: f64, // Height of camera (from player)
pub camera_distance: f64, // Distance from camera to player
pub decor_distance: f64, // Distance between each decoration
pub sprint_factor: f64,
pub spawn_time: (f64, f64),
pub game_sprint: f64, // The increase of game_speed
pub game_max_speed: f64,
pub player_jump_v: f64,
pub player_jump_a: f64,
pub jump_turn_decrease: f64,
pub jump_timeout: f64,
pub mouse_speed: f64,
pub trueshot_distance: f64,
pub bullet_stock: i64, // Number of bullets
pub recharge_time: f64,
pub bullet_len: f64,
pub bullet_speed: f64,
pub zoom_in: bool, // If true, zoom-in while on stare mode
}
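// Since GameConfig derives Serialize/Deserialize it can be loaded from disk,
// e.g. (sketch, assuming serde_yaml is available as a dependency):
//
// let file = std::fs::File::open("config.yml")?;
// let config: GameConfig = serde_yaml::from_reader(file)?;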
impl Game {
pub fn new(config: GameConfig) -> Game {
let mut window: PistonWindow = WindowSettings::new(
config.title.clone(), [config.screen_size.w, config.screen_size.h])
.exit_on_esc(true).build()
.expect("Cannot create window.");
window.set_ups(config.ups);
window.set_max_fps(config.max_fps);
window.set_capture_cursor(true);
let glyphs = Glyphs::new("resources/Ubuntu-R.ttf", window.factory.clone())
.expect("Unable to load font.");
let bot_rules = BoxRules {
size: config.bot_size,
position: [(0., config.tunel_size[0]), (0., 0.), (config.tunel_size[2], config.tunel_size[2])],
speed: config.bot_speed,
turn_speed: config.bot_turn_speed,
color: vec![RED, ORANGE, VIOLET, GREEN, PALE],
jump_turn_decrease: config.jump_turn_decrease,
};
let world = World::new(&config);
let camera = Game::new_camera(&config, &world.player);
let state = State {
turn: Turn::None,
sprint: false,
spawn: 0.,
ended: false,
game_speed: 0.,
jump_timeout: 0.,
rotate_cam: false,
bullets: config.bullet_stock,
recharge: 0.,
fps: 0.,
last_frame: Instant::now(),
};
let ellipse = Ellipse {
color: BLACK.alpha(0.).into(),
border: Some(ellipse::Border {
color: RED.alpha(0.5).into(),
radius: 1.,
}),
resolution: 16,
};
Game {
config: config,
world: world,
window: window,
bot_rules: bot_rules,
camera: camera,
state: state,
glyphs: RefCell::new(glyphs),
ellipse: RefCell::new(ellipse),
}
}
fn new_camera<T: Car>(config: &GameConfig, player: &T) -> Camera {
Camera::new(
config.screen_size.clone(),
vec3(0., config.camera_height, -config.camera_distance) + player.pos()
)
}
// Re-calculate fps
fn update_fps(&mut self) {
let d = self.state.last_frame.elapsed();
self.state.last_frame = Instant::now();
self.state.fps = 1. / (d.as_secs() as f64 + 1e-9*d.subsec_nanos() as f64);
}
pub fn run(&mut self) {
while let Some(e) = self.window.next() {
match e {
Input::Press(key) => self.press(key),
Input::Release(key) => self.release(key),
Input::Render(_) => {
self.update_fps();
self.draw(&e);
},
Input::Update(args) => self.update(args.dt),
Input::Move(Motion::MouseRelative(a, b)) => self.mouse_move(a as f64, b as f64),
_ => {}
}
if self.state.ended {
break;
}
}
}
fn mouse_move(&mut self, x: f64, y: f64) {
if self.state.rotate_cam {
self.camera.rotate(x*self.config.mouse_speed, y*self.config.mouse_speed, self.world.player.position);
}
}
fn press(&mut self, key: Button) {
match key {
Button::Keyboard(Key::A) => self.state.turn = Turn::Left,
Button::Keyboard(Key::D) => self.state.turn = Turn::Right,
Button::Keyboard(Key::W) => self.state.sprint = true,
Button::Keyboard(Key::Space) => if self.state.jump_timeout <= 0. {
self.state.jump_timeout = self.config.jump_timeout;
self.world.player.jump();
},
Button::Mouse(MouseButton::Right) => {
if self.config.zoom_in {
self.camera.zoom_in();
}
self.state.rotate_cam = true;
},
Button::Mouse(MouseButton::Left) => if self.state.rotate_cam && self.state.bullets > 0 {
let mut pos = self.world.player.position;
pos.y += self.world.player.size.y;
let mut d = vec3(0., 0., self.config.trueshot_distance + self.config.camera_distance);
d = self.camera.c * d.magnitude2() / d.dot(self.camera.c);
d = self.camera.eye + d - pos;
d = d * self.config.bullet_speed / d.magnitude();
self.world.add_bullet(pos, d, self.config.bullet_len);
self.state.bullets -= 1;
if self.state.bullets <= 0 {
self.state.recharge = self.config.recharge_time;
}
},
_ => (),
}
}
fn release(&mut self, key: Button) {
match key {
Button::Keyboard(Key::A) => if let Turn::Left = self.state.turn {
self.state.turn = Turn::None;
},
Button::Keyboard(Key::D) => if let Turn::Right = self.state.turn {
self.state.turn = Turn::None;
},
Button::Keyboard(Key::W) => self.state.sprint = false,
Button::Mouse(MouseButton::Right) => {
self.state.rotate_cam = false;
self.camera = Game::new_camera(&self.config, &self.world.player);
},
_ => (),
}
}
fn draw(&mut self, e: &Input) {
// Return a horizontal bar
macro_rules! bar {
($curr: expr, $full: expr) => {
[0.,
15.0,
self.config.screen_size.w as f64/2.*$curr/$full,
20.0,]
};
}
let jump_bar = bar!(self.state.jump_timeout, self.config.jump_timeout);
let recharge_bar = bar!(self.state.recharge, self.config.recharge_time);
let bullets_bar = bar!(self.state.bullets as f64, self.config.bullet_stock as f64);
// Closure in `draw_2d` requires unique access to `self`,
// so we use RefCell to hack it.
let mut glyphs = self.glyphs.borrow_mut();
let fps = format!("{:.3}", self.state.fps);
let lines = self.world.render(&self.camera);
self.window.draw_2d(e, |c, g| {
clear(BLACK.into(), g);
for (l, color) in lines {
line(color.into(), 1., convert(l), c.transform, g);
}
rectangle(BLUE.alpha(0.4).into(), jump_bar, c.transform, g);
rectangle(RED.alpha(0.4).into(), recharge_bar, c.transform, g);
rectangle(GREEN.alpha(0.4).into(), bullets_bar, c.transform, g);
text(WHITE.into(), 10, &fps, glyphs.deref_mut(), c.transform.trans(0., 10.), g);
});
if self.state.rotate_cam {
let w = 20.;
let x = self.config.screen_size.w as f64 /2. - w/2.;
let y = self.config.screen_size.h as f64 /2. - w/2.;
let ellipse = self.ellipse.borrow();
self.window.draw_2d(e, |c, g| {
ellipse.draw([x, y, w, w], &c.draw_state, c.transform, g);
rectangle(RED.into(), [x+w/2.-1., y+w/2.-1., 2., 2.], c.transform, g);
});
}
}
// `dt` stands for delta, duration since the last update
fn update(&mut self, dt: f64) {
// Re-calculate delta according to fps
let dt = if self.state.fps != 0. { 1. / self.state.fps } else { dt };
let old = self.world.player.position;
if self.state.bullets <= 0 {
self.state.recharge -= dt;
if self.state.recharge < 0. {
self.state.bullets = self.config.bullet_stock;
}
}
self.state.jump_timeout -= dt;
if self.state.game_speed < self.config.game_max_speed {
self.state.game_speed += dt*self.config.game_sprint; | self.world.player.speed += dt*self.config.sprint_factor;
}
} else if self.world.player.speed > self.config.player_speed.0 {
self.world.player.speed -= dt*self.config.sprint_factor;
}
self.state.spawn -= dt;
if self.state.spawn < 0. {
self.world.add_bot(&self.bot_rules);
self.state.spawn += ::rnd(self.config.spawn_time);
}
match self.state.turn {
Turn::Left => self.world.player.turn_left(dt),
Turn::Right => self.world.player.turn_right(dt),
Turn::None => (),
}
// Update objects in the world
self.world.update(dt, self.state.game_speed);
// Validate things like object's boundary, bullets and boxes
// collisions.
self.world.validate();
// Update camera's location
self.camera.eye += self.world.player.position - old;
// Check for player's collision with bot
if self.world.bots.iter().any(|x| self.world.player.crashed(&x.car)) {
self.state.ended = true;
}
}
}
fn convert(x: [Vector2<f64>; 2]) -> [f64; 4] {
[x[0].x, x[0].y, x[1].x, x[1].y]
} | }
if self.state.sprint {
if self.world.player.speed < self.config.player_speed.1 { | random_line_split |
graph.rs | use crate::{CommandEncoder, CommandEncoderOutput};
use generational_arena::Arena;
use moonwave_resources::{BindGroup, Buffer, ResourceRc, SampledTexture, TextureView};
use multimap::MultiMap;
use parking_lot::{RwLock, RwLockReadGuard};
use rayon::{prelude::*, ThreadPool};
use std::{
collections::HashMap,
fmt::{Debug, Formatter},
sync::Arc,
};
pub use generational_arena::Index;
pub trait FrameGraphNode: Send + Sync +'static {
fn execute(
&self,
_inputs: &[Option<FrameNodeValue>],
_outputs: &mut [Option<FrameNodeValue>],
_encoder: &mut CommandEncoder,
) {
}
fn execute_raw(
&self,
inputs: &[Option<FrameNodeValue>],
outputs: &mut [Option<FrameNodeValue>],
device: &wgpu::Device,
_queue: &wgpu::Queue,
_sc_frame: &wgpu::SwapChainFrame,
) -> CommandEncoderOutput {
let mut encoder = CommandEncoder::new(device, "NodeGraphEncoder");
self.execute(inputs, outputs, &mut encoder);
encoder.finish()
}
}
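// Minimal sketch of a node implementation (`ClearNode` is hypothetical):
//
// struct ClearNode;
// impl FrameGraphNode for ClearNode {
//     fn execute(
//         &self,
//         _inputs: &[Option<FrameNodeValue>],
//         _outputs: &mut [Option<FrameNodeValue>],
//         _encoder: &mut CommandEncoder,
//     ) {
//         // record clear / render commands into `_encoder` here
//     }
// }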
const MAX_LAYERS: usize = 8;
const MAX_NODES_PER_LAYER: usize = 8;
const MAX_INPUT_OUTPUTS_PER_NODE: usize = 16;
struct ConnectedNode {
name: String,
node: Arc<dyn FrameGraphNode>,
inputs: [Option<Index>; MAX_INPUT_OUTPUTS_PER_NODE],
}
struct ConnectedEdges {
owner_node_index: Index,
output_index: usize,
}
pub struct FrameGraph {
node_arena: RwLock<Arena<ConnectedNode>>,
edges_arena: RwLock<Arena<ConnectedEdges>>,
end_node: Index,
output_map: Vec<Vec<Option<FrameNodeValue>>>,
levels_map: MultiMap<usize, TraversedGraphNode>,
traversed_node_cache: HashMap<Index, usize>,
}
impl FrameGraph {
/// Creates a new empty graph.
pub fn new<T: FrameGraphNode>(end_node: T) -> Self {
let mut node_arena = Arena::with_capacity(MAX_LAYERS * MAX_NODES_PER_LAYER);
let end_node = node_arena.insert(ConnectedNode {
name: "EndNode".to_string(),
node: Arc::new(end_node),
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
});
Self {
node_arena: RwLock::new(node_arena),
edges_arena: RwLock::new(Arena::with_capacity(
MAX_LAYERS * MAX_INPUT_OUTPUTS_PER_NODE * MAX_NODES_PER_LAYER,
)),
output_map: vec![vec![None; MAX_NODES_PER_LAYER * MAX_INPUT_OUTPUTS_PER_NODE]; MAX_LAYERS],
levels_map: MultiMap::with_capacity(MAX_LAYERS),
traversed_node_cache: HashMap::with_capacity(
MAX_LAYERS * MAX_INPUT_OUTPUTS_PER_NODE * MAX_NODES_PER_LAYER,
),
end_node,
}
}
/// Returns the end node.
pub fn get_end_node(&self) -> Index {
self.end_node
}
/// Resets the frame graph by removing all nodes and sets up a new end node.
pub fn reset(&mut self) {
let mut nodes = self.node_arena.write();
let end_node_impl = nodes.get(self.end_node).unwrap().node.clone();
nodes.clear();
self.traversed_node_cache.clear();
self.edges_arena.write().clear();
self.end_node = nodes.insert(ConnectedNode {
name: "EndNode".to_string(),
node: end_node_impl,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
});
}
/// Add a new node into the graph.
pub fn add_node<T: FrameGraphNode>(&self, node: T, name: &str) -> Index {
self.node_arena.write().insert(ConnectedNode {
name: name.to_string(),
node: Arc::new(node),
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
})
}
/// Connects one nodes output to another nodes input.
pub fn connect(
&self,
source: Index,
source_output: usize,
destination: Index,
destination_input: usize,
) -> Result<(), GraphConnectError> {
// Validate connection parameters.
if destination_input >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumInputsReached);
};
if source_output >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumInputsReached);
};
let mut edges = self.edges_arena.write();
let mut nodes = self.node_arena.write();
let destination_node = nodes
.get_mut(destination)
.ok_or(GraphConnectError::InvalidDestination)?;
// Target input is already connected.
if destination_node.inputs[destination_input].is_some() {
return Err(GraphConnectError::AlreadyConnected);
}
// Target input is empty so simply create the connection.
let edge = edges.insert(ConnectedEdges {
owner_node_index: source,
output_index: source_output,
});
destination_node.inputs[destination_input] = Some(edge);
Ok(())
}
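// Usage sketch (`PresentNode` and `GeometryPass` are hypothetical types that
// implement FrameGraphNode):
//
// let graph = FrameGraph::new(PresentNode::new());
// let pass = graph.add_node(GeometryPass::new(), "geometry");
// graph.connect(pass, 0, graph.get_end_node(), 0)?;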
fn traverse_node(
cache: &mut HashMap<Index, usize>,
levels_map: &mut MultiMap<usize, TraversedGraphNode>,
nodes: &RwLockReadGuard<Arena<ConnectedNode>>,
edges: &RwLockReadGuard<Arena<ConnectedEdges>>,
node_index: Index,
level: usize,
) {
// Build the traversed node with input/output mapping info.
let mut traversed_node = TraversedGraphNode {
index: node_index,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
};
// Remove this node from any lower level it was previously assigned to.
// (Note: `level..0` is an empty range whenever `level > 0`, so this loop and
// the fix-up below never actually run; see the TODO near the end of this fn.)
let mut has_retained = false;
for l in level..0 {
// Remove previous traversed node from level.
let vec = levels_map.get_vec_mut(&l).unwrap();
let before_len = vec.len();
vec.retain(|x| x.index != node_index);
if before_len != vec.len() {
has_retained = true;
}
}
// Update all inputs that still reference kicked out node.
if has_retained {
for l in level..0 {
let vec = levels_map.get_vec_mut(&l).unwrap();
for node in vec {
for input in &mut node.inputs {
if let Some((nlevel, _, index)) = input {
if index == &node_index {
*nlevel = level;
}
}
}
}
}
}
// Loop through all inputs
let next_level = level + 1;
let node = nodes.get(node_index).unwrap();
for (input_index, input) in node.inputs.iter().enumerate() {
if let Some(input) = input {
let edge = edges.get(*input).unwrap();
let inner_node = edge.owner_node_index;
traversed_node.inputs[input_index] = Some((next_level, edge.output_index, inner_node));
Self::traverse_node(cache, levels_map, nodes, edges, inner_node, next_level);
}
}
// Store traversed node at level.
//let traversed_index = levels_map.get_vec(&level).map(|x| x.len()).unwrap_or(0);
//cache.insert(node_index, traversed_index);
// TODO: Due to retaining this index breaks currently :'(
levels_map.insert(level, traversed_node);
}
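// Level assignment example: with edges A -> End, A -> B and B -> End, the end
// node lands at level 0, A and B at level 1, and A again at level 2 (as an
// input of B). `execute` runs levels deepest-first, so a node's outputs exist
// before the level that consumes them; the retain pass above is intended to
// drop the shallower duplicate of A.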
/// Executes the graph using the given scheduler.
pub fn execute<T: DeviceHost>(
&mut self,
sc_frame: Arc<wgpu::SwapChainFrame>,
device_host: &'static T,
pool: &ThreadPool,
) {
{
{
optick::event!("FrameGraph::traverse");
// Gain read access to nodes and connections.
let nodes = self.node_arena.read();
let edges = self.edges_arena.read();
// Start traversing from end.
self.levels_map.clear();
Self::traverse_node(
&mut self.traversed_node_cache,
&mut self.levels_map,
&nodes,
&edges,
self.end_node,
0,
);
}
let cache = &mut self.traversed_node_cache;
// Create an async executor.
let mut local_pool = futures::executor::LocalPool::new();
let local_spawner = local_pool.spawner();
// Execute in levels order
let mut all_levels = self.levels_map.keys().cloned().collect::<Vec<_>>();
all_levels.sort_unstable();
let max_levels = all_levels.len();
for level in all_levels.into_iter().rev() { | optick::tag!("level", level as u32);
// Get rid of duplicated nodes.
let mut nodes_in_level = self.levels_map.get_vec_mut(&level).unwrap().clone();
nodes_in_level.sort_unstable_by_key(|x| x.index);
nodes_in_level.dedup_by_key(|x| x.index);
// Build cache for this level
for (index, node) in nodes_in_level.iter().enumerate() {
cache.insert(node.index, index);
}
// Get chunks
let nodes = self.node_arena.read();
let read_nodes = nodes_in_level
.iter()
.map(|node| (nodes.get(node.index).unwrap(), node.inputs))
.collect::<Vec<_>>();
let mut empty = [Vec::with_capacity(0)];
#[allow(clippy::type_complexity)]
let (outputs, previous_outputs): (
&mut [Vec<Option<FrameNodeValue>>],
&mut [Vec<Option<FrameNodeValue>>],
) = if level == (max_levels - 1) {
(&mut self.output_map, &mut empty)
} else {
self.output_map.split_at_mut(level + 1)
};
let outputs_per_node = outputs[outputs.len() - 1]
.chunks_mut(MAX_INPUT_OUTPUTS_PER_NODE)
.enumerate()
.collect::<Vec<_>>();
// Execute
let encoder_outputs = pool.install(|| {
read_nodes
.par_iter()
.zip(outputs_per_node)
.enumerate()
.map(|(_i, ((node, inputs), (_oi, outputs)))| {
optick::event!("FrameGraph::node");
// Prepare node execution
optick::tag!("name", node.name);
let node_trait = node.node.clone();
let label = format!("NodeCommandEncoder_{}", node.name);
// Map outputs -> inputs.
/*
for (idx, input) in inputs.iter().enumerate() {
if let Some((target_level, output_index, node_index)) = input {
let i = cache.get(&node_index).unwrap();
println!(
"Mapping input #{} to level = {} ({}) and index = {} ({}, {})",
idx,
target_level,
previous_outputs.len() - (target_level - level),
i * MAX_INPUT_OUTPUTS_PER_NODE + output_index,
i,
output_index
);
} else {
println!("Mapping input #{} to None", i);
}
}
*/
let inputs = inputs
.iter()
.map(|input| {
input.map(|(target_level, output_index, node_index)| {
let i = cache.get(&node_index).unwrap();
&previous_outputs[previous_outputs.len() - (target_level - level)]
[i * MAX_INPUT_OUTPUTS_PER_NODE + output_index]
})
})
.map(|input| match input {
Some(Some(rf)) => Some(rf.clone()),
_ => None,
})
.collect::<Vec<_>>();
let sc_cloned = sc_frame.clone();
let out = {
optick::event!("FrameGraph::record_commands");
optick::tag!("name", label);
// Execute node asynchronisly.
node_trait.execute_raw(
&inputs,
outputs,
device_host.get_device(),
device_host.get_queue(),
&*sc_cloned,
)
};
out
})
.collect::<Vec<_>>()
});
{
optick::event!("FrameGraph::submit_level");
optick::tag!("level", level as u32);
let mut buffers = Vec::with_capacity(encoder_outputs.len());
for out in encoder_outputs {
if let Some(buffer) = out.command_buffer {
buffers.push(buffer);
}
}
device_host.get_queue().submit(buffers);
}
}
}
// Reset
optick::event!("FrameGraph::reset");
self.reset();
}
}
#[derive(Clone)]
pub enum FrameNodeValue {
Buffer(ResourceRc<Buffer>),
BindGroup(ResourceRc<BindGroup>),
TextureView(ResourceRc<TextureView>),
SampledTexture(SampledTexture),
}
impl std::fmt::Debug for FrameNodeValue {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Buffer(_) => f.write_str("Buffer"),
Self::BindGroup(_) => f.write_str("BindGroup"),
Self::TextureView(_) => f.write_str("Texture"),
Self::SampledTexture(_) => f.write_str("SampledTexture"),
}
}
}
use thiserror::Error;
#[derive(Error, Debug)]
pub enum GraphConnectError {
#[error("The target node has reached its input limit")]
MaximumInputsReached,
#[error("The source node has reached its outputs limit")]
MaximumOutputsReached,
#[error("The target node does not exist")]
InvalidDestination,
#[error("The target nodes input is already connected")]
AlreadyConnected,
}
#[derive(Clone)]
struct TraversedGraphNode {
index: Index,
inputs: [Option<(usize, usize, Index)>; MAX_INPUT_OUTPUTS_PER_NODE],
}
pub trait DeviceHost: Send + Sync +'static {
fn get_device(&self) -> &wgpu::Device;
fn get_queue(&self) -> &wgpu::Queue;
}
macro_rules! impl_get_node_specific {
($getter:ident, $ty:ident, $rty:ty) => {
impl FrameNodeValue {
pub fn $getter(&self) -> &$rty {
match self {
FrameNodeValue::$ty(group) => group,
_ => panic!(
"Unexpected frame node value, expected '{}' but received '{:?}'",
stringify!($ty),
self
),
}
}
}
};
}
impl_get_node_specific!(get_bind_group, BindGroup, ResourceRc<BindGroup>);
impl_get_node_specific!(get_texture_view, TextureView, ResourceRc<TextureView>);
impl_get_node_specific!(get_sampled_texture, SampledTexture, SampledTexture); | optick::event!("FrameGraph::execute_level"); | random_line_split |
graph.rs | use crate::{CommandEncoder, CommandEncoderOutput};
use generational_arena::Arena;
use moonwave_resources::{BindGroup, Buffer, ResourceRc, SampledTexture, TextureView};
use multimap::MultiMap;
use parking_lot::{RwLock, RwLockReadGuard};
use rayon::{prelude::*, ThreadPool};
use std::{
collections::HashMap,
fmt::{Debug, Formatter},
sync::Arc,
};
pub use generational_arena::Index;
pub trait FrameGraphNode: Send + Sync +'static {
fn execute(
&self,
_inputs: &[Option<FrameNodeValue>],
_outputs: &mut [Option<FrameNodeValue>],
_encoder: &mut CommandEncoder,
) {
}
fn execute_raw(
&self,
inputs: &[Option<FrameNodeValue>],
outputs: &mut [Option<FrameNodeValue>],
device: &wgpu::Device,
_queue: &wgpu::Queue,
_sc_frame: &wgpu::SwapChainFrame,
) -> CommandEncoderOutput {
let mut encoder = CommandEncoder::new(device, "NodeGraphEncoder");
self.execute(inputs, outputs, &mut encoder);
encoder.finish()
}
}
const MAX_LAYERS: usize = 8;
const MAX_NODES_PER_LAYER: usize = 8;
const MAX_INPUT_OUTPUTS_PER_NODE: usize = 16;
struct ConnectedNode {
name: String,
node: Arc<dyn FrameGraphNode>,
inputs: [Option<Index>; MAX_INPUT_OUTPUTS_PER_NODE],
}
struct ConnectedEdges {
owner_node_index: Index,
output_index: usize,
}
pub struct | {
node_arena: RwLock<Arena<ConnectedNode>>,
edges_arena: RwLock<Arena<ConnectedEdges>>,
end_node: Index,
output_map: Vec<Vec<Option<FrameNodeValue>>>,
levels_map: MultiMap<usize, TraversedGraphNode>,
traversed_node_cache: HashMap<Index, usize>,
}
impl FrameGraph {
/// Creates a new empty graph.
pub fn new<T: FrameGraphNode>(end_node: T) -> Self {
let mut node_arena = Arena::with_capacity(MAX_LAYERS * MAX_NODES_PER_LAYER);
let end_node = node_arena.insert(ConnectedNode {
name: "EndNode".to_string(),
node: Arc::new(end_node),
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
});
Self {
node_arena: RwLock::new(node_arena),
edges_arena: RwLock::new(Arena::with_capacity(
MAX_LAYERS * MAX_INPUT_OUTPUTS_PER_NODE * MAX_NODES_PER_LAYER,
)),
output_map: vec![vec![None; MAX_NODES_PER_LAYER * MAX_INPUT_OUTPUTS_PER_NODE]; MAX_LAYERS],
levels_map: MultiMap::with_capacity(MAX_LAYERS),
traversed_node_cache: HashMap::with_capacity(
MAX_LAYERS * MAX_INPUT_OUTPUTS_PER_NODE * MAX_NODES_PER_LAYER,
),
end_node,
}
}
/// Returns the end node.
pub fn get_end_node(&self) -> Index {
self.end_node
}
/// Resets the frame graph by removing all nodes and sets up a new end node.
pub fn reset(&mut self) {
let mut nodes = self.node_arena.write();
let end_node_impl = nodes.get(self.end_node).unwrap().node.clone();
nodes.clear();
self.traversed_node_cache.clear();
self.edges_arena.write().clear();
self.end_node = nodes.insert(ConnectedNode {
name: "EndNode".to_string(),
node: end_node_impl,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
});
}
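// Note: the end node's implementation survives reset(); it is re-inserted
// (under a fresh index) with cleared inputs, so the graph can be rebuilt
// against the same end node every frame.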
/// Add a new node into the graph.
pub fn add_node<T: FrameGraphNode>(&self, node: T, name: &str) -> Index {
self.node_arena.write().insert(ConnectedNode {
name: name.to_string(),
node: Arc::new(node),
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
})
}
/// Connects one nodes output to another nodes input.
pub fn connect(
&self,
source: Index,
source_output: usize,
destination: Index,
destination_input: usize,
) -> Result<(), GraphConnectError> {
// Validate connection parameters.
if destination_input >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumInputsReached);
};
if source_output >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumInputsReached);
};
let mut edges = self.edges_arena.write();
let mut nodes = self.node_arena.write();
let destination_node = nodes
.get_mut(destination)
.ok_or(GraphConnectError::InvalidDestination)?;
// Target input is already connected.
if destination_node.inputs[destination_input].is_some() {
return Err(GraphConnectError::AlreadyConnected);
}
// Target input is empty so simply create the connection.
let edge = edges.insert(ConnectedEdges {
owner_node_index: source,
output_index: source_output,
});
destination_node.inputs[destination_input] = Some(edge);
Ok(())
}
fn traverse_node(
cache: &mut HashMap<Index, usize>,
levels_map: &mut MultiMap<usize, TraversedGraphNode>,
nodes: &RwLockReadGuard<Arena<ConnectedNode>>,
edges: &RwLockReadGuard<Arena<ConnectedEdges>>,
node_index: Index,
level: usize,
) {
// Build the traversed node with input/output mapping info.
let mut traversed_node = TraversedGraphNode {
index: node_index,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
};
// Remove this node from any lower level it was previously assigned to.
// (Note: `level..0` is an empty range whenever `level > 0`, so this loop and
// the fix-up below never actually run; see the TODO near the end of this fn.)
let mut has_retained = false;
for l in level..0 {
// Remove previous traversed node from level.
let vec = levels_map.get_vec_mut(&l).unwrap();
let before_len = vec.len();
vec.retain(|x| x.index != node_index);
if before_len != vec.len() {
has_retained = true;
}
}
// Update all inputs that still reference kicked out node.
if has_retained {
for l in level..0 {
let vec = levels_map.get_vec_mut(&l).unwrap();
for node in vec {
for input in &mut node.inputs {
if let Some((nlevel, _, index)) = input {
if index == &node_index {
*nlevel = level;
}
}
}
}
}
}
// Loop through all inputs
let next_level = level + 1;
let node = nodes.get(node_index).unwrap();
for (input_index, input) in node.inputs.iter().enumerate() {
if let Some(input) = input {
let edge = edges.get(*input).unwrap();
let inner_node = edge.owner_node_index;
traversed_node.inputs[input_index] = Some((next_level, edge.output_index, inner_node));
Self::traverse_node(cache, levels_map, nodes, edges, inner_node, next_level);
}
}
// Store traversed node at level.
//let traversed_index = levels_map.get_vec(&level).map(|x| x.len()).unwrap_or(0);
//cache.insert(node_index, traversed_index);
// TODO: Due to retaining this index breaks currently :'(
levels_map.insert(level, traversed_node);
}
/// Executes the graph using the given scheduler.
pub fn execute<T: DeviceHost>(
&mut self,
sc_frame: Arc<wgpu::SwapChainFrame>,
device_host: &'static T,
pool: &ThreadPool,
) {
{
{
optick::event!("FrameGraph::traverse");
// Gain read access to nodes and connections.
let nodes = self.node_arena.read();
let edges = self.edges_arena.read();
// Start traversing from end.
self.levels_map.clear();
Self::traverse_node(
&mut self.traversed_node_cache,
&mut self.levels_map,
&nodes,
&edges,
self.end_node,
0,
);
}
let cache = &mut self.traversed_node_cache;
// Create an async executor.
let mut local_pool = futures::executor::LocalPool::new();
let local_spawner = local_pool.spawner();
// Execute in levels order
let mut all_levels = self.levels_map.keys().cloned().collect::<Vec<_>>();
all_levels.sort_unstable();
let max_levels = all_levels.len();
for level in all_levels.into_iter().rev() {
optick::event!("FrameGraph::execute_level");
optick::tag!("level", level as u32);
// Get rid of duplicated nodes.
let mut nodes_in_level = self.levels_map.get_vec_mut(&level).unwrap().clone();
nodes_in_level.sort_unstable_by_key(|x| x.index);
nodes_in_level.dedup_by_key(|x| x.index);
// Build cache for this level
for (index, node) in nodes_in_level.iter().enumerate() {
cache.insert(node.index, index);
}
// Get chunks
let nodes = self.node_arena.read();
let read_nodes = nodes_in_level
.iter()
.map(|node| (nodes.get(node.index).unwrap(), node.inputs))
.collect::<Vec<_>>();
let mut empty = [Vec::with_capacity(0)];
#[allow(clippy::type_complexity)]
let (outputs, previous_outputs): (
&mut [Vec<Option<FrameNodeValue>>],
&mut [Vec<Option<FrameNodeValue>>],
) = if level == (max_levels - 1) {
(&mut self.output_map, &mut empty)
} else {
self.output_map.split_at_mut(level + 1)
};
let outputs_per_node = outputs[outputs.len() - 1]
.chunks_mut(MAX_INPUT_OUTPUTS_PER_NODE)
.enumerate()
.collect::<Vec<_>>();
// Execute
let encoder_outputs = pool.install(|| {
read_nodes
.par_iter()
.zip(outputs_per_node)
.enumerate()
.map(|(_i, ((node, inputs), (_oi, outputs)))| {
optick::event!("FrameGraph::node");
// Prepare node execution
optick::tag!("name", node.name);
let node_trait = node.node.clone();
let label = format!("NodeCommandEncoder_{}", node.name);
// Map outputs -> inputs.
/*
for (idx, input) in inputs.iter().enumerate() {
if let Some((target_level, output_index, node_index)) = input {
let i = cache.get(&node_index).unwrap();
println!(
"Mapping input #{} to level = {} ({}) and index = {} ({}, {})",
idx,
target_level,
previous_outputs.len() - (target_level - level),
i * MAX_INPUT_OUTPUTS_PER_NODE + output_index,
i,
output_index
);
} else {
println!("Mapping input #{} to None", i);
}
}
*/
let inputs = inputs
.iter()
.map(|input| {
input.map(|(target_level, output_index, node_index)| {
let i = cache.get(&node_index).unwrap();
&previous_outputs[previous_outputs.len() - (target_level - level)]
[i * MAX_INPUT_OUTPUTS_PER_NODE + output_index]
})
})
.map(|input| match input {
Some(Some(rf)) => Some(rf.clone()),
_ => None,
})
.collect::<Vec<_>>();
let sc_cloned = sc_frame.clone();
let out = {
optick::event!("FrameGraph::record_commands");
optick::tag!("name", label);
// Execute the node asynchronously.
node_trait.execute_raw(
&inputs,
outputs,
device_host.get_device(),
device_host.get_queue(),
&*sc_cloned,
)
};
out
})
.collect::<Vec<_>>()
});
{
optick::event!("FrameGraph::submit_level");
optick::tag!("level", level as u32);
let mut buffers = Vec::with_capacity(encoder_outputs.len());
for out in encoder_outputs {
if let Some(buffer) = out.command_buffer {
buffers.push(buffer);
}
}
device_host.get_queue().submit(buffers);
}
}
}
// Reset
optick::event!("FrameGraph::reset");
self.reset();
}
}
#[derive(Clone)]
pub enum FrameNodeValue {
Buffer(ResourceRc<Buffer>),
BindGroup(ResourceRc<BindGroup>),
TextureView(ResourceRc<TextureView>),
SampledTexture(SampledTexture),
}
impl std::fmt::Debug for FrameNodeValue {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Buffer(_) => f.write_str("Buffer"),
Self::BindGroup(_) => f.write_str("BindGroup"),
Self::TextureView(_) => f.write_str("Texture"),
Self::SampledTexture(_) => f.write_str("SampledTexture"),
}
}
}
use thiserror::Error;
#[derive(Error, Debug)]
pub enum GraphConnectError {
#[error("The target node has reached its input limit")]
MaximumInputsReached,
#[error("The source node has reached its outputs limit")]
MaximumOutputsReached,
#[error("The target node does not exist")]
InvalidDestination,
#[error("The target nodes input is already connected")]
AlreadyConnected,
}
#[derive(Clone)]
struct TraversedGraphNode {
index: Index,
inputs: [Option<(usize, usize, Index)>; MAX_INPUT_OUTPUTS_PER_NODE],
}
pub trait DeviceHost: Send + Sync + 'static {
fn get_device(&self) -> &wgpu::Device;
fn get_queue(&self) -> &wgpu::Queue;
}
macro_rules! impl_get_node_specific {
($getter:ident, $ty:ident, $rty:ty) => {
impl FrameNodeValue {
pub fn $getter(&self) -> &$rty {
match self {
FrameNodeValue::$ty(group) => group,
_ => panic!(
"Unexpected frame node value, expected '{}' but received '{:?}'",
stringify!($ty),
self
),
}
}
}
};
}
impl_get_node_specific!(get_bind_group, BindGroup, ResourceRc<BindGroup>);
impl_get_node_specific!(get_texture_view, TextureView, ResourceRc<TextureView>);
impl_get_node_specific!(get_sampled_texture, SampledTexture, SampledTexture);
| FrameGraph | identifier_name |
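A minimal usage sketch of the FrameGraph API above. `ClearNode` and `PresentNode` are hypothetical node types, not from the original repo; since every method of `FrameGraphNode` has a default body, empty impls are enough for illustration.
struct ClearNode;
impl FrameGraphNode for ClearNode {}
struct PresentNode;
impl FrameGraphNode for PresentNode {}

fn build_example_graph() -> Result<FrameGraph, GraphConnectError> {
    // The graph is created around its end node; other nodes are added and
    // wired output-to-input, with slot indices bounded by
    // MAX_INPUT_OUTPUTS_PER_NODE.
    let graph = FrameGraph::new(PresentNode);
    let clear = graph.add_node(ClearNode, "Clear");
    graph.connect(clear, 0, graph.get_end_node(), 0)?;
    Ok(graph)
}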
graph.rs | use crate::{CommandEncoder, CommandEncoderOutput};
use generational_arena::Arena;
use moonwave_resources::{BindGroup, Buffer, ResourceRc, SampledTexture, TextureView};
use multimap::MultiMap;
use parking_lot::{RwLock, RwLockReadGuard};
use rayon::{prelude::*, ThreadPool};
use std::{
collections::HashMap,
fmt::{Debug, Formatter},
sync::Arc,
};
pub use generational_arena::Index;
pub trait FrameGraphNode: Send + Sync + 'static {
fn execute(
&self,
_inputs: &[Option<FrameNodeValue>],
_outputs: &mut [Option<FrameNodeValue>],
_encoder: &mut CommandEncoder,
) {
}
fn execute_raw(
&self,
inputs: &[Option<FrameNodeValue>],
outputs: &mut [Option<FrameNodeValue>],
device: &wgpu::Device,
_queue: &wgpu::Queue,
_sc_frame: &wgpu::SwapChainFrame,
) -> CommandEncoderOutput {
let mut encoder = CommandEncoder::new(device, "NodeGraphEncoder");
self.execute(inputs, outputs, &mut encoder);
encoder.finish()
}
}
const MAX_LAYERS: usize = 8;
const MAX_NODES_PER_LAYER: usize = 8;
const MAX_INPUT_OUTPUTS_PER_NODE: usize = 16;
struct ConnectedNode {
name: String,
node: Arc<dyn FrameGraphNode>,
inputs: [Option<Index>; MAX_INPUT_OUTPUTS_PER_NODE],
}
struct ConnectedEdges {
owner_node_index: Index,
output_index: usize,
}
pub struct FrameGraph {
node_arena: RwLock<Arena<ConnectedNode>>,
edges_arena: RwLock<Arena<ConnectedEdges>>,
end_node: Index,
output_map: Vec<Vec<Option<FrameNodeValue>>>,
levels_map: MultiMap<usize, TraversedGraphNode>,
traversed_node_cache: HashMap<Index, usize>,
}
impl FrameGraph {
/// Creates a new empty graph.
pub fn new<T: FrameGraphNode>(end_node: T) -> Self {
let mut node_arena = Arena::with_capacity(MAX_LAYERS * MAX_NODES_PER_LAYER);
let end_node = node_arena.insert(ConnectedNode {
name: "EndNode".to_string(),
node: Arc::new(end_node),
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
});
Self {
node_arena: RwLock::new(node_arena),
edges_arena: RwLock::new(Arena::with_capacity(
MAX_LAYERS * MAX_INPUT_OUTPUTS_PER_NODE * MAX_NODES_PER_LAYER,
)),
output_map: vec![vec![None; MAX_NODES_PER_LAYER * MAX_INPUT_OUTPUTS_PER_NODE]; MAX_LAYERS],
levels_map: MultiMap::with_capacity(MAX_LAYERS),
traversed_node_cache: HashMap::with_capacity(
MAX_LAYERS * MAX_INPUT_OUTPUTS_PER_NODE * MAX_NODES_PER_LAYER,
),
end_node,
}
}
/// Returns the end node.
pub fn get_end_node(&self) -> Index {
self.end_node
}
/// Resets the frame graph by removing all nodes and setting up a new end node.
pub fn reset(&mut self) {
let mut nodes = self.node_arena.write();
let end_node_impl = nodes.get(self.end_node).unwrap().node.clone();
nodes.clear();
self.traversed_node_cache.clear();
self.edges_arena.write().clear();
self.end_node = nodes.insert(ConnectedNode {
name: "EndNode".to_string(),
node: end_node_impl,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
});
}
/// Adds a new node to the graph.
pub fn add_node<T: FrameGraphNode>(&self, node: T, name: &str) -> Index {
self.node_arena.write().insert(ConnectedNode {
name: name.to_string(),
node: Arc::new(node),
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
})
}
/// Connects one node's output to another node's input.
pub fn connect(
&self,
source: Index,
source_output: usize,
destination: Index,
destination_input: usize,
) -> Result<(), GraphConnectError> {
// Validate connection parameters.
if destination_input >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumInputsReached);
};
if source_output >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumInputsReached);
};
let mut edges = self.edges_arena.write();
let mut nodes = self.node_arena.write();
let destination_node = nodes
.get_mut(destination)
.ok_or(GraphConnectError::InvalidDestination)?;
// Target input is already connected.
if destination_node.inputs[destination_input].is_some() {
return Err(GraphConnectError::AlreadyConnected);
}
// Target input is empty so simply create the connection.
let edge = edges.insert(ConnectedEdges {
owner_node_index: source,
output_index: source_output,
});
destination_node.inputs[destination_input] = Some(edge);
Ok(())
}
fn traverse_node(
cache: &mut HashMap<Index, usize>,
levels_map: &mut MultiMap<usize, TraversedGraphNode>,
nodes: &RwLockReadGuard<Arena<ConnectedNode>>,
edges: &RwLockReadGuard<Arena<ConnectedEdges>>,
node_index: Index,
level: usize,
) {
// Build the traversed node with input/output mapping info.
let mut traversed_node = TraversedGraphNode {
index: node_index,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
};
// Remove this node from any lower levels it was previously placed in.
let mut has_retained = false;
for l in 0..level {
// Remove the previously traversed node from this level, if present.
let vec = match levels_map.get_vec_mut(&l) { Some(v) => v, None => continue };
let before_len = vec.len();
vec.retain(|x| x.index != node_index);
if before_len != vec.len() {
has_retained = true;
}
}
// Update all inputs that still reference the kicked-out node.
if has_retained {
for l in 0..level {
let vec = match levels_map.get_vec_mut(&l) { Some(v) => v, None => continue };
for node in vec {
for input in &mut node.inputs {
if let Some((nlevel, _, index)) = input {
if index == &node_index {
*nlevel = level;
}
}
}
}
}
}
// Loop through all inputs
let next_level = level + 1;
let node = nodes.get(node_index).unwrap();
for (input_index, input) in node.inputs.iter().enumerate() {
if let Some(input) = input {
let edge = edges.get(*input).unwrap();
let inner_node = edge.owner_node_index;
traversed_node.inputs[input_index] = Some((next_level, edge.output_index, inner_node));
Self::traverse_node(cache, levels_map, nodes, edges, inner_node, next_level);
}
}
// Store traversed node at level.
//let traversed_index = levels_map.get_vec(&level).map(|x| x.len()).unwrap_or(0);
//cache.insert(node_index, traversed_index);
// TODO: Due to retaining this index breaks currently :'(
levels_map.insert(level, traversed_node);
}
/// Executes the graph using the given scheduler.
pub fn execute<T: DeviceHost>(
&mut self,
sc_frame: Arc<wgpu::SwapChainFrame>,
device_host: &'static T,
pool: &ThreadPool,
) {
{
{
optick::event!("FrameGraph::traverse");
// Gain read access to nodes and connections.
let nodes = self.node_arena.read();
let edges = self.edges_arena.read();
// Start traversing from end.
self.levels_map.clear();
Self::traverse_node(
&mut self.traversed_node_cache,
&mut self.levels_map,
&nodes,
&edges,
self.end_node,
0,
);
}
let cache = &mut self.traversed_node_cache;
// Create an async executor.
let mut local_pool = futures::executor::LocalPool::new();
let local_spawner = local_pool.spawner();
// Execute in levels order
let mut all_levels = self.levels_map.keys().cloned().collect::<Vec<_>>();
all_levels.sort_unstable();
let max_levels = all_levels.len();
for level in all_levels.into_iter().rev() {
optick::event!("FrameGraph::execute_level");
optick::tag!("level", level as u32);
// Get rid of duplicated nodes.
let mut nodes_in_level = self.levels_map.get_vec_mut(&level).unwrap().clone();
nodes_in_level.sort_unstable_by_key(|x| x.index);
nodes_in_level.dedup_by_key(|x| x.index);
// Build cache for this level
for (index, node) in nodes_in_level.iter().enumerate() {
cache.insert(node.index, index);
}
// Get chunks
let nodes = self.node_arena.read();
let read_nodes = nodes_in_level
.iter()
.map(|node| (nodes.get(node.index).unwrap(), node.inputs))
.collect::<Vec<_>>();
let mut empty = [Vec::with_capacity(0)];
#[allow(clippy::type_complexity)]
let (outputs, previous_outputs): (
&mut [Vec<Option<FrameNodeValue>>],
&mut [Vec<Option<FrameNodeValue>>],
) = if level == (max_levels - 1) {
(&mut self.output_map, &mut empty)
} else {
self.output_map.split_at_mut(level + 1)
};
let outputs_per_node = outputs[outputs.len() - 1]
.chunks_mut(MAX_INPUT_OUTPUTS_PER_NODE)
.enumerate()
.collect::<Vec<_>>();
// Execute
let encoder_outputs = pool.install(|| {
read_nodes
.par_iter()
.zip(outputs_per_node)
.enumerate()
.map(|(_i, ((node, inputs), (_oi, outputs)))| {
optick::event!("FrameGraph::node");
// Prepare node execution
optick::tag!("name", node.name);
let node_trait = node.node.clone();
let label = format!("NodeCommandEncoder_{}", node.name);
// Map outputs -> inputs.
/*
for (idx, input) in inputs.iter().enumerate() {
if let Some((target_level, output_index, node_index)) = input {
let i = cache.get(&node_index).unwrap();
println!(
"Mapping input #{} to level = {} ({}) and index = {} ({}, {})",
idx,
target_level,
previous_outputs.len() - (target_level - level),
i * MAX_INPUT_OUTPUTS_PER_NODE + output_index,
i,
output_index
);
} else {
println!("Mapping input #{} to None", i);
}
}
*/
let inputs = inputs
.iter()
.map(|input| {
input.map(|(target_level, output_index, node_index)| {
let i = cache.get(&node_index).unwrap();
&previous_outputs[previous_outputs.len() - (target_level - level)]
[i * MAX_INPUT_OUTPUTS_PER_NODE + output_index]
})
})
.map(|input| match input {
Some(Some(rf)) => Some(rf.clone()),
_ => None,
})
.collect::<Vec<_>>();
let sc_cloned = sc_frame.clone();
let out = {
optick::event!("FrameGraph::record_commands");
optick::tag!("name", label);
// Execute the node asynchronously.
node_trait.execute_raw(
&inputs,
outputs,
device_host.get_device(),
device_host.get_queue(),
&*sc_cloned,
)
};
out
})
.collect::<Vec<_>>()
});
{
optick::event!("FrameGraph::submit_level");
optick::tag!("level", level as u32);
let mut buffers = Vec::with_capacity(encoder_outputs.len());
for out in encoder_outputs {
if let Some(buffer) = out.command_buffer |
}
device_host.get_queue().submit(buffers);
}
}
}
// Reset
optick::event!("FrameGraph::reset");
self.reset();
}
}
#[derive(Clone)]
pub enum FrameNodeValue {
Buffer(ResourceRc<Buffer>),
BindGroup(ResourceRc<BindGroup>),
TextureView(ResourceRc<TextureView>),
SampledTexture(SampledTexture),
}
impl std::fmt::Debug for FrameNodeValue {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Buffer(_) => f.write_str("Buffer"),
Self::BindGroup(_) => f.write_str("BindGroup"),
Self::TextureView(_) => f.write_str("Texture"),
Self::SampledTexture(_) => f.write_str("SampledTexture"),
}
}
}
use thiserror::Error;
#[derive(Error, Debug)]
pub enum GraphConnectError {
#[error("The target node has reached its input limit")]
MaximumInputsReached,
#[error("The source node has reached its outputs limit")]
MaximumOutputsReached,
#[error("The target node does not exist")]
InvalidDestination,
#[error("The target nodes input is already connected")]
AlreadyConnected,
}
#[derive(Clone)]
struct TraversedGraphNode {
index: Index,
inputs: [Option<(usize, usize, Index)>; MAX_INPUT_OUTPUTS_PER_NODE],
}
pub trait DeviceHost: Send + Sync + 'static {
fn get_device(&self) -> &wgpu::Device;
fn get_queue(&self) -> &wgpu::Queue;
}
macro_rules! impl_get_node_specific {
($getter:ident, $ty:ident, $rty:ty) => {
impl FrameNodeValue {
pub fn $getter(&self) -> &$rty {
match self {
FrameNodeValue::$ty(group) => group,
_ => panic!(
"Unexpected frame node value, expected '{}' but received '{:?}'",
stringify!($ty),
self
),
}
}
}
};
}
impl_get_node_specific!(get_bind_group, BindGroup, ResourceRc<BindGroup>);
impl_get_node_specific!(get_texture_view, TextureView, ResourceRc<TextureView>);
impl_get_node_specific!(get_sampled_texture, SampledTexture, SampledTexture);
| {
buffers.push(buffer);
} | conditional_block |
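A note on the `split_at_mut` call in `execute` above: Rust forbids holding two mutable borrows of `output_map` at once, so the level being written and the already-produced levels are obtained as disjoint slices. A small sketch of the pattern follows; the toy `output_map` shape and values are assumptions for illustration only.
fn split_borrow_sketch() {
    let mut output_map: Vec<Vec<Option<u32>>> = vec![vec![None; 4]; 3];
    let level = 1;
    // Levels 0..=level are the writable "outputs"; levels above are the
    // previously executed "previous" outputs. Both slices can be mutated
    // independently because they never alias.
    let (outputs, previous) = output_map.split_at_mut(level + 1);
    outputs[level][0] = Some(1);
    previous[0][2] = Some(2);
}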
main.rs | use num_traits::PrimInt;
use pest::Parser;
use pest_derive::Parser;
use std::{
collections::HashMap,
error::Error,
fmt,
io::{self, Read},
str::FromStr,
};
#[cfg(debug_assertions)]
macro_rules! dbg_print {
($( $args:expr ),*) => { print!( $( $args ),* ); }
}
#[cfg(not(debug_assertions))]
macro_rules! dbg_print {
($( $args:expr ),*) => {};
}
#[derive(Parser)]
#[grammar = "input.pest"]
pub struct InputParser;
#[derive(Copy, Clone)]
struct AttackTypes(u8);
impl AttackTypes {
fn to(&self, other: AttackTypes) -> bool {
(other.0 & self.0) != 0
}
}
impl fmt::Debug for AttackTypes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0b{:b}", self.0)
}
}
#[derive(Clone)]
struct Group {
units: u32,
hits: u32,
damages: u16,
boost: u16,
initiative: i8,
attack: AttackTypes,
immunity: AttackTypes,
weakness: AttackTypes,
}
impl Group {
fn effective_power(&self) -> u32 {
self.units * (self.damages as u32 + self.boost as u32)
} |
fn calc_hit(&self, enemy: &Group) -> u32 {
match (
self.immunity.to(enemy.attack),
self.weakness.to(enemy.attack),
) {
(false, false) => enemy.effective_power(),
(true, false) => 0,
(false, true) => enemy.effective_power() * 2,
(true, true) => unreachable!(),
}
}
fn hit(&mut self, points: u32) -> u32 {
let org_units = self.units;
let units_kill = points / self.hits;
self.units = self.units.saturating_sub(units_kill);
let units_lost = org_units - self.units;
dbg_print!("Units lost: {}\n", units_lost);
units_lost
}
}
#[derive(Default, Clone)]
struct Army<'a> {
groups: Vec<Group>,
name: &'a str,
}
impl Army<'_> {
fn sort_for_attack(&self) -> Vec<u16> {
let mut ids: Vec<u16> = (0..self.groups.len() as u16).collect();
ids.sort_by_key(|i|
// descending sort
(
!self.groups[*i as usize].is_alive(),
-(self.groups[*i as usize].effective_power() as i32),
-self.groups[*i as usize].initiative,
));
ids
}
fn choose_enemy(&self, order: &Vec<u16>, enemy: &Army) -> Vec<Option<u16>> {
let mut chosen = vec![false; enemy.groups.len()];
order
.iter()
.map(|idx| {
let i = *idx as usize;
if !self.groups[i].is_alive() {
return None;
}
let mut enemy_ids: Vec<_> = (0..enemy.groups.len()).collect();
enemy_ids.sort_by_cached_key(|&j| {
(
!enemy.groups[j].is_alive(),
chosen[j],
-(enemy.groups[j].calc_hit(&self.groups[i]) as i32),
-(enemy.groups[j].effective_power() as i32),
-enemy.groups[j].initiative,
)
});
// If chosen[j] weren’t a key in the sort above, we’d have to use |filter|,
// not |take_while|, as top results might already have been chosen.
match enemy_ids
.iter()
.take_while(|&&j| {
// Although not explicitly stated in puzzle, if this unit can’t deal
// any damage to any enemy unit, then don’t mark chosen.
enemy.groups[j].is_alive()
&& !chosen[j]
&& enemy.groups[j].calc_hit(&self.groups[i]) > 0
})
.next()
{
Some(&c) => {
chosen[c] = true;
Some(c as u16)
}
None => None,
}
})
.collect()
}
fn is_alive(&self) -> bool {
self.groups.iter().any(|g| g.is_alive())
}
fn boost(&mut self, points: u16) {
for g in &mut self.groups {
g.boost = points;
}
}
}
// PrimInt is yet to get the BITS member; make a new trait.
// https://stackoverflow.com/q/73711297/183120
trait Bits {
const BITS: usize;
}
macro_rules! impl_bits {
( $($ty:ident)* ) => {
$(
impl Bits for $ty {
const BITS: usize = Self::BITS as usize;
}
)*
};
}
impl_bits!(u8 u16 u32 u64 u128);
fn to_flag<'a, T: Bits + PrimInt>(
attack: &'a str,
attack_to_flag: &mut HashMap<&'a str, T>,
) -> Result<T, Box<dyn Error>> {
let n = attack_to_flag.len();
let mask = T::one() << n;
match n < T::BITS {
true => Ok(*attack_to_flag.entry(attack).or_insert(mask)),
false => Err(Box::<dyn Error>::from(format!(
"More than {} distinct attacks; insufficient bit-width.",
T::BITS
))),
}
}
struct Attack {
army: usize,
group: usize,
enemy: usize,
}
impl Attack {
fn enemy_army(&self) -> usize {
// compare to get a bool and cast it, since bitwise !1u8 == 254 in Rust
(self.army == 0) as usize
}
}
// Army ID and remaining units
struct Victor(Option<u8>, u32);
fn fight(mut armies: [Army; 2]) -> Victor {
while armies.iter().all(|a| a.is_alive()) {
let ids = [armies[0].sort_for_attack(), armies[1].sort_for_attack()];
let choices = [
armies[0].choose_enemy(&ids[0], &armies[1]),
armies[1].choose_enemy(&ids[1], &armies[0]),
];
// Excessive debugging; turn on if needed.
// for (i, _) in armies.iter().enumerate() {
// dbg_print!("Army {}\n", i);
// for (idx, &j) in ids[i].iter().enumerate() {
// dbg_print!(
// " Group {}: {} --> {:?}\n",
// j,
// armies[i].groups[j as usize].units,
// choices[i][idx]
// );
// }
// }
// collect all alive groups with respective army ID
let mut fight: Vec<Attack> = ids[0]
.iter()
.zip(choices[0].iter())
.filter_map(|(&i, &choice)| {
match (armies[0].groups[i as usize].is_alive(), choice) {
(true, Some(enemy)) => Some(Attack {
army: 0,
group: i as usize,
enemy: enemy.into(),
}),
_ => None,
}
})
.chain(ids[1].iter().zip(choices[1].iter()).filter_map(
|(&j, &choice)| match (armies[1].groups[j as usize].is_alive(), choice)
{
(true, Some(enemy)) => Some(Attack {
army: 1,
group: j as usize,
enemy: enemy.into(),
}),
_ => None,
},
))
.collect::<Vec<Attack>>();
// Attacks in this fight are only b/w alive groups from here on.
fight.sort_by_key(|a| -armies[a.army].groups[a.group].initiative);
let mut total_units_lost = 0;
for attack in &fight {
dbg_print!(
"{}'s Group {} --> {}'s Group {}; ",
armies[attack.army].name,
attack.group,
armies[attack.enemy_army()].name,
attack.enemy
);
let attacker = &armies[attack.army].groups[attack.group];
let defender = &armies[attack.enemy_army()].groups[attack.enemy];
let damage = defender.calc_hit(attacker);
let defender_mut = &mut armies[attack.enemy_army()].groups[attack.enemy];
total_units_lost += defender_mut.hit(damage);
}
if total_units_lost == 0 {
return Victor(None, 0);
}
dbg_print!("--------------\n");
}
match armies[0].is_alive() {
true => Victor(
Some(0),
armies[0].groups.iter().fold(0, |units, g| units + g.units),
),
false => Victor(
Some(1),
armies[1].groups.iter().fold(0, |units, g| units + g.units),
),
}
}
fn main() -> Result<(), Box<dyn Error>> {
let mut input_str = String::new();
let mut stdin = io::stdin();
stdin.read_to_string(&mut input_str)?;
let input = InputParser::parse(Rule::file, &input_str)
.expect("Invalid input")
.next()
.unwrap();
let mut armies = [Army::default(), Army::default()];
let mut next_army: u8 = 0;
let mut attack_to_flag: HashMap<&str, u8> = HashMap::new();
for line in input.into_inner() {
match line.as_rule() {
Rule::army_name => {
armies[next_army as usize].name = line.as_str();
next_army += 1;
}
Rule::group => {
let mut counts = [0u32; 4];
let mut idx = 0;
let mut attack = AttackTypes(0);
let mut immunities = 0u8;
let mut weaknesses = 0u8;
for r in line.into_inner() {
match r.as_rule() {
Rule::count => {
counts[idx] = u32::from_str(r.as_str())?;
idx += 1;
}
Rule::attack => {
attack = AttackTypes(to_flag(r.as_str(), &mut attack_to_flag)?);
}
Rule::traits => {
for t in r.into_inner() {
match t.as_rule() {
Rule::immunities => {
for i in t.into_inner() {
immunities |= to_flag(i.as_str(), &mut attack_to_flag)?;
}
}
Rule::weaknesses => {
for w in t.into_inner() {
weaknesses |= to_flag(w.as_str(), &mut attack_to_flag)?;
}
}
_ => unreachable!(),
}
}
}
_ => unreachable!(),
}
}
armies[(next_army - 1) as usize].groups.push(Group {
units: counts[0],
hits: counts[1],
damages: counts[2] as u16,
boost: 0,
initiative: counts[3] as i8,
attack,
immunity: AttackTypes(immunities),
weakness: AttackTypes(weaknesses),
});
}
Rule::EOI => (),
_ => unreachable!(),
}
}
// Part 1
if let Victor(Some(army), units_alive) = fight(armies.clone()) {
println!(
"{} wins with units: {}",
armies[army as usize].name, units_alive
);
}
// Part 2: binary search for minimal boost
let (mut lo_boost, mut hi_boost) = (1, 1500);
while lo_boost != hi_boost {
// Using integers means below is implicitly floor((L + R) / 2); a ceil
// implementation sets hi_boost = boost - 1 and lo_boost = boost. Floor
// route stops on the right, while ceil on the left side of target.
let boost = (hi_boost + lo_boost) / 2;
armies[0].boost(boost);
match fight(armies.clone()).0 {
Some(0) => hi_boost = boost,
_ => lo_boost = boost + 1,
}
}
armies[0].boost(hi_boost); // lo_boost = hi_boost anyway
println!(
"Immune System wins with minimal boost {hi_boost}; surviving units: {}",
fight(armies.clone()).1
);
Ok(())
} |
fn is_alive(&self) -> bool {
self.units > 0
} | random_line_split |
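The damage rules in `calc_hit` are easiest to see with concrete numbers. The sketch below uses assumed values (not from the puzzle input) to check that a neutral defender takes the attacker's full effective power of 10 * 3 = 30, an immune defender takes nothing, and a weak defender takes double.
fn damage_rules_sketch() {
    let fire = AttackTypes(0b01);
    let none = AttackTypes(0b00);
    let attacker = Group {
        units: 10, hits: 5, damages: 3, boost: 0, initiative: 1,
        attack: fire, immunity: none, weakness: none,
    };
    let mut defender = attacker.clone();
    assert_eq!(defender.calc_hit(&attacker), 30); // neutral: full effective power
    defender.immunity = fire;
    assert_eq!(defender.calc_hit(&attacker), 0); // immune: attack fully blocked
    defender.immunity = none;
    defender.weakness = fire;
    assert_eq!(defender.calc_hit(&attacker), 60); // weak: damage doubled
}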
main.rs | use num_traits::PrimInt;
use pest::Parser;
use pest_derive::Parser;
use std::{
collections::HashMap,
error::Error,
fmt,
io::{self, Read},
str::FromStr,
};
#[cfg(debug_assertions)]
macro_rules! dbg_print {
($( $args:expr ),*) => { print!( $( $args ),* ); }
}
#[cfg(not(debug_assertions))]
macro_rules! dbg_print {
($( $args:expr ),*) => {};
}
#[derive(Parser)]
#[grammar = "input.pest"]
pub struct InputParser;
#[derive(Copy, Clone)]
struct AttackTypes(u8);
impl AttackTypes {
fn to(&self, other: AttackTypes) -> bool {
(other.0 & self.0) != 0
}
}
impl fmt::Debug for AttackTypes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0b{:b}", self.0)
}
}
#[derive(Clone)]
struct Group {
units: u32,
hits: u32,
damages: u16,
boost: u16,
initiative: i8,
attack: AttackTypes,
immunity: AttackTypes,
weakness: AttackTypes,
}
impl Group {
fn effective_power(&self) -> u32 {
self.units * (self.damages as u32 + self.boost as u32)
}
fn is_alive(&self) -> bool {
self.units > 0
}
fn calc_hit(&self, enemy: &Group) -> u32 {
match (
self.immunity.to(enemy.attack),
self.weakness.to(enemy.attack),
) {
(false, false) => enemy.effective_power(),
(true, false) => 0,
(false, true) => enemy.effective_power() * 2,
(true, true) => unreachable!(),
}
}
fn hit(&mut self, points: u32) -> u32 |
}
#[derive(Default, Clone)]
struct Army<'a> {
groups: Vec<Group>,
name: &'a str,
}
impl Army<'_> {
fn sort_for_attack(&self) -> Vec<u16> {
let mut ids: Vec<u16> = (0..self.groups.len() as u16).collect();
ids.sort_by_key(|i|
// descending sort
(
!self.groups[*i as usize].is_alive(),
-(self.groups[*i as usize].effective_power() as i32),
-self.groups[*i as usize].initiative,
));
ids
}
fn choose_enemy(&self, order: &Vec<u16>, enemy: &Army) -> Vec<Option<u16>> {
let mut chosen = vec![false; enemy.groups.len()];
order
.iter()
.map(|idx| {
let i = *idx as usize;
if !self.groups[i].is_alive() {
return None;
}
let mut enemy_ids: Vec<_> = (0..enemy.groups.len()).collect();
enemy_ids.sort_by_cached_key(|&j| {
(
!enemy.groups[j].is_alive(),
chosen[j],
-(enemy.groups[j].calc_hit(&self.groups[i]) as i32),
-(enemy.groups[j].effective_power() as i32),
-enemy.groups[j].initiative,
)
});
// If chosen[j] weren’t a key in the sort above, we’d have to use |filter|,
// not |take_while|, as top results might already have been chosen.
match enemy_ids
.iter()
.take_while(|&&j| {
// Although not explicitly stated in puzzle, if this unit can’t deal
// any damage to any enemy unit, then don’t mark chosen.
enemy.groups[j].is_alive()
&& !chosen[j]
&& enemy.groups[j].calc_hit(&self.groups[i]) > 0
})
.next()
{
Some(&c) => {
chosen[c] = true;
Some(c as u16)
}
None => None,
}
})
.collect()
}
fn is_alive(&self) -> bool {
self.groups.iter().any(|g| g.is_alive())
}
fn boost(&mut self, points: u16) {
for g in &mut self.groups {
g.boost = points;
}
}
}
// PrimInt is yet to get the BITS member; make a new trait.
// https://stackoverflow.com/q/73711297/183120
trait Bits {
const BITS: usize;
}
macro_rules! impl_bits {
( $($ty:ident)* ) => {
$(
impl Bits for $ty {
const BITS: usize = Self::BITS as usize;
}
)*
};
}
impl_bits!(u8 u16 u32 u64 u128);
fn to_flag<'a, T: Bits + PrimInt>(
attack: &'a str,
attack_to_flag: &mut HashMap<&'a str, T>,
) -> Result<T, Box<dyn Error>> {
let n = attack_to_flag.len();
let mask = T::one() << n;
match n < T::BITS {
true => Ok(*attack_to_flag.entry(attack).or_insert(mask)),
false => Err(Box::<dyn Error>::from(format!(
"More than {} distinct attacks; insufficient bit-width.",
T::BITS
))),
}
}
struct Attack {
army: usize,
group: usize,
enemy: usize,
}
impl Attack {
fn enemy_army(&self) -> usize {
// compare to get a bool and cast it, since bitwise !1u8 == 254 in Rust
(self.army == 0) as usize
}
}
// Army ID and remaining units
struct Victor(Option<u8>, u32);
fn fight(mut armies: [Army; 2]) -> Victor {
while armies.iter().all(|a| a.is_alive()) {
let ids = [armies[0].sort_for_attack(), armies[1].sort_for_attack()];
let choices = [
armies[0].choose_enemy(&ids[0], &armies[1]),
armies[1].choose_enemy(&ids[1], &armies[0]),
];
// Excessive debugging; turn on if needed.
// for (i, _) in armies.iter().enumerate() {
// dbg_print!("Army {}\n", i);
// for (idx, &j) in ids[i].iter().enumerate() {
// dbg_print!(
// " Group {}: {} --> {:?}\n",
// j,
// armies[i].groups[j as usize].units,
// choices[i][idx]
// );
// }
// }
// collect all alive groups with respective army ID
let mut fight: Vec<Attack> = ids[0]
.iter()
.zip(choices[0].iter())
.filter_map(|(&i, &choice)| {
match (armies[0].groups[i as usize].is_alive(), choice) {
(true, Some(enemy)) => Some(Attack {
army: 0,
group: i as usize,
enemy: enemy.into(),
}),
_ => None,
}
})
.chain(ids[1].iter().zip(choices[1].iter()).filter_map(
|(&j, &choice)| match (armies[1].groups[j as usize].is_alive(), choice)
{
(true, Some(enemy)) => Some(Attack {
army: 1,
group: j as usize,
enemy: enemy.into(),
}),
_ => None,
},
))
.collect::<Vec<Attack>>();
// Attacks in this fight are only b/w alive groups from here on.
fight.sort_by_key(|a| -armies[a.army].groups[a.group].initiative);
let mut total_units_lost = 0;
for attack in &fight {
dbg_print!(
"{}'s Group {} --> {}'s Group {}; ",
armies[attack.army].name,
attack.group,
armies[attack.enemy_army()].name,
attack.enemy
);
let attacker = &armies[attack.army].groups[attack.group];
let defender = &armies[attack.enemy_army()].groups[attack.enemy];
let damage = defender.calc_hit(attacker);
let defender_mut = &mut armies[attack.enemy_army()].groups[attack.enemy];
total_units_lost += defender_mut.hit(damage);
}
if total_units_lost == 0 {
return Victor(None, 0);
}
dbg_print!("--------------\n");
}
match armies[0].is_alive() {
true => Victor(
Some(0),
armies[0].groups.iter().fold(0, |units, g| units + g.units),
),
false => Victor(
Some(1),
armies[1].groups.iter().fold(0, |units, g| units + g.units),
),
}
}
fn main() -> Result<(), Box<dyn Error>> {
let mut input_str = String::new();
let mut stdin = io::stdin();
stdin.read_to_string(&mut input_str)?;
let input = InputParser::parse(Rule::file, &input_str)
.expect("Invalid input")
.next()
.unwrap();
let mut armies = [Army::default(), Army::default()];
let mut next_army: u8 = 0;
let mut attack_to_flag: HashMap<&str, u8> = HashMap::new();
for line in input.into_inner() {
match line.as_rule() {
Rule::army_name => {
armies[next_army as usize].name = line.as_str();
next_army += 1;
}
Rule::group => {
let mut counts = [0u32; 4];
let mut idx = 0;
let mut attack = AttackTypes(0);
let mut immunities = 0u8;
let mut weaknesses = 0u8;
for r in line.into_inner() {
match r.as_rule() {
Rule::count => {
counts[idx] = u32::from_str(r.as_str())?;
idx += 1;
}
Rule::attack => {
attack = AttackTypes(to_flag(r.as_str(), &mut attack_to_flag)?);
}
Rule::traits => {
for t in r.into_inner() {
match t.as_rule() {
Rule::immunities => {
for i in t.into_inner() {
immunities |= to_flag(i.as_str(), &mut attack_to_flag)?;
}
}
Rule::weaknesses => {
for w in t.into_inner() {
weaknesses |= to_flag(w.as_str(), &mut attack_to_flag)?;
}
}
_ => unreachable!(),
}
}
}
_ => unreachable!(),
}
}
armies[(next_army - 1) as usize].groups.push(Group {
units: counts[0],
hits: counts[1],
damages: counts[2] as u16,
boost: 0,
initiative: counts[3] as i8,
attack,
immunity: AttackTypes(immunities),
weakness: AttackTypes(weaknesses),
});
}
Rule::EOI => (),
_ => unreachable!(),
}
}
// Part 1
if let Victor(Some(army), units_alive) = fight(armies.clone()) {
println!(
"{} wins with units: {}",
armies[army as usize].name, units_alive
);
}
// Part 2: binary search for minimal boost
let (mut lo_boost, mut hi_boost) = (1, 1500);
while lo_boost != hi_boost {
// Using integers means below is implicitly floor((L + R) / 2); a ceil
// implementation sets hi_boost = boost - 1 and lo_boost = boost. Floor
// route stops on the right, while ceil on the left side of target.
let boost = (hi_boost + lo_boost) / 2;
armies[0].boost(boost);
match fight(armies.clone()).0 {
Some(0) => hi_boost = boost,
_ => lo_boost = boost + 1,
}
}
armies[0].boost(hi_boost); // lo_boost = hi_boost anyway
println!(
"Immune System wins with minimal boost {hi_boost}; surviving units: {}",
fight(armies.clone()).1
);
Ok(())
}
| {
let org_units = self.units;
let units_kill = points / self.hits;
self.units = self.units.saturating_sub(units_kill);
let units_lost = org_units - self.units;
dbg_print!("Units lost: {}\n", units_lost);
units_lost
} | identifier_body |
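The floor-midpoint comment near the end of `main` describes a standard lower-bound binary search. Below is a self-contained sketch of the same loop shape; the `wins` predicate and the example bound of 37 are hypothetical stand-ins for "the Immune System wins with this boost".
fn minimal_boost(mut lo: u16, mut hi: u16, wins: impl Fn(u16) -> bool) -> u16 {
    while lo != hi {
        let mid = (lo + hi) / 2; // floor: lo <= mid < hi, so both branches shrink the range
        if wins(mid) {
            hi = mid; // mid works; the answer is mid or something smaller
        } else {
            lo = mid + 1; // mid fails; the answer is strictly larger
        }
    }
    hi // lo == hi: the smallest value for which `wins` holds
}
// e.g. assert_eq!(minimal_boost(1, 1500, |b| b >= 37), 37);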
main.rs | use num_traits::PrimInt;
use pest::Parser;
use pest_derive::Parser;
use std::{
collections::HashMap,
error::Error,
fmt,
io::{self, Read},
str::FromStr,
};
#[cfg(debug_assertions)]
macro_rules! dbg_print {
($( $args:expr ),*) => { print!( $( $args ),* ); }
}
#[cfg(not(debug_assertions))]
macro_rules! dbg_print {
($( $args:expr ),*) => {};
}
#[derive(Parser)]
#[grammar = "input.pest"]
pub struct InputParser;
#[derive(Copy, Clone)]
struct AttackTypes(u8);
impl AttackTypes {
fn to(&self, other: AttackTypes) -> bool {
(other.0 & self.0) != 0
}
}
impl fmt::Debug for AttackTypes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0b{:b}", self.0)
}
}
#[derive(Clone)]
struct Group {
units: u32,
hits: u32,
damages: u16,
boost: u16,
initiative: i8,
attack: AttackTypes,
immunity: AttackTypes,
weakness: AttackTypes,
}
impl Group {
fn | (&self) -> u32 {
self.units * (self.damages as u32 + self.boost as u32)
}
fn is_alive(&self) -> bool {
self.units > 0
}
fn calc_hit(&self, enemy: &Group) -> u32 {
match (
self.immunity.to(enemy.attack),
self.weakness.to(enemy.attack),
) {
(false, false) => enemy.effective_power(),
(true, false) => 0,
(false, true) => enemy.effective_power() * 2,
(true, true) => unreachable!(),
}
}
fn hit(&mut self, points: u32) -> u32 {
let org_units = self.units;
let units_kill = points / self.hits;
self.units = self.units.saturating_sub(units_kill);
let units_lost = org_units - self.units;
dbg_print!("Units lost: {}\n", units_lost);
units_lost
}
}
#[derive(Default, Clone)]
struct Army<'a> {
groups: Vec<Group>,
name: &'a str,
}
impl Army<'_> {
fn sort_for_attack(&self) -> Vec<u16> {
let mut ids: Vec<u16> = (0..self.groups.len() as u16).collect();
ids.sort_by_key(|i|
// descending sort
(
!self.groups[*i as usize].is_alive(),
-(self.groups[*i as usize].effective_power() as i32),
-self.groups[*i as usize].initiative,
));
ids
}
fn choose_enemy(&self, order: &Vec<u16>, enemy: &Army) -> Vec<Option<u16>> {
let mut chosen = vec![false; enemy.groups.len()];
order
.iter()
.map(|idx| {
let i = *idx as usize;
if !self.groups[i].is_alive() {
return None;
}
let mut enemy_ids: Vec<_> = (0..enemy.groups.len()).collect();
enemy_ids.sort_by_cached_key(|&j| {
(
!enemy.groups[j].is_alive(),
chosen[j],
-(enemy.groups[j].calc_hit(&self.groups[i]) as i32),
-(enemy.groups[j].effective_power() as i32),
-enemy.groups[j].initiative,
)
});
// If chosen[j] weren’t a key in the sort above, we’d have to use |filter|,
// not |take_while|, as top results might already have been chosen.
match enemy_ids
.iter()
.take_while(|&&j| {
// Although not explicitly stated in puzzle, if this unit can’t deal
// any damage to any enemy unit, then don’t mark chosen.
enemy.groups[j].is_alive()
&& !chosen[j]
&& enemy.groups[j].calc_hit(&self.groups[i]) > 0
})
.next()
{
Some(&c) => {
chosen[c] = true;
Some(c as u16)
}
None => None,
}
})
.collect()
}
fn is_alive(&self) -> bool {
self.groups.iter().any(|g| g.is_alive())
}
fn boost(&mut self, points: u16) {
for g in &mut self.groups {
g.boost = points;
}
}
}
// PrimInt is yet to get the BITS member; make a new trait.
// https://stackoverflow.com/q/73711297/183120
trait Bits {
const BITS: usize;
}
macro_rules! impl_bits {
( $($ty:ident)* ) => {
$(
impl Bits for $ty {
const BITS: usize = Self::BITS as usize;
}
)*
};
}
impl_bits!(u8 u16 u32 u64 u128);
fn to_flag<'a, T: Bits + PrimInt>(
attack: &'a str,
attack_to_flag: &mut HashMap<&'a str, T>,
) -> Result<T, Box<dyn Error>> {
let n = attack_to_flag.len();
let mask = T::one() << n;
match n < T::BITS {
true => Ok(*attack_to_flag.entry(attack).or_insert(mask)),
false => Err(Box::<dyn Error>::from(format!(
"More than {} distinct attacks; insufficient bit-width.",
T::BITS
))),
}
}
struct Attack {
army: usize,
group: usize,
enemy: usize,
}
impl Attack {
fn enemy_army(&self) -> usize {
// compare to get a bool and cast it, since bitwise !1u8 == 254 in Rust
(self.army == 0) as usize
}
}
// Army ID and remaining units
struct Victor(Option<u8>, u32);
fn fight(mut armies: [Army; 2]) -> Victor {
while armies.iter().all(|a| a.is_alive()) {
let ids = [armies[0].sort_for_attack(), armies[1].sort_for_attack()];
let choices = [
armies[0].choose_enemy(&ids[0], &armies[1]),
armies[1].choose_enemy(&ids[1], &armies[0]),
];
// Excessive debugging; turn on if needed.
// for (i, _) in armies.iter().enumerate() {
// dbg_print!("Army {}\n", i);
// for (idx, &j) in ids[i].iter().enumerate() {
// dbg_print!(
// " Group {}: {} --> {:?}\n",
// j,
// armies[i].groups[j as usize].units,
// choices[i][idx]
// );
// }
// }
// collect all alive groups with respective army ID
let mut fight: Vec<Attack> = ids[0]
.iter()
.zip(choices[0].iter())
.filter_map(|(&i, &choice)| {
match (armies[0].groups[i as usize].is_alive(), choice) {
(true, Some(enemy)) => Some(Attack {
army: 0,
group: i as usize,
enemy: enemy.into(),
}),
_ => None,
}
})
.chain(ids[1].iter().zip(choices[1].iter()).filter_map(
|(&j, &choice)| match (armies[1].groups[j as usize].is_alive(), choice)
{
(true, Some(enemy)) => Some(Attack {
army: 1,
group: j as usize,
enemy: enemy.into(),
}),
_ => None,
},
))
.collect::<Vec<Attack>>();
// Attacks in this fight are only b/w alive groups from here on.
fight.sort_by_key(|a| -armies[a.army].groups[a.group].initiative);
let mut total_units_lost = 0;
for attack in &fight {
dbg_print!(
"{}'s Group {} --> {}'s Group {}; ",
armies[attack.army].name,
attack.group,
armies[attack.enemy_army()].name,
attack.enemy
);
let attacker = &armies[attack.army].groups[attack.group];
let defender = &armies[attack.enemy_army()].groups[attack.enemy];
let damage = defender.calc_hit(attacker);
let defender_mut = &mut armies[attack.enemy_army()].groups[attack.enemy];
total_units_lost += defender_mut.hit(damage);
}
if total_units_lost == 0 {
return Victor(None, 0);
}
dbg_print!("--------------\n");
}
match armies[0].is_alive() {
true => Victor(
Some(0),
armies[0].groups.iter().fold(0, |units, g| units + g.units),
),
false => Victor(
Some(1),
armies[1].groups.iter().fold(0, |units, g| units + g.units),
),
}
}
fn main() -> Result<(), Box<dyn Error>> {
let mut input_str = String::new();
let mut stdin = io::stdin();
stdin.read_to_string(&mut input_str)?;
let input = InputParser::parse(Rule::file, &input_str)
.expect("Invalid input")
.next()
.unwrap();
let mut armies = [Army::default(), Army::default()];
let mut next_army: u8 = 0;
let mut attack_to_flag: HashMap<&str, u8> = HashMap::new();
for line in input.into_inner() {
match line.as_rule() {
Rule::army_name => {
armies[next_army as usize].name = line.as_str();
next_army += 1;
}
Rule::group => {
let mut counts = [0u32; 4];
let mut idx = 0;
let mut attack = AttackTypes(0);
let mut immunities = 0u8;
let mut weaknesses = 0u8;
for r in line.into_inner() {
match r.as_rule() {
Rule::count => {
counts[idx] = u32::from_str(r.as_str())?;
idx += 1;
}
Rule::attack => {
attack = AttackTypes(to_flag(r.as_str(), &mut attack_to_flag)?);
}
Rule::traits => {
for t in r.into_inner() {
match t.as_rule() {
Rule::immunities => {
for i in t.into_inner() {
immunities |= to_flag(i.as_str(), &mut attack_to_flag)?;
}
}
Rule::weaknesses => {
for w in t.into_inner() {
weaknesses |= to_flag(w.as_str(), &mut attack_to_flag)?;
}
}
_ => unreachable!(),
}
}
}
_ => unreachable!(),
}
}
armies[(next_army - 1) as usize].groups.push(Group {
units: counts[0],
hits: counts[1],
damages: counts[2] as u16,
boost: 0,
initiative: counts[3] as i8,
attack,
immunity: AttackTypes(immunities),
weakness: AttackTypes(weaknesses),
});
}
Rule::EOI => (),
_ => unreachable!(),
}
}
// Part 1
if let Victor(Some(army), units_alive) = fight(armies.clone()) {
println!(
"{} wins with units: {}",
armies[army as usize].name, units_alive
);
}
// Part 2: binary search for minimal boost
let (mut lo_boost, mut hi_boost) = (1, 1500);
while lo_boost != hi_boost {
// Using integers means below is implicitly floor((L + R) / 2); a ceil
// implementation sets hi_boost = boost - 1 and lo_boost = boost. Floor
// route stops on the right, while ceil on the left side of target.
let boost = (hi_boost + lo_boost) / 2;
armies[0].boost(boost);
match fight(armies.clone()).0 {
Some(0) => hi_boost = boost,
_ => lo_boost = boost + 1,
}
}
armies[0].boost(hi_boost); // lo_boost = hi_boost anyway
println!(
"Immune System wins with minimal boost {hi_boost}; surviving units: {}",
fight(armies.clone()).1
);
Ok(())
}
| effective_power | identifier_name |
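`to_flag` interns each distinct attack name into its own bit of the integer type, erroring out once the bit-width is exhausted (8 names for `u8`). A quick demonstration; the names "fire" and "cold" are assumptions, since the real names come from the puzzle input.
fn flag_interning_demo() -> Result<(), Box<dyn Error>> {
    let mut table: HashMap<&str, u8> = HashMap::new();
    assert_eq!(to_flag("fire", &mut table)?, 0b01); // first name -> bit 0
    assert_eq!(to_flag("cold", &mut table)?, 0b10); // second name -> bit 1
    assert_eq!(to_flag("fire", &mut table)?, 0b01); // repeats reuse their bit
    Ok(())
}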